X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/cb3231590a3c94ab4375e2228bd5e86b0cf1ad7e..refs/heads/master:/osfmk/arm/cpu_common.c

diff --git a/osfmk/arm/cpu_common.c b/osfmk/arm/cpu_common.c
index 327434ece..faa3b1e80 100644
--- a/osfmk/arm/cpu_common.c
+++ b/osfmk/arm/cpu_common.c
@@ -31,11 +31,12 @@
  *	cpu routines common to all supported arm variants
  */
 
-#include
 #include
 #include
 #include
+#include
 #include
+#include
 #include
 #include
 #include
@@ -53,26 +54,31 @@
 #include
 #include
 #include
+#include
 #include
 
-#if KPERF
-void kperf_signal_handler(unsigned int cpu_number);
-#endif
-
-cpu_data_t BootCpuData;
+SECURITY_READ_ONLY_LATE(struct percpu_base) percpu_base;
+vm_address_t percpu_base_cur;
+cpu_data_t PERCPU_DATA(cpu_data);
 cpu_data_entry_t CpuDataEntries[MAX_CPUS];
 
-struct processor BootProcessor;
+static LCK_GRP_DECLARE(cpu_lck_grp, "cpu_lck_grp");
+static LCK_RW_DECLARE(cpu_state_lock, &cpu_lck_grp);
 
 unsigned int real_ncpus = 1;
 boolean_t idle_enable = FALSE;
 uint64_t wake_abstime = 0x0ULL;
 
+extern uint64_t xcall_ack_timeout_abstime;
+
+#if defined(HAS_IPI)
+extern unsigned int gFastIPI;
+#endif /* defined(HAS_IPI) */
+
 cpu_data_t *
 cpu_datap(int cpu)
 {
-    assert(cpu < MAX_CPUS);
+    assert(cpu <= ml_get_max_cpu_number());
     return CpuDataEntries[cpu].cpu_data_vaddr;
 }
 
@@ -189,8 +195,8 @@ cpu_idle_tickle(void)
     intr = ml_set_interrupts_enabled(FALSE);
     cpu_data_ptr = getCpuDatap();
 
-    if (cpu_data_ptr->idle_timer_notify != (void *)NULL) {
-        ((idle_timer_t)cpu_data_ptr->idle_timer_notify)(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
+    if (cpu_data_ptr->idle_timer_notify != NULL) {
+        cpu_data_ptr->idle_timer_notify(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
         if (new_idle_timeout_ticks != 0x0ULL) {
             /* if a new idle timeout was requested set the new idle timer deadline */
             clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
@@ -214,21 +220,25 @@ cpu_handle_xcall(cpu_data_t *cpu_data_ptr)
      * added SIGPxcall to the pending mask, but hasn't yet assigned the call params.*/
     if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) {
         xfunc = cpu_data_ptr->cpu_xcall_p0;
+        INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI);
         xparam = cpu_data_ptr->cpu_xcall_p1;
         cpu_data_ptr->cpu_xcall_p0 = NULL;
         cpu_data_ptr->cpu_xcall_p1 = NULL;
         os_atomic_thread_fence(acq_rel);
         os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcall, relaxed);
         xfunc(xparam);
+        INTERRUPT_MASKED_DEBUG_END();
     }
     if (cpu_data_ptr->cpu_imm_xcall_p0 != NULL && cpu_data_ptr->cpu_imm_xcall_p1 != NULL) {
         xfunc = cpu_data_ptr->cpu_imm_xcall_p0;
+        INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI);
         xparam = cpu_data_ptr->cpu_imm_xcall_p1;
         cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
         cpu_data_ptr->cpu_imm_xcall_p1 = NULL;
         os_atomic_thread_fence(acq_rel);
         os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcallImm, relaxed);
         xfunc(xparam);
+        INTERRUPT_MASKED_DEBUG_END();
     }
 }
 
@@ -246,6 +256,10 @@ cpu_broadcast_xcall_internal(unsigned int signal,
     int cpu;
     int max_cpu = ml_get_max_cpu_number() + 1;
 
+    //yes, param ALSO cannot be NULL
+    assert(func);
+    assert(parm);
+
     intr = ml_set_interrupts_enabled(FALSE);
     cpu_data_ptr = getCpuDatap();
 
@@ -300,6 +314,38 @@ cpu_broadcast_xcall(uint32_t *synch,
     return cpu_broadcast_xcall_internal(SIGPxcall, synch, self_xcall, func, parm);
 }
 
+struct cpu_broadcast_xcall_simple_data {
+    broadcastFunc func;
+    void* parm;
+    uint32_t sync;
+};
+
+static void
+cpu_broadcast_xcall_simple_cbk(void *parm)
+{
+    struct cpu_broadcast_xcall_simple_data *data = (struct cpu_broadcast_xcall_simple_data*)parm;
+
+    data->func(data->parm);
+
+    if (os_atomic_dec(&data->sync, relaxed) == 0) {
+        thread_wakeup((event_t)&data->sync);
+    }
+}
+
+static unsigned int
+cpu_xcall_simple(boolean_t self_xcall,
+    broadcastFunc func,
+    void *parm,
+    bool immediate)
+{
+    struct cpu_broadcast_xcall_simple_data data = {};
+
+    data.func = func;
+    data.parm = parm;
+
+    return cpu_broadcast_xcall_internal(immediate ? SIGPxcallImm : SIGPxcall, &data.sync, self_xcall, cpu_broadcast_xcall_simple_cbk, &data);
+}
+
 unsigned int
 cpu_broadcast_immediate_xcall(uint32_t *synch,
     boolean_t self_xcall,
@@ -309,6 +355,22 @@ cpu_broadcast_immediate_xcall(uint32_t *synch,
     return cpu_broadcast_xcall_internal(SIGPxcallImm, synch, self_xcall, func, parm);
 }
 
+unsigned int
+cpu_broadcast_xcall_simple(boolean_t self_xcall,
+    broadcastFunc func,
+    void *parm)
+{
+    return cpu_xcall_simple(self_xcall, func, parm, false);
+}
+
+unsigned int
+cpu_broadcast_immediate_xcall_simple(boolean_t self_xcall,
+    broadcastFunc func,
+    void *parm)
+{
+    return cpu_xcall_simple(self_xcall, func, parm, true);
+}
+
 static kern_return_t
 cpu_xcall_internal(unsigned int signal, int cpu_number, broadcastFunc func, void *param)
 {
@@ -367,6 +429,11 @@ cpu_signal_internal(cpu_data_t *target_proc,
     }
 
     if ((signal == SIGPxcall) || (signal == SIGPxcallImm)) {
+        uint64_t start_mabs_time, max_mabs_time, current_mabs_time;
+        current_mabs_time = start_mabs_time = mach_absolute_time();
+        max_mabs_time = xcall_ack_timeout_abstime + current_mabs_time;
+        assert(max_mabs_time > current_mabs_time);
+
         do {
             current_signals = target_proc->cpu_signal;
             if ((current_signals & SIGPdisabled) == SIGPdisabled) {
@@ -387,7 +454,20 @@ cpu_signal_internal(cpu_data_t *target_proc,
             if (!swap_success && (current_proc->cpu_signal & signal)) {
                 cpu_handle_xcall(current_proc);
             }
-        } while (!swap_success);
+        } while (!swap_success && ((current_mabs_time = mach_absolute_time()) < max_mabs_time));
+
+        /*
+         * If we time out while waiting for the target CPU to respond, it's possible that no
+         * other CPU is available to handle the watchdog interrupt that would eventually trigger
+         * a panic. To prevent this from happening, we just panic here to flag this condition.
+         */
+        if (__improbable(current_mabs_time >= max_mabs_time)) {
+            uint64_t end_time_ns, xcall_ack_timeout_ns;
+            absolutetime_to_nanoseconds(current_mabs_time - start_mabs_time, &end_time_ns);
+            absolutetime_to_nanoseconds(xcall_ack_timeout_abstime, &xcall_ack_timeout_ns);
+            panic("CPU%u has failed to respond to cross-call after %llu nanoseconds (timeout = %llu ns)",
+                target_proc->cpu_number, end_time_ns, xcall_ack_timeout_ns);
+        }
 
         if (signal == SIGPxcallImm) {
             target_proc->cpu_imm_xcall_p0 = p0;
@@ -419,9 +499,25 @@ cpu_signal_internal(cpu_data_t *target_proc,
 
     if (!(target_proc->cpu_signal & SIGPdisabled)) {
         if (defer) {
+#if defined(HAS_IPI)
+            if (gFastIPI) {
+                ml_cpu_signal_deferred(target_proc->cpu_phys_id);
+            } else {
+                PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
+            }
+#else
             PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
+#endif /* defined(HAS_IPI) */
         } else {
+#if defined(HAS_IPI)
+            if (gFastIPI) {
+                ml_cpu_signal(target_proc->cpu_phys_id);
+            } else {
+                PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
+            }
+#else
             PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
+#endif /* defined(HAS_IPI) */
         }
     }
 
@@ -449,7 +545,15 @@ cpu_signal_cancel(cpu_data_t *target_proc)
 {
     /* TODO: Should we care about the state of a core as far as squashing deferred IPIs goes? */
     if (!(target_proc->cpu_signal & SIGPdisabled)) {
+#if defined(HAS_IPI)
+        if (gFastIPI) {
+            ml_cpu_signal_retract(target_proc->cpu_phys_id);
+        } else {
+            PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
+        }
+#else
         PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
+#endif /* defined(HAS_IPI) */
     }
 }
 
@@ -465,11 +569,9 @@ cpu_signal_handler_internal(boolean_t disable_signal)
     cpu_data_t *cpu_data_ptr = getCpuDatap();
     unsigned int cpu_signal;
 
-    cpu_data_ptr->cpu_stat.ipi_cnt++;
     cpu_data_ptr->cpu_stat.ipi_cnt_wake++;
-
-    SCHED_STATS_IPI(current_processor());
+    SCHED_STATS_INC(ipi_count);
 
     cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);
 
@@ -482,33 +584,46 @@ cpu_signal_handler_internal(boolean_t disable_signal)
     while (cpu_signal & ~SIGPdisabled) {
         if (cpu_signal & SIGPdec) {
             os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdec, relaxed);
+            INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_IPI);
             rtclock_intr(FALSE);
+            INTERRUPT_MASKED_DEBUG_END();
         }
 #if KPERF
-        if (cpu_signal & SIGPkptimer) {
-            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPkptimer, relaxed);
-            kperf_signal_handler((unsigned int)cpu_data_ptr->cpu_number);
+        if (cpu_signal & SIGPkppet) {
+            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPkppet, relaxed);
+            extern void kperf_signal_handler(void);
+            INTERRUPT_MASKED_DEBUG_START(kperf_signal_handler, DBG_INTR_TYPE_IPI);
+            kperf_signal_handler();
+            INTERRUPT_MASKED_DEBUG_END();
         }
-#endif
+#endif /* KPERF */
         if (cpu_signal & (SIGPxcall | SIGPxcallImm)) {
             cpu_handle_xcall(cpu_data_ptr);
         }
         if (cpu_signal & SIGPast) {
             os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPast, relaxed);
-            ast_check(cpu_data_ptr->cpu_processor);
+            INTERRUPT_MASKED_DEBUG_START(ast_check, DBG_INTR_TYPE_IPI);
+            ast_check(current_processor());
+            INTERRUPT_MASKED_DEBUG_END();
         }
         if (cpu_signal & SIGPdebug) {
             os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdebug, relaxed);
+            INTERRUPT_MASKED_DEBUG_START(DebuggerXCall, DBG_INTR_TYPE_IPI);
             DebuggerXCall(cpu_data_ptr->cpu_int_state);
+            INTERRUPT_MASKED_DEBUG_END();
         }
-#if __ARM_SMP__ && defined(ARMA7)
+#if defined(ARMA7)
         if (cpu_signal & SIGPLWFlush) {
             os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWFlush, relaxed);
+            INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI);
             cache_xcall_handler(LWFlush);
+            INTERRUPT_MASKED_DEBUG_END();
         }
         if (cpu_signal & SIGPLWClean) {
             os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWClean, relaxed);
+            INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI);
             cache_xcall_handler(LWClean);
+            INTERRUPT_MASKED_DEBUG_END();
         }
 #endif
 
@@ -517,12 +632,28 @@
 void
-cpu_exit_wait(int cpu)
-{
-    if (cpu != master_cpu) {
+cpu_exit_wait(int cpu_id)
+{
+#if USE_APPLEARMSMP
+    if (!ml_is_quiescing()) {
+        // For runtime disable (non S2R) the CPU will shut down immediately.
+        ml_topology_cpu_t *cpu = &ml_get_topology_info()->cpus[cpu_id];
+        assert(cpu && cpu->cpu_IMPL_regs);
+        volatile uint64_t *cpu_sts = (void *)(cpu->cpu_IMPL_regs + CPU_PIO_CPU_STS_OFFSET);
+
+        // Poll the "CPU running state" field until it is 0 (off)
+        while ((*cpu_sts & CPU_PIO_CPU_STS_cpuRunSt_mask) != 0x00) {
+            __builtin_arm_dsb(DSB_ISH);
+        }
+        return;
+    }
+#endif /* USE_APPLEARMSMP */
+
+    if (cpu_id != master_cpu) {
+        // For S2R, ml_arm_sleep() will do some extra polling after setting ARM_CPU_ON_SLEEP_PATH.
         cpu_data_t *cpu_data_ptr;
 
-        cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
+        cpu_data_ptr = CpuDataEntries[cpu_id].cpu_data_vaddr;
         while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {
         }
         ;
@@ -543,7 +674,7 @@ cpu_machine_init(void)
     cpu_data_ptr = getCpuDatap();
     started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState);
 
-    if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
+    if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
         platform_cache_init();
     }
 
@@ -554,36 +685,10 @@ cpu_machine_init(void)
     ml_init_interrupt();
 }
 
-processor_t
-cpu_processor_alloc(boolean_t is_boot_cpu)
-{
-    processor_t proc;
-
-    if (is_boot_cpu) {
-        return &BootProcessor;
-    }
-
-    proc = kalloc(sizeof(*proc));
-    if (!proc) {
-        return NULL;
-    }
-
-    bzero((void *) proc, sizeof(*proc));
-    return proc;
-}
-
-void
-cpu_processor_free(processor_t proc)
-{
-    if (proc != NULL && proc != &BootProcessor) {
-        kfree(proc, sizeof(*proc));
-    }
-}
-
 processor_t
 current_processor(void)
 {
-    return getCpuDatap()->cpu_processor;
+    return PERCPU_GET(processor);
 }
 
 processor_t
@@ -591,7 +696,7 @@ cpu_to_processor(int cpu)
 {
     cpu_data_t *cpu_data = cpu_datap(cpu);
     if (cpu_data != NULL) {
-        return cpu_data->cpu_processor;
+        return PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data);
     } else {
         return NULL;
     }
@@ -600,44 +705,59 @@ cpu_to_processor(int cpu)
 cpu_data_t *
 processor_to_cpu_datap(processor_t processor)
 {
-    cpu_data_t *target_cpu_datap;
-
-    assert(processor->cpu_id < MAX_CPUS);
+    assert(processor->cpu_id <= ml_get_max_cpu_number());
     assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL);
 
-    target_cpu_datap = (cpu_data_t*)CpuDataEntries[processor->cpu_id].cpu_data_vaddr;
-    assert(target_cpu_datap->cpu_processor == processor);
+    return PERCPU_GET_RELATIVE(cpu_data, processor, processor);
+}
+
+__startup_func
+static void
+cpu_data_startup_init(void)
+{
+    vm_size_t size = percpu_section_size() * (ml_get_cpu_count() - 1);
+
+    percpu_base.size = percpu_section_size();
+    if (ml_get_cpu_count() == 1) {
+        percpu_base.start = VM_MAX_KERNEL_ADDRESS;
+        return;
+    }
+
+    /*
+     * The memory needs to be physically contiguous because it contains
+     * cpu_data_t structures sometimes accessed during reset
+     * with the MMU off.
+     *
+     * kmem_alloc_contig() can't be used early, at the time STARTUP_SUB_PERCPU
+     * normally runs, so we instead steal the memory for the PERCPU subsystem
+     * even earlier.
+     */
+    percpu_base.start = (vm_offset_t)pmap_steal_memory(round_page(size));
+    bzero((void *)percpu_base.start, round_page(size));
 
-    return target_cpu_datap;
+    percpu_base.start -= percpu_section_start();
+    percpu_base.end = percpu_base.start + size - 1;
+    percpu_base_cur = percpu_base.start;
 }
+STARTUP(PMAP_STEAL, STARTUP_RANK_FIRST, cpu_data_startup_init);
 
 cpu_data_t *
 cpu_data_alloc(boolean_t is_boot_cpu)
 {
-    cpu_data_t *cpu_data_ptr = NULL;
+    cpu_data_t *cpu_data_ptr = NULL;
+    vm_address_t base;
 
     if (is_boot_cpu) {
-        cpu_data_ptr = &BootCpuData;
+        cpu_data_ptr = PERCPU_GET_MASTER(cpu_data);
     } else {
-        if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) {
-            goto cpu_data_alloc_error;
-        }
-
-        bzero((void *)cpu_data_ptr, sizeof(cpu_data_t));
+        base = os_atomic_add_orig(&percpu_base_cur,
+            percpu_section_size(), relaxed);
+        cpu_data_ptr = PERCPU_GET_WITH_BASE(base, cpu_data);
         cpu_stack_alloc(cpu_data_ptr);
     }
 
-    cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu);
-    if (cpu_data_ptr->cpu_processor == (struct processor *)NULL) {
-        goto cpu_data_alloc_error;
-    }
-
     return cpu_data_ptr;
-
-cpu_data_alloc_error:
-    panic("cpu_data_alloc() failed\n");
-    return (cpu_data_t *)NULL;
 }
 
 ast_t *
@@ -688,8 +808,93 @@ cpu_number(void)
     return getCpuDatap()->cpu_number;
 }
 
+vm_offset_t
+current_percpu_base(void)
+{
+    return current_thread()->machine.pcpu_data_base;
+}
+
 uint64_t
 ml_get_wake_timebase(void)
 {
     return wake_abstime;
 }
+
+bool
+ml_cpu_signal_is_enabled(void)
+{
+    return !(getCpuDatap()->cpu_signal & SIGPdisabled);
+}
+
+bool
+ml_cpu_can_exit(__unused int cpu_id)
+{
+    /* processor_exit() is always allowed on the S2R path */
+    if (ml_is_quiescing()) {
+        return true;
+    }
+#if HAS_CLUSTER && USE_APPLEARMSMP
+    /*
+     * Cyprus and newer chips can disable individual non-boot CPUs. The
+     * implementation polls cpuX_IMPL_CPU_STS, which differs on older chips.
+     */
+    if (CpuDataEntries[cpu_id].cpu_data_vaddr != &BootCpuData) {
+        return true;
+    }
+#endif
+    return false;
+}
+
+#ifdef USE_APPLEARMSMP
+
+void
+ml_cpu_begin_state_transition(int cpu_id)
+{
+    lck_rw_lock_exclusive(&cpu_state_lock);
+    CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = true;
+    lck_rw_unlock_exclusive(&cpu_state_lock);
+}
+
+void
+ml_cpu_end_state_transition(int cpu_id)
+{
+    lck_rw_lock_exclusive(&cpu_state_lock);
+    CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = false;
+    lck_rw_unlock_exclusive(&cpu_state_lock);
+}
+
+void
+ml_cpu_begin_loop(void)
+{
+    lck_rw_lock_shared(&cpu_state_lock);
+}
+
+void
+ml_cpu_end_loop(void)
+{
+    lck_rw_unlock_shared(&cpu_state_lock);
+}
+
+#else /* USE_APPLEARMSMP */
+
+void
+ml_cpu_begin_state_transition(__unused int cpu_id)
+{
+}
+
+void
+ml_cpu_end_state_transition(__unused int cpu_id)
+{
+}
+
+void
+ml_cpu_begin_loop(void)
+{
+}
+
+void
+ml_cpu_end_loop(void)
+{
+}
+
+#endif /* USE_APPLEARMSMP */
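
The diff introduces cpu_broadcast_xcall_simple() and cpu_broadcast_immediate_xcall_simple(), which hide the synch counter that cpu_broadcast_xcall() requires the caller to supply: the wrapper keeps a cpu_broadcast_xcall_simple_data on its own stack and the callback decrements data->sync and wakes the waiter, so the broadcast must complete before the wrapper returns. The sketch below is illustrative only and is not part of the diff; the handler and caller names are hypothetical, and only the cpu_broadcast_xcall_simple() signature comes from the code above.

/* Hypothetical in-kernel caller (illustration only). */
static unsigned int example_generation;      /* hypothetical token passed to every CPU */

static void
example_refresh_on_cpu(void *parm)           /* matches broadcastFunc: void (*)(void *) */
{
	unsigned int *gen = parm;
	(void)*gen;                          /* the real per-CPU work would go here */
}

static void
example_refresh_all_cpus(void)
{
	/*
	 * TRUE => also run the handler on the calling CPU. No synch counter is
	 * needed here; the wrapper's stack-resident bookkeeping struct provides it.
	 */
	unsigned int cpus = cpu_broadcast_xcall_simple(TRUE, example_refresh_on_cpu, &example_generation);
	(void)cpus;                          /* return value comes from cpu_broadcast_xcall_internal() */
}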
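
The diff also removes the cpu_processor back-pointer: processor and cpu_data are now both per-CPU variables laid out identically for every CPU, so PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data) reaches one from the other by constant offset arithmetic alone. The toy model below uses ordinary structs and offsetof() rather than the kernel's per-CPU linker section, and all names are hypothetical; it only demonstrates the pointer arithmetic the macro relies on.

#include <stdint.h>
#include <stddef.h>

/* Stand-ins for the real structures; layout is illustrative only. */
struct processor_stub { int cpu_id; };
struct cpu_data_stub  { int cpu_number; };

/* One slab per CPU; every CPU uses the same layout, so member offsets are constant. */
struct pcpu_slab {
	struct processor_stub processor;
	struct cpu_data_stub  cpu_data;
};

/*
 * Given a pointer to one member of a CPU's slab, recover a sibling member by
 * offset arithmetic, with no back-pointer field stored in either structure.
 */
static struct processor_stub *
processor_from_cpu_data(struct cpu_data_stub *cd)
{
	uintptr_t base = (uintptr_t)cd - offsetof(struct pcpu_slab, cpu_data);
	return (struct processor_stub *)(base + offsetof(struct pcpu_slab, processor));
}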