/*
 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm/cpu_common.c
 *
 * cpu routines common to all supported arm variants
 */
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/percpu.h>
#include <kern/timer_queue.h>
#include <kern/locks.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <mach/processor_info.h>
#include <machine/atomic.h>
#include <machine/config.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>
#include <arm/proc_reg.h>
#include <libkern/OSAtomic.h>
SECURITY_READ_ONLY_LATE(struct percpu_base) percpu_base;
vm_address_t     percpu_base_cur;
cpu_data_t       PERCPU_DATA(cpu_data);
cpu_data_entry_t CpuDataEntries[MAX_CPUS];

static LCK_GRP_DECLARE(cpu_lck_grp, "cpu_lck_grp");
static LCK_RW_DECLARE(cpu_state_lock, &cpu_lck_grp);

unsigned int real_ncpus = 1;
boolean_t    idle_enable = FALSE;
uint64_t     wake_abstime = 0x0ULL;

extern uint64_t xcall_ack_timeout_abstime;

#if defined(HAS_IPI)
extern unsigned int gFastIPI;
#endif /* defined(HAS_IPI) */
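
/*
 * Overview (summary comment added for clarity): cross-CPU work in this file
 * is driven by the per-cpu `cpu_signal` bitmask.  A sender atomically ORs a
 * SIGP* bit into the target's mask (cpu_signal_internal), issues a DSB, and
 * fires an IPI; the target's interrupt path calls cpu_signal_handler(),
 * which loops until every pending bit has been cleared and serviced.  The
 * SIGPdisabled bit gates the whole mechanism while a core is offline or
 * asleep.
 */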

cpu_data_t *
cpu_datap(int cpu)
{
	assert(cpu <= ml_get_max_cpu_number());
	return CpuDataEntries[cpu].cpu_data_vaddr;
}

kern_return_t
cpu_control(int slot_num,
    processor_info_t info,
    unsigned int count)
{
	printf("cpu_control(%d,%p,%d) not implemented\n",
	    slot_num, info, count);
	return KERN_FAILURE;
}

kern_return_t
cpu_info_count(processor_flavor_t flavor,
    unsigned int *count)
{
	switch (flavor) {
	case PROCESSOR_CPU_STAT:
		*count = PROCESSOR_CPU_STAT_COUNT;
		return KERN_SUCCESS;

	case PROCESSOR_CPU_STAT64:
		*count = PROCESSOR_CPU_STAT64_COUNT;
		return KERN_SUCCESS;

	default:
		*count = 0;
		return KERN_FAILURE;
	}
}

kern_return_t
cpu_info(processor_flavor_t flavor, int slot_num, processor_info_t info,
    unsigned int *count)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr;

	switch (flavor) {
	case PROCESSOR_CPU_STAT:
	{
		if (*count < PROCESSOR_CPU_STAT_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat_t cpu_stat = (processor_cpu_stat_t)info;
		cpu_stat->irq_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = (uint32_t)cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = (uint32_t)cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = (uint32_t)cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = (uint32_t)cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.instr_ex_cnt;

		*count = PROCESSOR_CPU_STAT_COUNT;

		return KERN_SUCCESS;
	}

	case PROCESSOR_CPU_STAT64:
	{
		if (*count < PROCESSOR_CPU_STAT64_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat64_t cpu_stat = (processor_cpu_stat64_t)info;
		cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt;
#if MONOTONIC
		cpu_stat->pmi_cnt = cpu_data_ptr->cpu_monotonic.mtc_npmis;
#endif /* MONOTONIC */

		*count = PROCESSOR_CPU_STAT64_COUNT;

		return KERN_SUCCESS;
	}

	default:
		return KERN_FAILURE;
	}
}

/*
 * Routine:	cpu_doshutdown
 * Function:
 */
void
cpu_doshutdown(void (*doshutdown)(processor_t),
    processor_t processor)
{
	doshutdown(processor);
}

/*
 * Routine:	cpu_idle_tickle
 *
 */
void
cpu_idle_tickle(void)
{
	boolean_t   intr;
	cpu_data_t  *cpu_data_ptr;
	uint64_t    new_idle_timeout_ticks = 0x0ULL;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		cpu_data_ptr->idle_timer_notify(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
		if (new_idle_timeout_ticks != 0x0ULL) {
			/* if a new idle timeout was requested set the new idle timer deadline */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		} else {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		}
		timer_resync_deadlines();
	}
	(void) ml_set_interrupts_enabled(intr);
}
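
/*
 * Memory-ordering note (added comment): the acquire fence at the top of
 * cpu_handle_xcall() pairs with the sender's DSB in cpu_signal_internal(),
 * so the xcall parameters are visible before they are consumed.  The
 * acq_rel fences below order the clearing of the parameter slots against
 * the clearing of the SIGPxcall/SIGPxcallImm bits, which is what re-opens
 * the slot to the next sender.
 */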
static void
cpu_handle_xcall(cpu_data_t *cpu_data_ptr)
{
	broadcastFunc xfunc;
	void          *xparam;

	os_atomic_thread_fence(acquire);
	/* Come back around if cpu_signal_internal is running on another CPU and has just
	 * added SIGPxcall to the pending mask, but hasn't yet assigned the call params.*/
	if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_xcall_p0;
		INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI);
		xparam = cpu_data_ptr->cpu_xcall_p1;
		cpu_data_ptr->cpu_xcall_p0 = NULL;
		cpu_data_ptr->cpu_xcall_p1 = NULL;
		os_atomic_thread_fence(acq_rel);
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcall, relaxed);
		xfunc(xparam);
		INTERRUPT_MASKED_DEBUG_END();
	}
	if (cpu_data_ptr->cpu_imm_xcall_p0 != NULL && cpu_data_ptr->cpu_imm_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_imm_xcall_p0;
		INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI);
		xparam = cpu_data_ptr->cpu_imm_xcall_p1;
		cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
		cpu_data_ptr->cpu_imm_xcall_p1 = NULL;
		os_atomic_thread_fence(acq_rel);
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcallImm, relaxed);
		xfunc(xparam);
		INTERRUPT_MASKED_DEBUG_END();
	}
}
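
/*
 * Documentation note (added comment): cpu_broadcast_xcall_internal() sends
 * `func(parm)` to every started CPU except the caller; when self_xcall is
 * TRUE the caller also runs it inline.  When `synch` is non-NULL it is
 * initialized to the CPU count and used as a rendezvous: the broadcast
 * function itself is expected to decrement *synch and wake (event_t)synch
 * when the count drains to zero, as cpu_broadcast_xcall_simple_cbk() below
 * does.  The return value is the number of CPUs that were successfully
 * signalled (plus the caller, for self_xcall).
 */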
static unsigned int
cpu_broadcast_xcall_internal(unsigned int signal,
    uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	boolean_t    intr;
	cpu_data_t   *cpu_data_ptr;
	cpu_data_t   *target_cpu_datap;
	unsigned int failsig;
	int          cpu;
	int          max_cpu = ml_get_max_cpu_number() + 1;

	//yes, param ALSO cannot be NULL
	assert(func);
	assert(parm);

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	failsig = 0;

	if (synch != NULL) {
		*synch = max_cpu;
		assert_wait((event_t)synch, THREAD_UNINT);
	}

	for (cpu = 0; cpu < max_cpu; cpu++) {
		target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

		if (target_cpu_datap == cpu_data_ptr) {
			continue;
		}

		if ((target_cpu_datap == NULL) ||
		    KERN_SUCCESS != cpu_signal(target_cpu_datap, signal, (void *)func, parm)) {
			failsig++;
		}
	}

	if (self_xcall) {
		func(parm);
	}

	(void) ml_set_interrupts_enabled(intr);

	if (synch != NULL) {
		if (os_atomic_sub(synch, (!self_xcall) ? failsig + 1 : failsig, relaxed) == 0) {
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			thread_block(THREAD_CONTINUE_NULL);
		}
	}

	if (!self_xcall) {
		return max_cpu - failsig - 1;
	} else {
		return max_cpu - failsig;
	}
}

unsigned int
cpu_broadcast_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_broadcast_xcall_internal(SIGPxcall, synch, self_xcall, func, parm);
}
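
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * broadcast that waits for completion follows the contract described
 * above: the broadcast function decrements the counter the caller passed
 * as `synch` and wakes the waiter when it reaches zero:
 *
 *	static void
 *	example_drain(void *arg)
 *	{
 *		uint32_t *sync = (uint32_t *)arg;
 *		// ... per-CPU work here ...
 *		if (os_atomic_dec(sync, relaxed) == 0) {
 *			thread_wakeup((event_t)sync);
 *		}
 *	}
 *
 *	uint32_t sync;
 *	(void) cpu_broadcast_xcall(&sync, TRUE, example_drain, &sync);
 */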

struct cpu_broadcast_xcall_simple_data {
	broadcastFunc func;
	void          *parm;
	uint32_t      sync;
};

static void
cpu_broadcast_xcall_simple_cbk(void *parm)
{
	struct cpu_broadcast_xcall_simple_data *data = (struct cpu_broadcast_xcall_simple_data*)parm;

	data->func(data->parm);

	if (os_atomic_dec(&data->sync, relaxed) == 0) {
		thread_wakeup((event_t)&data->sync);
	}
}

static unsigned int
cpu_xcall_simple(boolean_t self_xcall,
    broadcastFunc func,
    void *parm,
    bool immediate)
{
	struct cpu_broadcast_xcall_simple_data data = {};

	data.func = func;
	data.parm = parm;

	return cpu_broadcast_xcall_internal(immediate ? SIGPxcallImm : SIGPxcall, &data.sync, self_xcall, cpu_broadcast_xcall_simple_cbk, &data);
}
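
/*
 * Design note (added comment): the _simple variants bundle func/parm with a
 * stack-resident countdown so callers don't have to manage the rendezvous
 * themselves.  Keeping `data` on the caller's stack is safe only because
 * cpu_broadcast_xcall_internal() blocks until every signalled CPU has run
 * the callback and drained the count to zero.
 */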

unsigned int
cpu_broadcast_immediate_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_broadcast_xcall_internal(SIGPxcallImm, synch, self_xcall, func, parm);
}

unsigned int
cpu_broadcast_xcall_simple(boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_xcall_simple(self_xcall, func, parm, false);
}

unsigned int
cpu_broadcast_immediate_xcall_simple(boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_xcall_simple(self_xcall, func, parm, true);
}
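
/*
 * Single-CPU variants (added comment): cpu_xcall()/cpu_immediate_xcall()
 * validate the target and forward to cpu_signal().  Unlike the broadcast
 * paths there is no completion rendezvous; the caller only learns whether
 * the signal was accepted.
 */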
static kern_return_t
cpu_xcall_internal(unsigned int signal, int cpu_number, broadcastFunc func, void *param)
{
	cpu_data_t *target_cpu_datap;

	if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number())) {
		return KERN_INVALID_ARGUMENT;
	}

	if (func == NULL || param == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	target_cpu_datap = (cpu_data_t*)CpuDataEntries[cpu_number].cpu_data_vaddr;
	if (target_cpu_datap == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return cpu_signal(target_cpu_datap, signal, (void*)func, param);
}

kern_return_t
cpu_xcall(int cpu_number, broadcastFunc func, void *param)
{
	return cpu_xcall_internal(SIGPxcall, cpu_number, func, param);
}

kern_return_t
cpu_immediate_xcall(int cpu_number, broadcastFunc func, void *param)
{
	return cpu_xcall_internal(SIGPxcallImm, cpu_number, func, param);
}

static kern_return_t
cpu_signal_internal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1,
    boolean_t defer)
{
	unsigned int Check_SIGPdisabled;
	int          current_signals;
	Boolean      swap_success;
	boolean_t    interruptible = ml_set_interrupts_enabled(FALSE);
	cpu_data_t   *current_proc = getCpuDatap();

	/* We'll mandate that only IPIs meant to kick a core out of idle may ever be deferred. */
	if (defer) {
		assert(signal == SIGPnop);
	}

	if (current_proc != target_proc) {
		Check_SIGPdisabled = SIGPdisabled;
	} else {
		Check_SIGPdisabled = 0;
	}

	if ((signal == SIGPxcall) || (signal == SIGPxcallImm)) {
		uint64_t start_mabs_time, max_mabs_time, current_mabs_time;
		current_mabs_time = start_mabs_time = mach_absolute_time();
		max_mabs_time = xcall_ack_timeout_abstime + current_mabs_time;
		assert(max_mabs_time > current_mabs_time);

		do {
			current_signals = target_proc->cpu_signal;
			if ((current_signals & SIGPdisabled) == SIGPdisabled) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}
			swap_success = OSCompareAndSwap(current_signals & (~signal), current_signals | signal,
			    &target_proc->cpu_signal);

			if (!swap_success && (signal == SIGPxcallImm) && (target_proc->cpu_signal & SIGPxcallImm)) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_ALREADY_WAITING;
			}

			/* Drain pending xcalls on this cpu; the CPU we're trying to xcall may in turn
			 * be trying to xcall us.  Because we have interrupts disabled, that can deadlock,
			 * so break the deadlock by draining pending xcalls. */
			if (!swap_success && (current_proc->cpu_signal & signal)) {
				cpu_handle_xcall(current_proc);
			}
		} while (!swap_success && ((current_mabs_time = mach_absolute_time()) < max_mabs_time));

		/*
		 * If we time out while waiting for the target CPU to respond, it's possible that no
		 * other CPU is available to handle the watchdog interrupt that would eventually trigger
		 * a panic. To prevent this from happening, we just panic here to flag this condition.
		 */
		if (__improbable(current_mabs_time >= max_mabs_time)) {
			uint64_t end_time_ns, xcall_ack_timeout_ns;
			absolutetime_to_nanoseconds(current_mabs_time - start_mabs_time, &end_time_ns);
			absolutetime_to_nanoseconds(xcall_ack_timeout_abstime, &xcall_ack_timeout_ns);
			panic("CPU%u has failed to respond to cross-call after %llu nanoseconds (timeout = %llu ns)",
			    target_proc->cpu_number, end_time_ns, xcall_ack_timeout_ns);
		}

		if (signal == SIGPxcallImm) {
			target_proc->cpu_imm_xcall_p0 = p0;
			target_proc->cpu_imm_xcall_p1 = p1;
		} else {
			target_proc->cpu_xcall_p0 = p0;
			target_proc->cpu_xcall_p1 = p1;
		}
	} else {
		do {
			current_signals = target_proc->cpu_signal;
			if ((Check_SIGPdisabled != 0) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}

			swap_success = OSCompareAndSwap(current_signals, current_signals | signal,
			    &target_proc->cpu_signal);
		} while (!swap_success);
	}

	/*
	 * Issue DSB here to guarantee: 1) prior stores to the pending signal mask and xcall params
	 * will be visible to other cores when the IPI is dispatched, and 2) subsequent
	 * instructions to signal the other cores will not execute until after the barrier.
	 * DMB would be sufficient to guarantee 1) but not 2).
	 */
	__builtin_arm_dsb(DSB_ISH);

	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		if (defer) {
#if defined(HAS_IPI)
			if (gFastIPI) {
				ml_cpu_signal_deferred(target_proc->cpu_phys_id);
			} else {
				PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
			}
#else
			PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
		} else {
#if defined(HAS_IPI)
			if (gFastIPI) {
				ml_cpu_signal(target_proc->cpu_phys_id);
			} else {
				PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
			}
#else
			PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
		}
	}

	ml_set_interrupts_enabled(interruptible);
	return KERN_SUCCESS;
}

kern_return_t
cpu_signal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1)
{
	return cpu_signal_internal(target_proc, signal, p0, p1, FALSE);
}

kern_return_t
cpu_signal_deferred(cpu_data_t *target_proc)
{
	return cpu_signal_internal(target_proc, SIGPnop, NULL, NULL, TRUE);
}

void
cpu_signal_cancel(cpu_data_t *target_proc)
{
	/* TODO: Should we care about the state of a core as far as squashing deferred IPIs goes? */
	if (!(target_proc->cpu_signal & SIGPdisabled)) {
#if defined(HAS_IPI)
		if (gFastIPI) {
			ml_cpu_signal_retract(target_proc->cpu_phys_id);
		} else {
			PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
		}
#else
		PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
	}
}

void
cpu_signal_handler(void)
{
	cpu_signal_handler_internal(FALSE);
}

void
cpu_signal_handler_internal(boolean_t disable_signal)
{
	cpu_data_t   *cpu_data_ptr = getCpuDatap();
	unsigned int cpu_signal;

	cpu_data_ptr->cpu_stat.ipi_cnt++;
	cpu_data_ptr->cpu_stat.ipi_cnt_wake++;
	SCHED_STATS_INC(ipi_count);

	cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);

	if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE)) {
		os_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
	} else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE)) {
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
	}

	while (cpu_signal & ~SIGPdisabled) {
		if (cpu_signal & SIGPdec) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdec, relaxed);
			INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_IPI);
			rtclock_intr(FALSE);
			INTERRUPT_MASKED_DEBUG_END();
		}
#if KPERF
		if (cpu_signal & SIGPkppet) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPkppet, relaxed);
			extern void kperf_signal_handler(void);
			INTERRUPT_MASKED_DEBUG_START(kperf_signal_handler, DBG_INTR_TYPE_IPI);
			kperf_signal_handler();
			INTERRUPT_MASKED_DEBUG_END();
		}
#endif /* KPERF */
		if (cpu_signal & (SIGPxcall | SIGPxcallImm)) {
			cpu_handle_xcall(cpu_data_ptr);
		}
		if (cpu_signal & SIGPast) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPast, relaxed);
			INTERRUPT_MASKED_DEBUG_START(ast_check, DBG_INTR_TYPE_IPI);
			ast_check(current_processor());
			INTERRUPT_MASKED_DEBUG_END();
		}
		if (cpu_signal & SIGPdebug) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdebug, relaxed);
			INTERRUPT_MASKED_DEBUG_START(DebuggerXCall, DBG_INTR_TYPE_IPI);
			DebuggerXCall(cpu_data_ptr->cpu_int_state);
			INTERRUPT_MASKED_DEBUG_END();
		}
#if defined(ARMA7)
		if (cpu_signal & SIGPLWFlush) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWFlush, relaxed);
			INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI);
			cache_xcall_handler(LWFlush);
			INTERRUPT_MASKED_DEBUG_END();
		}
		if (cpu_signal & SIGPLWClean) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWClean, relaxed);
			INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI);
			cache_xcall_handler(LWClean);
			INTERRUPT_MASKED_DEBUG_END();
		}
#endif /* defined(ARMA7) */

		cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);
	}
}
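
/*
 * Note (added comment): the os_atomic_or(..., 0, relaxed) calls above are
 * atomic loads of the signal mask spelled as a no-op OR; the handler keeps
 * re-reading the mask and looping until no bits other than SIGPdisabled
 * remain set.
 */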

void
cpu_exit_wait(int cpu_id)
{
#if USE_APPLEARMSMP
	if (!ml_is_quiescing()) {
		// For runtime disable (non S2R) the CPU will shut down immediately.
		ml_topology_cpu_t *cpu = &ml_get_topology_info()->cpus[cpu_id];
		assert(cpu && cpu->cpu_IMPL_regs);
		volatile uint64_t *cpu_sts = (void *)(cpu->cpu_IMPL_regs + CPU_PIO_CPU_STS_OFFSET);

		// Poll the "CPU running state" field until it is 0 (off).
		while ((*cpu_sts & CPU_PIO_CPU_STS_cpuRunSt_mask) != 0x00) {
			__builtin_arm_dsb(DSB_ISH);
		}
		return;
	}
#endif /* USE_APPLEARMSMP */

	if (cpu_id != master_cpu) {
		// For S2R, ml_arm_sleep() will do some extra polling after setting ARM_CPU_ON_SLEEP_PATH.
		cpu_data_t *cpu_data_ptr;

		cpu_data_ptr = CpuDataEntries[cpu_id].cpu_data_vaddr;
		while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {
		}
	}
}

boolean_t
cpu_can_exit(__unused int cpu)
{
	return TRUE;
}

void
cpu_machine_init(void)
{
	static boolean_t started = FALSE;
	cpu_data_t       *cpu_data_ptr;

	cpu_data_ptr = getCpuDatap();
	started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState);
	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		platform_cache_init();
	}

	/* Note: this calls IOCPURunPlatformActiveActions when resuming on boot cpu */
	PE_cpu_machine_init(cpu_data_ptr->cpu_id, !started);

	cpu_data_ptr->cpu_flags |= StartedState;
	ml_init_interrupt();
}

processor_t
current_processor(void)
{
	return PERCPU_GET(processor);
}

processor_t
cpu_to_processor(int cpu)
{
	cpu_data_t *cpu_data = cpu_datap(cpu);
	if (cpu_data != NULL) {
		return PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data);
	} else {
		return NULL;
	}
}

cpu_data_t *
processor_to_cpu_datap(processor_t processor)
{
	assert(processor->cpu_id <= ml_get_max_cpu_number());
	assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL);

	return PERCPU_GET_RELATIVE(cpu_data, processor, processor);
}

__startup_func
static void
cpu_data_startup_init(void)
{
	vm_size_t size = percpu_section_size() * (ml_get_cpu_count() - 1);

	percpu_base.size = percpu_section_size();
	if (ml_get_cpu_count() == 1) {
		percpu_base.start = VM_MAX_KERNEL_ADDRESS;
		return;
	}

	/*
	 * The memory needs to be physically contiguous because it contains
	 * cpu_data_t structures sometimes accessed during reset
	 * with the MMU off.
	 *
	 * kmem_alloc_contig() can't be used early, at the time STARTUP_SUB_PERCPU
	 * normally runs, so we instead steal the memory for the PERCPU subsystem
	 * even earlier.
	 */
	percpu_base.start = (vm_offset_t)pmap_steal_memory(round_page(size));
	bzero((void *)percpu_base.start, round_page(size));

	percpu_base.start -= percpu_section_start();
	percpu_base.end = percpu_base.start + size - 1;
	percpu_base_cur = percpu_base.start;
}
STARTUP(PMAP_STEAL, STARTUP_RANK_FIRST, cpu_data_startup_init);
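
/*
 * Worked example (illustrative; the exact macro definitions live in
 * kern/percpu.h): with 4 CPUs, the boot CPU uses the master copy linked
 * into the kernel image, so only 3 * percpu_section_size() bytes are
 * stolen.  Biasing `percpu_base.start` by -percpu_section_start() means a
 * secondary CPU's base plus the link-time address of a percpu variable
 * lands at that variable's offset within the CPU's stolen block, so master
 * and secondary copies are addressed the same way.  cpu_data_alloc() below
 * hands out successive section-sized slices of this region.
 */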

cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	cpu_data_t   *cpu_data_ptr = NULL;
	vm_address_t base;

	if (is_boot_cpu) {
		cpu_data_ptr = PERCPU_GET_MASTER(cpu_data);
	} else {
		base = os_atomic_add_orig(&percpu_base_cur,
		    percpu_section_size(), relaxed);

		cpu_data_ptr = PERCPU_GET_WITH_BASE(base, cpu_data);
		cpu_stack_alloc(cpu_data_ptr);
	}

	return cpu_data_ptr;
}

ast_t *
ast_pending(void)
{
	return &getCpuDatap()->cpu_pending_ast;
}

cpu_type_t
slot_type(int slot_num)
{
	return cpu_datap(slot_num)->cpu_type;
}

cpu_subtype_t
slot_subtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_subtype;
}

cpu_threadtype_t
slot_threadtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_threadtype;
}

cpu_type_t
cpu_type(void)
{
	return getCpuDatap()->cpu_type;
}

cpu_subtype_t
cpu_subtype(void)
{
	return getCpuDatap()->cpu_subtype;
}

cpu_threadtype_t
cpu_threadtype(void)
{
	return getCpuDatap()->cpu_threadtype;
}

int
cpu_number(void)
{
	return getCpuDatap()->cpu_number;
}

vm_offset_t
current_percpu_base(void)
{
	return current_thread()->machine.pcpu_data_base;
}

uint64_t
ml_get_wake_timebase(void)
{
	return wake_abstime;
}

bool
ml_cpu_signal_is_enabled(void)
{
	return !(getCpuDatap()->cpu_signal & SIGPdisabled);
}

bool
ml_cpu_can_exit(__unused int cpu_id)
{
	/* processor_exit() is always allowed on the S2R path */
	if (ml_is_quiescing()) {
		return true;
	}
#if HAS_CLUSTER && USE_APPLEARMSMP
	/*
	 * Cyprus and newer chips can disable individual non-boot CPUs. The
	 * implementation polls cpuX_IMPL_CPU_STS, which differs on older chips.
	 */
	if (CpuDataEntries[cpu_id].cpu_data_vaddr != &BootCpuData) {
		return true;
	}
#endif
	return false;
}

#ifdef USE_APPLEARMSMP

void
ml_cpu_begin_state_transition(int cpu_id)
{
	lck_rw_lock_exclusive(&cpu_state_lock);
	CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = true;
	lck_rw_unlock_exclusive(&cpu_state_lock);
}

void
ml_cpu_end_state_transition(int cpu_id)
{
	lck_rw_lock_exclusive(&cpu_state_lock);
	CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = false;
	lck_rw_unlock_exclusive(&cpu_state_lock);
}

void
ml_cpu_begin_loop(void)
{
	lck_rw_lock_shared(&cpu_state_lock);
}

void
ml_cpu_end_loop(void)
{
	lck_rw_unlock_shared(&cpu_state_lock);
}

#else /* USE_APPLEARMSMP */

void
ml_cpu_begin_state_transition(__unused int cpu_id)
{
}

void
ml_cpu_end_state_transition(__unused int cpu_id)
{
}

void
ml_cpu_begin_loop(void)
{
}

void
ml_cpu_end_loop(void)
{
}

#endif /* USE_APPLEARMSMP */