/*
 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm/cpu_common.c
 *
 * cpu routines common to all supported arm variants
 */

#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/percpu.h>
#include <kern/timer_queue.h>
#include <kern/locks.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <mach/processor_info.h>
#include <machine/atomic.h>
#include <machine/config.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>
#include <arm/proc_reg.h>
#include <libkern/OSAtomic.h>

SECURITY_READ_ONLY_LATE(struct percpu_base) percpu_base;
vm_address_t percpu_base_cur;
cpu_data_t PERCPU_DATA(cpu_data);
cpu_data_entry_t CpuDataEntries[MAX_CPUS];

static lck_grp_t cpu_lck_grp;
static lck_rw_t cpu_state_lock;

unsigned int real_ncpus = 1;
boolean_t idle_enable = FALSE;
uint64_t wake_abstime = 0x0ULL;

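/*
 * When built with HAS_IPI, gFastIPI selects the ml_cpu_signal* fast-IPI path
 * over the PE_cpu_signal* platform-expert path in cpu_signal_internal() and
 * cpu_signal_cancel() below.
 */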
#if defined(HAS_IPI)
extern unsigned int gFastIPI;
#endif /* defined(HAS_IPI) */

cpu_data_t *
cpu_datap(int cpu)
{
	assert(cpu <= ml_get_max_cpu_number());
	return CpuDataEntries[cpu].cpu_data_vaddr;
}

kern_return_t
cpu_control(int slot_num,
    processor_info_t info,
    unsigned int count)
{
	printf("cpu_control(%d,%p,%d) not implemented\n",
	    slot_num, info, count);
	return KERN_FAILURE;
}

kern_return_t
cpu_info_count(processor_flavor_t flavor,
    unsigned int *count)
{
	switch (flavor) {
	case PROCESSOR_CPU_STAT:
		*count = PROCESSOR_CPU_STAT_COUNT;
		return KERN_SUCCESS;

	case PROCESSOR_CPU_STAT64:
		*count = PROCESSOR_CPU_STAT64_COUNT;
		return KERN_SUCCESS;

	default:
		*count = 0;
		return KERN_FAILURE;
	}
}

kern_return_t
cpu_info(processor_flavor_t flavor, int slot_num, processor_info_t info,
    unsigned int *count)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr;

	switch (flavor) {
	case PROCESSOR_CPU_STAT:
	{
		if (*count < PROCESSOR_CPU_STAT_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat_t cpu_stat = (processor_cpu_stat_t)info;
		cpu_stat->irq_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = (uint32_t)cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = (uint32_t)cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = (uint32_t)cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = (uint32_t)cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.instr_ex_cnt;

		*count = PROCESSOR_CPU_STAT_COUNT;

		return KERN_SUCCESS;
	}

	case PROCESSOR_CPU_STAT64:
	{
		if (*count < PROCESSOR_CPU_STAT64_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat64_t cpu_stat = (processor_cpu_stat64_t)info;
		cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt;
#if MONOTONIC
		cpu_stat->pmi_cnt = cpu_data_ptr->cpu_monotonic.mtc_npmis;
#endif /* MONOTONIC */

		*count = PROCESSOR_CPU_STAT64_COUNT;

		return KERN_SUCCESS;
	}

	default:
		return KERN_FAILURE;
	}
}

/*
 * Routine:	cpu_doshutdown
 * Function:
 */
void
cpu_doshutdown(void (*doshutdown)(processor_t),
    processor_t processor)
{
	doshutdown(processor);
}

/*
 * Routine:	cpu_idle_tickle
 *
 */
void
cpu_idle_tickle(void)
{
	boolean_t       intr;
	cpu_data_t      *cpu_data_ptr;
	uint64_t        new_idle_timeout_ticks = 0x0ULL;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		cpu_data_ptr->idle_timer_notify(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
		if (new_idle_timeout_ticks != 0x0ULL) {
			/* if a new idle timeout was requested set the new idle timer deadline */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		} else {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		}
		timer_resync_deadlines();
	}
	(void) ml_set_interrupts_enabled(intr);
}

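/*
 * Drain any cross-call requests pending against this CPU: invoke the regular
 * (SIGPxcall) and immediate (SIGPxcallImm) handlers with their saved
 * parameters and clear the corresponding pending-signal bits.
 */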
static void
cpu_handle_xcall(cpu_data_t *cpu_data_ptr)
{
	broadcastFunc   xfunc;
	void            *xparam;

	os_atomic_thread_fence(acquire);
	/* Come back around if cpu_signal_internal is running on another CPU and has just
	 * added SIGPxcall to the pending mask, but hasn't yet assigned the call params.*/
	if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_xcall_p0;
		INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI);
		xparam = cpu_data_ptr->cpu_xcall_p1;
		cpu_data_ptr->cpu_xcall_p0 = NULL;
		cpu_data_ptr->cpu_xcall_p1 = NULL;
		os_atomic_thread_fence(acq_rel);
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcall, relaxed);
		xfunc(xparam);
		INTERRUPT_MASKED_DEBUG_END();
	}
	if (cpu_data_ptr->cpu_imm_xcall_p0 != NULL && cpu_data_ptr->cpu_imm_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_imm_xcall_p0;
		INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI);
		xparam = cpu_data_ptr->cpu_imm_xcall_p1;
		cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
		cpu_data_ptr->cpu_imm_xcall_p1 = NULL;
		os_atomic_thread_fence(acq_rel);
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcallImm, relaxed);
		xfunc(xparam);
		INTERRUPT_MASKED_DEBUG_END();
	}
}

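/*
 * Signal every other CPU to run func(parm), optionally invoking it on the
 * calling CPU as well (self_xcall).  If synch is non-NULL it is initialized to
 * the CPU count and the caller sleeps until it reaches zero; the broadcast
 * function is expected to decrement *synch on each CPU where it runs (the
 * caller accounts for itself and for any failed signals).  Returns the number
 * of CPUs the call was dispatched to, including this one when self_xcall.
 */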
static unsigned int
cpu_broadcast_xcall_internal(unsigned int signal,
    uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	boolean_t       intr;
	cpu_data_t      *cpu_data_ptr;
	cpu_data_t      *target_cpu_datap;
	unsigned int    failsig;
	int             cpu;
	int             max_cpu = ml_get_max_cpu_number() + 1;

	//yes, param ALSO cannot be NULL
	assert(func);
	assert(parm);

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	failsig = 0;

	if (synch != NULL) {
		*synch = max_cpu;
		assert_wait((event_t)synch, THREAD_UNINT);
	}

	for (cpu = 0; cpu < max_cpu; cpu++) {
		target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

		if (target_cpu_datap == cpu_data_ptr) {
			continue;
		}

		if ((target_cpu_datap == NULL) ||
		    KERN_SUCCESS != cpu_signal(target_cpu_datap, signal, (void *)func, parm)) {
			failsig++;
		}
	}

	if (self_xcall) {
		func(parm);
	}

	(void) ml_set_interrupts_enabled(intr);

	if (synch != NULL) {
		if (os_atomic_sub(synch, (!self_xcall) ? failsig + 1 : failsig, relaxed) == 0) {
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			thread_block(THREAD_CONTINUE_NULL);
		}
	}

	if (!self_xcall) {
		return max_cpu - failsig - 1;
	} else {
		return max_cpu - failsig;
	}
}

unsigned int
cpu_broadcast_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_broadcast_xcall_internal(SIGPxcall, synch, self_xcall, func, parm);
}

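/*
 * "Simple" broadcast variants: the synchronization counter lives in a
 * stack-allocated cpu_broadcast_xcall_simple_data, and the caller's function
 * is wrapped by cpu_broadcast_xcall_simple_cbk(), which performs the *sync
 * decrement and wakeup on the caller's behalf.
 */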
struct cpu_broadcast_xcall_simple_data {
	broadcastFunc func;
	void *parm;
	uint32_t sync;
};

static void
cpu_broadcast_xcall_simple_cbk(void *parm)
{
	struct cpu_broadcast_xcall_simple_data *data = (struct cpu_broadcast_xcall_simple_data *)parm;

	data->func(data->parm);

	if (os_atomic_dec(&data->sync, relaxed) == 0) {
		thread_wakeup((event_t)&data->sync);
	}
}

static unsigned int
cpu_xcall_simple(boolean_t self_xcall,
    broadcastFunc func,
    void *parm,
    bool immediate)
{
	struct cpu_broadcast_xcall_simple_data data = {};

	data.func = func;
	data.parm = parm;

	return cpu_broadcast_xcall_internal(immediate ? SIGPxcallImm : SIGPxcall, &data.sync, self_xcall, cpu_broadcast_xcall_simple_cbk, &data);
}

unsigned int
cpu_broadcast_immediate_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_broadcast_xcall_internal(SIGPxcallImm, synch, self_xcall, func, parm);
}

unsigned int
cpu_broadcast_xcall_simple(boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_xcall_simple(self_xcall, func, parm, false);
}

unsigned int
cpu_broadcast_immediate_xcall_simple(boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_xcall_simple(self_xcall, func, parm, true);
}

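/*
 * Validate the target CPU and deliver a single cross-call signal to it;
 * common path for cpu_xcall() and cpu_immediate_xcall().
 */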
static kern_return_t
cpu_xcall_internal(unsigned int signal, int cpu_number, broadcastFunc func, void *param)
{
	cpu_data_t *target_cpu_datap;

	if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number())) {
		return KERN_INVALID_ARGUMENT;
	}

	if (func == NULL || param == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu_number].cpu_data_vaddr;
	if (target_cpu_datap == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return cpu_signal(target_cpu_datap, signal, (void *)func, param);
}

kern_return_t
cpu_xcall(int cpu_number, broadcastFunc func, void *param)
{
	return cpu_xcall_internal(SIGPxcall, cpu_number, func, param);
}

kern_return_t
cpu_immediate_xcall(int cpu_number, broadcastFunc func, void *param)
{
	return cpu_xcall_internal(SIGPxcallImm, cpu_number, func, param);
}

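/*
 * Post a signal to target_proc's pending mask and, unless signalling is
 * disabled on the target, dispatch the IPI (deferred or immediate).  For
 * SIGPxcall/SIGPxcallImm this also hands off the function/parameter pair,
 * retrying the compare-and-swap while draining our own pending xcalls to
 * avoid deadlocking against a CPU that is simultaneously xcalling us.
 */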
static kern_return_t
cpu_signal_internal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1,
    boolean_t defer)
{
	unsigned int    Check_SIGPdisabled;
	int             current_signals;
	Boolean         swap_success;
	boolean_t       interruptible = ml_set_interrupts_enabled(FALSE);
	cpu_data_t      *current_proc = getCpuDatap();

	/* We'll mandate that only IPIs meant to kick a core out of idle may ever be deferred. */
	if (defer) {
		assert(signal == SIGPnop);
	}

	if (current_proc != target_proc) {
		Check_SIGPdisabled = SIGPdisabled;
	} else {
		Check_SIGPdisabled = 0;
	}

	if ((signal == SIGPxcall) || (signal == SIGPxcallImm)) {
		do {
			current_signals = target_proc->cpu_signal;
			if ((current_signals & SIGPdisabled) == SIGPdisabled) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}
			swap_success = OSCompareAndSwap(current_signals & (~signal), current_signals | signal,
			    &target_proc->cpu_signal);

			if (!swap_success && (signal == SIGPxcallImm) && (target_proc->cpu_signal & SIGPxcallImm)) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_ALREADY_WAITING;
			}

			/* Drain pending xcalls on this cpu; the CPU we're trying to xcall may in turn
			 * be trying to xcall us. Since we have interrupts disabled that can deadlock,
			 * so break the deadlock by draining pending xcalls. */
			if (!swap_success && (current_proc->cpu_signal & signal)) {
				cpu_handle_xcall(current_proc);
			}
		} while (!swap_success);

		if (signal == SIGPxcallImm) {
			target_proc->cpu_imm_xcall_p0 = p0;
			target_proc->cpu_imm_xcall_p1 = p1;
		} else {
			target_proc->cpu_xcall_p0 = p0;
			target_proc->cpu_xcall_p1 = p1;
		}
	} else {
		do {
			current_signals = target_proc->cpu_signal;
			if ((Check_SIGPdisabled != 0) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}

			swap_success = OSCompareAndSwap(current_signals, current_signals | signal,
			    &target_proc->cpu_signal);
		} while (!swap_success);
	}

	/*
	 * Issue DSB here to guarantee: 1) prior stores to pending signal mask and xcall params
	 * will be visible to other cores when the IPI is dispatched, and 2) subsequent
	 * instructions to signal the other cores will not execute until after the barrier.
	 * DMB would be sufficient to guarantee 1) but not 2).
	 */
	__builtin_arm_dsb(DSB_ISH);

	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		if (defer) {
#if defined(HAS_IPI)
			if (gFastIPI) {
				ml_cpu_signal_deferred(target_proc->cpu_phys_id);
			} else {
				PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
			}
#else
			PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
		} else {
#if defined(HAS_IPI)
			if (gFastIPI) {
				ml_cpu_signal(target_proc->cpu_phys_id);
			} else {
				PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
			}
#else
			PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
		}
	}

	ml_set_interrupts_enabled(interruptible);
	return KERN_SUCCESS;
}

kern_return_t
cpu_signal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1)
{
	return cpu_signal_internal(target_proc, signal, p0, p1, FALSE);
}

kern_return_t
cpu_signal_deferred(cpu_data_t *target_proc)
{
	return cpu_signal_internal(target_proc, SIGPnop, NULL, NULL, TRUE);
}

void
cpu_signal_cancel(cpu_data_t *target_proc)
{
	/* TODO: Should we care about the state of a core as far as squashing deferred IPIs goes? */
	if (!(target_proc->cpu_signal & SIGPdisabled)) {
#if defined(HAS_IPI)
		if (gFastIPI) {
			ml_cpu_signal_retract(target_proc->cpu_phys_id);
		} else {
			PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
		}
#else
		PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
	}
}

void
cpu_signal_handler(void)
{
	cpu_signal_handler_internal(FALSE);
}

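/*
 * IPI handler: acknowledge and service every signal bit currently pending for
 * this CPU (decrementer, kperf, xcalls, ASTs, debugger, cache maintenance),
 * re-reading the mask until no serviceable bits remain.
 */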
void
cpu_signal_handler_internal(boolean_t disable_signal)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	unsigned int cpu_signal;

	cpu_data_ptr->cpu_stat.ipi_cnt++;
	cpu_data_ptr->cpu_stat.ipi_cnt_wake++;
	SCHED_STATS_INC(ipi_count);

	cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);

	if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE)) {
		os_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
	} else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE)) {
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
	}

	while (cpu_signal & ~SIGPdisabled) {
		if (cpu_signal & SIGPdec) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdec, relaxed);
			INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_IPI);
			rtclock_intr(FALSE);
			INTERRUPT_MASKED_DEBUG_END();
		}
#if KPERF
		if (cpu_signal & SIGPkppet) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPkppet, relaxed);
			extern void kperf_signal_handler(void);
			INTERRUPT_MASKED_DEBUG_START(kperf_signal_handler, DBG_INTR_TYPE_IPI);
			kperf_signal_handler();
			INTERRUPT_MASKED_DEBUG_END();
		}
#endif /* KPERF */
		if (cpu_signal & (SIGPxcall | SIGPxcallImm)) {
			cpu_handle_xcall(cpu_data_ptr);
		}
		if (cpu_signal & SIGPast) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPast, relaxed);
			INTERRUPT_MASKED_DEBUG_START(ast_check, DBG_INTR_TYPE_IPI);
			ast_check(current_processor());
			INTERRUPT_MASKED_DEBUG_END();
		}
		if (cpu_signal & SIGPdebug) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdebug, relaxed);
			INTERRUPT_MASKED_DEBUG_START(DebuggerXCall, DBG_INTR_TYPE_IPI);
			DebuggerXCall(cpu_data_ptr->cpu_int_state);
			INTERRUPT_MASKED_DEBUG_END();
		}
#if defined(ARMA7)
		if (cpu_signal & SIGPLWFlush) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWFlush, relaxed);
			INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI);
			cache_xcall_handler(LWFlush);
			INTERRUPT_MASKED_DEBUG_END();
		}
		if (cpu_signal & SIGPLWClean) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWClean, relaxed);
			INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI);
			cache_xcall_handler(LWClean);
			INTERRUPT_MASKED_DEBUG_END();
		}
#endif

		cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);
	}
}

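/*
 * Wait for a halting CPU to actually go offline: poll the implementation-
 * defined CPU status register for a runtime shutdown, or the sleep token for
 * the S2R path.
 */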
void
cpu_exit_wait(int cpu_id)
{
#if USE_APPLEARMSMP
	if (!ml_is_quiescing()) {
		// For runtime disable (non S2R) the CPU will shut down immediately.
		ml_topology_cpu_t *cpu = &ml_get_topology_info()->cpus[cpu_id];
		assert(cpu && cpu->cpu_IMPL_regs);
		volatile uint64_t *cpu_sts = (void *)(cpu->cpu_IMPL_regs + CPU_PIO_CPU_STS_OFFSET);

		// Poll the "CPU running state" field until it is 0 (off)
		while ((*cpu_sts & CPU_PIO_CPU_STS_cpuRunSt_mask) != 0x00) {
			__builtin_arm_dsb(DSB_ISH);
		}
		return;
	}
#endif /* USE_APPLEARMSMP */

	if (cpu_id != master_cpu) {
		// For S2R, ml_arm_sleep() will do some extra polling after setting ARM_CPU_ON_SLEEP_PATH.
		cpu_data_t *cpu_data_ptr;

		cpu_data_ptr = CpuDataEntries[cpu_id].cpu_data_vaddr;
		while (!((*(volatile unsigned int *)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {
		}
	}
}

boolean_t
cpu_can_exit(__unused int cpu)
{
	return TRUE;
}

void
cpu_machine_init(void)
{
	static boolean_t started = FALSE;
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = getCpuDatap();
	started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState);
	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		platform_cache_init();
	}

	/* Note: this calls IOCPURunPlatformActiveActions when resuming on boot cpu */
	PE_cpu_machine_init(cpu_data_ptr->cpu_id, !started);

	cpu_data_ptr->cpu_flags |= StartedState;
	ml_init_interrupt();
}

processor_t
current_processor(void)
{
	return PERCPU_GET(processor);
}

processor_t
cpu_to_processor(int cpu)
{
	cpu_data_t *cpu_data = cpu_datap(cpu);
	if (cpu_data != NULL) {
		return PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data);
	} else {
		return NULL;
	}
}

cpu_data_t *
processor_to_cpu_datap(processor_t processor)
{
	assert(processor->cpu_id <= ml_get_max_cpu_number());
	assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL);

	return PERCPU_GET_RELATIVE(cpu_data, processor, processor);
}

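/*
 * Reserve the per-CPU data area for the non-boot CPUs very early in boot,
 * before the VM is fully up (see the note about pmap_steal_memory below).
 */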
__startup_func
static void
cpu_data_startup_init(void)
{
	vm_size_t size = percpu_section_size() * (ml_get_cpu_count() - 1);

	percpu_base.size = percpu_section_size();
	if (ml_get_cpu_count() == 1) {
		percpu_base.start = VM_MAX_KERNEL_ADDRESS;
		return;
	}

	/*
	 * The memory needs to be physically contiguous because it contains
	 * cpu_data_t structures sometimes accessed during reset
	 * with the MMU off.
	 *
	 * kmem_alloc_contig() can't be used early, at the time STARTUP_SUB_PERCPU
	 * normally runs, so we instead steal the memory for the PERCPU subsystem
	 * even earlier.
	 */
	percpu_base.start = (vm_offset_t)pmap_steal_memory(round_page(size));
	bzero((void *)percpu_base.start, round_page(size));

	percpu_base.start -= percpu_section_start();
	percpu_base.end = percpu_base.start + size - 1;
	percpu_base_cur = percpu_base.start;
}
STARTUP(PMAP_STEAL, STARTUP_RANK_FIRST, cpu_data_startup_init);

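/*
 * Hand out a cpu_data_t: the boot CPU uses the master per-CPU slot; each
 * secondary CPU carves the next slot out of the area reserved by
 * cpu_data_startup_init() and also gets its stacks via cpu_stack_alloc().
 */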
cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	cpu_data_t *cpu_data_ptr = NULL;
	vm_address_t base;

	if (is_boot_cpu) {
		cpu_data_ptr = PERCPU_GET_MASTER(cpu_data);
	} else {
		base = os_atomic_add_orig(&percpu_base_cur,
		    percpu_section_size(), relaxed);

		cpu_data_ptr = PERCPU_GET_WITH_BASE(base, cpu_data);
		cpu_stack_alloc(cpu_data_ptr);
	}

	return cpu_data_ptr;
}

ast_t *
ast_pending(void)
{
	return &getCpuDatap()->cpu_pending_ast;
}

cpu_type_t
slot_type(int slot_num)
{
	return cpu_datap(slot_num)->cpu_type;
}

cpu_subtype_t
slot_subtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_subtype;
}

cpu_threadtype_t
slot_threadtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_threadtype;
}

cpu_type_t
cpu_type(void)
{
	return getCpuDatap()->cpu_type;
}

cpu_subtype_t
cpu_subtype(void)
{
	return getCpuDatap()->cpu_subtype;
}

cpu_threadtype_t
cpu_threadtype(void)
{
	return getCpuDatap()->cpu_threadtype;
}

int
cpu_number(void)
{
	return getCpuDatap()->cpu_number;
}

vm_offset_t
current_percpu_base(void)
{
	return current_thread()->machine.pcpu_data_base;
}

uint64_t
ml_get_wake_timebase(void)
{
	return wake_abstime;
}

bool
ml_cpu_signal_is_enabled(void)
{
	return !(getCpuDatap()->cpu_signal & SIGPdisabled);
}

bool
ml_cpu_can_exit(__unused int cpu_id)
{
	/* processor_exit() is always allowed on the S2R path */
	if (ml_is_quiescing()) {
		return true;
	}
#if HAS_CLUSTER && USE_APPLEARMSMP
	/*
	 * Cyprus and newer chips can disable individual non-boot CPUs. The
	 * implementation polls cpuX_IMPL_CPU_STS, which differs on older chips.
	 */
	if (CpuDataEntries[cpu_id].cpu_data_vaddr != &BootCpuData) {
		return true;
	}
#endif
	return false;
}

void
ml_cpu_init_state(void)
{
	lck_grp_init(&cpu_lck_grp, "cpu_lck_grp", LCK_GRP_ATTR_NULL);
	lck_rw_init(&cpu_state_lock, &cpu_lck_grp, LCK_ATTR_NULL);
}

#ifdef USE_APPLEARMSMP

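/*
 * cpu_state_lock protocol: CPU state transitions take the lock exclusive
 * while flipping in_state_transition; ml_cpu_begin_loop()/ml_cpu_end_loop()
 * take it shared so callers can iterate over CPUs without racing a
 * transition.
 */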
void
ml_cpu_begin_state_transition(int cpu_id)
{
	lck_rw_lock_exclusive(&cpu_state_lock);
	CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = true;
	lck_rw_unlock_exclusive(&cpu_state_lock);
}

void
ml_cpu_end_state_transition(int cpu_id)
{
	lck_rw_lock_exclusive(&cpu_state_lock);
	CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = false;
	lck_rw_unlock_exclusive(&cpu_state_lock);
}

void
ml_cpu_begin_loop(void)
{
	lck_rw_lock_shared(&cpu_state_lock);
}

void
ml_cpu_end_loop(void)
{
	lck_rw_unlock_shared(&cpu_state_lock);
}

#else /* USE_APPLEARMSMP */

void
ml_cpu_begin_state_transition(__unused int cpu_id)
{
}

void
ml_cpu_end_state_transition(__unused int cpu_id)
{
}

void
ml_cpu_begin_loop(void)
{
}

void
ml_cpu_end_loop(void)
{
}

#endif /* USE_APPLEARMSMP */