/*
 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm/cpu_common.c
 *
 * cpu routines common to all supported arm variants
 */

#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/percpu.h>
#include <kern/timer_queue.h>
#include <kern/locks.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <mach/processor_info.h>
#include <machine/atomic.h>
#include <machine/config.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>
#include <arm/proc_reg.h>
#include <libkern/OSAtomic.h>

SECURITY_READ_ONLY_LATE(struct percpu_base) percpu_base;
vm_address_t percpu_base_cur;
cpu_data_t PERCPU_DATA(cpu_data);
cpu_data_entry_t CpuDataEntries[MAX_CPUS];

static lck_grp_t cpu_lck_grp;
static lck_rw_t cpu_state_lock;

unsigned int real_ncpus = 1;
boolean_t idle_enable = FALSE;
uint64_t wake_abstime = 0x0ULL;

#if defined(HAS_IPI)
extern unsigned int gFastIPI;
#endif /* defined(HAS_IPI) */

cpu_data_t *
cpu_datap(int cpu)
{
	assert(cpu <= ml_get_max_cpu_number());
	return CpuDataEntries[cpu].cpu_data_vaddr;
}

kern_return_t
cpu_control(int slot_num,
    processor_info_t info,
    unsigned int count)
{
	printf("cpu_control(%d,%p,%d) not implemented\n",
	    slot_num, info, count);
	return KERN_FAILURE;
}

kern_return_t
cpu_info_count(processor_flavor_t flavor,
    unsigned int *count)
{
	switch (flavor) {
	case PROCESSOR_CPU_STAT:
		*count = PROCESSOR_CPU_STAT_COUNT;
		return KERN_SUCCESS;

	case PROCESSOR_CPU_STAT64:
		*count = PROCESSOR_CPU_STAT64_COUNT;
		return KERN_SUCCESS;

	default:
		*count = 0;
		return KERN_FAILURE;
	}
}

kern_return_t
cpu_info(processor_flavor_t flavor, int slot_num, processor_info_t info,
    unsigned int *count)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr;

	switch (flavor) {
	case PROCESSOR_CPU_STAT:
	{
		if (*count < PROCESSOR_CPU_STAT_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat_t cpu_stat = (processor_cpu_stat_t)info;
		cpu_stat->irq_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = (uint32_t)cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = (uint32_t)cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = (uint32_t)cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = (uint32_t)cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.instr_ex_cnt;

		*count = PROCESSOR_CPU_STAT_COUNT;

		return KERN_SUCCESS;
	}

	case PROCESSOR_CPU_STAT64:
	{
		if (*count < PROCESSOR_CPU_STAT64_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat64_t cpu_stat = (processor_cpu_stat64_t)info;
		cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt;
#if MONOTONIC
		cpu_stat->pmi_cnt = cpu_data_ptr->cpu_monotonic.mtc_npmis;
#endif /* MONOTONIC */

		*count = PROCESSOR_CPU_STAT64_COUNT;

		return KERN_SUCCESS;
	}

	default:
		return KERN_FAILURE;
	}
}

/*
 * Routine:  cpu_doshutdown
 * Function: Invoke the given shutdown routine on the given processor.
 */
void
cpu_doshutdown(void (*doshutdown)(processor_t),
    processor_t processor)
{
	doshutdown(processor);
}

/*
 * Routine:  cpu_idle_tickle
 * Function: Give the registered idle-timer callback a chance to request a
 *           new idle timer deadline, then resync the timer deadlines.
 */
void
cpu_idle_tickle(void)
{
	boolean_t intr;
	cpu_data_t *cpu_data_ptr;
	uint64_t new_idle_timeout_ticks = 0x0ULL;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		cpu_data_ptr->idle_timer_notify(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
		if (new_idle_timeout_ticks != 0x0ULL) {
			/* if a new idle timeout was requested set the new idle timer deadline */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		} else {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		}
		timer_resync_deadlines();
	}
	(void) ml_set_interrupts_enabled(intr);
}

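/*
 * Drain any pending cross-call on this CPU. The call parameters
 * (cpu_xcall_p0/p1 or cpu_imm_xcall_p0/p1) are published by
 * cpu_signal_internal() before the IPI is dispatched; the acquire fence
 * below pairs with the DSB there, and the NULL checks skip the window
 * where the SIGPxcall bit is already visible but the parameters have
 * not been assigned yet.
 */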
static void
cpu_handle_xcall(cpu_data_t *cpu_data_ptr)
{
	broadcastFunc xfunc;
	void *xparam;

	os_atomic_thread_fence(acquire);
	/* Come back around if cpu_signal_internal is running on another CPU
	 * and has just added SIGPxcall to the pending mask, but hasn't yet
	 * assigned the call params. */
	if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_xcall_p0;
		INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI);
		xparam = cpu_data_ptr->cpu_xcall_p1;
		cpu_data_ptr->cpu_xcall_p0 = NULL;
		cpu_data_ptr->cpu_xcall_p1 = NULL;
		os_atomic_thread_fence(acq_rel);
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcall, relaxed);
		xfunc(xparam);
		INTERRUPT_MASKED_DEBUG_END();
	}
	if (cpu_data_ptr->cpu_imm_xcall_p0 != NULL && cpu_data_ptr->cpu_imm_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_imm_xcall_p0;
		INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI);
		xparam = cpu_data_ptr->cpu_imm_xcall_p1;
		cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
		cpu_data_ptr->cpu_imm_xcall_p1 = NULL;
		os_atomic_thread_fence(acq_rel);
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcallImm, relaxed);
		xfunc(xparam);
		INTERRUPT_MASKED_DEBUG_END();
	}
}

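/*
 * Signal every other started CPU (and optionally the caller) to run
 * func(parm). When synch is non-NULL it acts as a rendezvous counter:
 * it is seeded with the CPU count, each invocation of func is expected
 * to decrement it (as cpu_broadcast_xcall_simple_cbk does), and the
 * initiator subtracts the CPUs that could not be signalled before
 * blocking until the count drains to zero. Returns the number of CPUs
 * that ran, or were successfully signalled to run, func.
 */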
static unsigned int
cpu_broadcast_xcall_internal(unsigned int signal,
    uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	boolean_t intr;
	cpu_data_t *cpu_data_ptr;
	cpu_data_t *target_cpu_datap;
	unsigned int failsig;
	int cpu;
	int max_cpu = ml_get_max_cpu_number() + 1;

	/* Neither func nor parm may be NULL. */
	assert(func);
	assert(parm);

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	failsig = 0;

	if (synch != NULL) {
		*synch = max_cpu;
		assert_wait((event_t)synch, THREAD_UNINT);
	}

	for (cpu = 0; cpu < max_cpu; cpu++) {
		target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

		if (target_cpu_datap == cpu_data_ptr) {
			continue;
		}

		if ((target_cpu_datap == NULL) ||
		    KERN_SUCCESS != cpu_signal(target_cpu_datap, signal, (void *)func, parm)) {
			failsig++;
		}
	}

	if (self_xcall) {
		func(parm);
	}

	(void) ml_set_interrupts_enabled(intr);

	if (synch != NULL) {
		if (os_atomic_sub(synch, (!self_xcall) ? failsig + 1 : failsig, relaxed) == 0) {
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			thread_block(THREAD_CONTINUE_NULL);
		}
	}

	if (!self_xcall) {
		return max_cpu - failsig - 1;
	} else {
		return max_cpu - failsig;
	}
}

unsigned int
cpu_broadcast_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_broadcast_xcall_internal(SIGPxcall, synch, self_xcall, func, parm);
}

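/*
 * The _simple variants wrap the caller's function so the rendezvous
 * counter described above is managed internally: the wrapper runs the
 * real function, decrements the counter, and wakes the initiator when
 * it reaches zero, so callers need not implement the synch protocol
 * themselves.
 */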
struct cpu_broadcast_xcall_simple_data {
	broadcastFunc func;
	void* parm;
	uint32_t sync;
};

static void
cpu_broadcast_xcall_simple_cbk(void *parm)
{
	struct cpu_broadcast_xcall_simple_data *data = (struct cpu_broadcast_xcall_simple_data*)parm;

	data->func(data->parm);

	if (os_atomic_dec(&data->sync, relaxed) == 0) {
		thread_wakeup((event_t)&data->sync);
	}
}

static unsigned int
cpu_xcall_simple(boolean_t self_xcall,
    broadcastFunc func,
    void *parm,
    bool immediate)
{
	struct cpu_broadcast_xcall_simple_data data = {};

	data.func = func;
	data.parm = parm;

	return cpu_broadcast_xcall_internal(immediate ? SIGPxcallImm : SIGPxcall, &data.sync, self_xcall, cpu_broadcast_xcall_simple_cbk, &data);
}

unsigned int
cpu_broadcast_immediate_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_broadcast_xcall_internal(SIGPxcallImm, synch, self_xcall, func, parm);
}

unsigned int
cpu_broadcast_xcall_simple(boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_xcall_simple(self_xcall, func, parm, false);
}

unsigned int
cpu_broadcast_immediate_xcall_simple(boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_xcall_simple(self_xcall, func, parm, true);
}

static kern_return_t
cpu_xcall_internal(unsigned int signal, int cpu_number, broadcastFunc func, void *param)
{
	cpu_data_t *target_cpu_datap;

	if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number())) {
		return KERN_INVALID_ARGUMENT;
	}

	if (func == NULL || param == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	target_cpu_datap = (cpu_data_t*)CpuDataEntries[cpu_number].cpu_data_vaddr;
	if (target_cpu_datap == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return cpu_signal(target_cpu_datap, signal, (void*)func, param);
}

kern_return_t
cpu_xcall(int cpu_number, broadcastFunc func, void *param)
{
	return cpu_xcall_internal(SIGPxcall, cpu_number, func, param);
}

kern_return_t
cpu_immediate_xcall(int cpu_number, broadcastFunc func, void *param)
{
	return cpu_xcall_internal(SIGPxcallImm, cpu_number, func, param);
}
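
/*
 * Illustrative use of cpu_xcall() (my_handler and my_arg are
 * hypothetical, not part of this file). The target CPU may refuse the
 * signal, e.g. if it has SIGPdisabled set, so callers should check the
 * return value; running the function locally is one possible fallback:
 *
 *	extern void my_handler(void *arg);      // hypothetical
 *	static int my_arg;                      // hypothetical
 *
 *	if (cpu_xcall(2, &my_handler, &my_arg) != KERN_SUCCESS) {
 *		my_handler(&my_arg);            // target unavailable
 *	}
 */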

static kern_return_t
cpu_signal_internal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1,
    boolean_t defer)
{
	unsigned int Check_SIGPdisabled;
	int current_signals;
	Boolean swap_success;
	boolean_t interruptible = ml_set_interrupts_enabled(FALSE);
	cpu_data_t *current_proc = getCpuDatap();

	/* We'll mandate that only IPIs meant to kick a core out of idle may ever be deferred. */
	if (defer) {
		assert(signal == SIGPnop);
	}

	if (current_proc != target_proc) {
		Check_SIGPdisabled = SIGPdisabled;
	} else {
		Check_SIGPdisabled = 0;
	}

	if ((signal == SIGPxcall) || (signal == SIGPxcallImm)) {
		do {
			current_signals = target_proc->cpu_signal;
			if ((current_signals & SIGPdisabled) == SIGPdisabled) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}
			swap_success = OSCompareAndSwap(current_signals & (~signal), current_signals | signal,
			    &target_proc->cpu_signal);

			if (!swap_success && (signal == SIGPxcallImm) && (target_proc->cpu_signal & SIGPxcallImm)) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_ALREADY_WAITING;
			}

			/* Drain pending xcalls on this cpu; the CPU we're trying to xcall
			 * may in turn be trying to xcall us. With interrupts disabled that
			 * can deadlock, so break the deadlock by draining pending xcalls. */
			if (!swap_success && (current_proc->cpu_signal & signal)) {
				cpu_handle_xcall(current_proc);
			}
		} while (!swap_success);

		if (signal == SIGPxcallImm) {
			target_proc->cpu_imm_xcall_p0 = p0;
			target_proc->cpu_imm_xcall_p1 = p1;
		} else {
			target_proc->cpu_xcall_p0 = p0;
			target_proc->cpu_xcall_p1 = p1;
		}
	} else {
		do {
			current_signals = target_proc->cpu_signal;
			if ((Check_SIGPdisabled != 0) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}

			swap_success = OSCompareAndSwap(current_signals, current_signals | signal,
			    &target_proc->cpu_signal);
		} while (!swap_success);
	}

	/*
	 * Issue DSB here to guarantee: 1) prior stores to the pending signal mask
	 * and xcall params will be visible to other cores when the IPI is
	 * dispatched, and 2) subsequent instructions to signal the other cores
	 * will not execute until after the barrier. DMB would be sufficient to
	 * guarantee 1) but not 2).
	 */
	__builtin_arm_dsb(DSB_ISH);

	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		if (defer) {
#if defined(HAS_IPI)
			if (gFastIPI) {
				ml_cpu_signal_deferred(target_proc->cpu_phys_id);
			} else {
				PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
			}
#else
			PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
		} else {
#if defined(HAS_IPI)
			if (gFastIPI) {
				ml_cpu_signal(target_proc->cpu_phys_id);
			} else {
				PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
			}
#else
			PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
		}
	}

	ml_set_interrupts_enabled(interruptible);
	return KERN_SUCCESS;
}

kern_return_t
cpu_signal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1)
{
	return cpu_signal_internal(target_proc, signal, p0, p1, FALSE);
}

kern_return_t
cpu_signal_deferred(cpu_data_t *target_proc)
{
	return cpu_signal_internal(target_proc, SIGPnop, NULL, NULL, TRUE);
}

void
cpu_signal_cancel(cpu_data_t *target_proc)
{
	/* TODO: Should we care about the state of a core as far as squashing deferred IPIs goes? */
	if (!(target_proc->cpu_signal & SIGPdisabled)) {
#if defined(HAS_IPI)
		if (gFastIPI) {
			ml_cpu_signal_retract(target_proc->cpu_phys_id);
		} else {
			PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
		}
#else
		PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
	}
}

void
cpu_signal_handler(void)
{
	cpu_signal_handler_internal(FALSE);
}

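/*
 * Core IPI dispatch loop: snapshot the pending-signal mask, handle and
 * clear each pending SIGP* bit in turn, then re-read the mask until
 * nothing other than SIGPdisabled remains. disable_signal toggles
 * SIGPdisabled so that, when TRUE, no further IPIs are accepted after
 * this pass.
 */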
void
cpu_signal_handler_internal(boolean_t disable_signal)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	unsigned int cpu_signal;

	cpu_data_ptr->cpu_stat.ipi_cnt++;
	cpu_data_ptr->cpu_stat.ipi_cnt_wake++;
	SCHED_STATS_INC(ipi_count);

	cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);

	if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE)) {
		os_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
	} else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE)) {
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
	}

	while (cpu_signal & ~SIGPdisabled) {
		if (cpu_signal & SIGPdec) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdec, relaxed);
			INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_IPI);
			rtclock_intr(FALSE);
			INTERRUPT_MASKED_DEBUG_END();
		}
#if KPERF
		if (cpu_signal & SIGPkppet) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPkppet, relaxed);
			extern void kperf_signal_handler(void);
			INTERRUPT_MASKED_DEBUG_START(kperf_signal_handler, DBG_INTR_TYPE_IPI);
			kperf_signal_handler();
			INTERRUPT_MASKED_DEBUG_END();
		}
#endif /* KPERF */
		if (cpu_signal & (SIGPxcall | SIGPxcallImm)) {
			cpu_handle_xcall(cpu_data_ptr);
		}
		if (cpu_signal & SIGPast) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPast, relaxed);
			INTERRUPT_MASKED_DEBUG_START(ast_check, DBG_INTR_TYPE_IPI);
			ast_check(current_processor());
			INTERRUPT_MASKED_DEBUG_END();
		}
		if (cpu_signal & SIGPdebug) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdebug, relaxed);
			INTERRUPT_MASKED_DEBUG_START(DebuggerXCall, DBG_INTR_TYPE_IPI);
			DebuggerXCall(cpu_data_ptr->cpu_int_state);
			INTERRUPT_MASKED_DEBUG_END();
		}
#if defined(ARMA7)
		if (cpu_signal & SIGPLWFlush) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWFlush, relaxed);
			INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI);
			cache_xcall_handler(LWFlush);
			INTERRUPT_MASKED_DEBUG_END();
		}
		if (cpu_signal & SIGPLWClean) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWClean, relaxed);
			INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI);
			cache_xcall_handler(LWClean);
			INTERRUPT_MASKED_DEBUG_END();
		}
#endif

		cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);
	}
}

void
cpu_exit_wait(int cpu_id)
{
#if USE_APPLEARMSMP
	if (!ml_is_quiescing()) {
		// For runtime disable (non S2R) the CPU will shut down immediately.
		ml_topology_cpu_t *cpu = &ml_get_topology_info()->cpus[cpu_id];
		assert(cpu && cpu->cpu_IMPL_regs);
		volatile uint64_t *cpu_sts = (void *)(cpu->cpu_IMPL_regs + CPU_PIO_CPU_STS_OFFSET);

		// Poll the "CPU running state" field until it is 0 (off)
		while ((*cpu_sts & CPU_PIO_CPU_STS_cpuRunSt_mask) != 0x00) {
			__builtin_arm_dsb(DSB_ISH);
		}
		return;
	}
#endif /* USE_APPLEARMSMP */

	if (cpu_id != master_cpu) {
		// For S2R, ml_arm_sleep() will do some extra polling after setting ARM_CPU_ON_SLEEP_PATH.
		cpu_data_t *cpu_data_ptr;

		cpu_data_ptr = CpuDataEntries[cpu_id].cpu_data_vaddr;
		while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {
		}
	}
}

boolean_t
cpu_can_exit(__unused int cpu)
{
	return TRUE;
}

void
cpu_machine_init(void)
{
	static boolean_t started = FALSE;
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = getCpuDatap();
	started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState);
	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		platform_cache_init();
	}

	/* Note: this calls IOCPURunPlatformActiveActions when resuming on boot cpu */
	PE_cpu_machine_init(cpu_data_ptr->cpu_id, !started);

	cpu_data_ptr->cpu_flags |= StartedState;
	ml_init_interrupt();
}

processor_t
current_processor(void)
{
	return PERCPU_GET(processor);
}

processor_t
cpu_to_processor(int cpu)
{
	cpu_data_t *cpu_data = cpu_datap(cpu);
	if (cpu_data != NULL) {
		return PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data);
	} else {
		return NULL;
	}
}

cpu_data_t *
processor_to_cpu_datap(processor_t processor)
{
	assert(processor->cpu_id <= ml_get_max_cpu_number());
	assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL);

	return PERCPU_GET_RELATIVE(cpu_data, processor, processor);
}

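/*
 * Carve out the per-CPU data region for the non-boot CPUs. The base is
 * biased by -percpu_section_start() so that adding a CPU's linker
 * section offset back yields its slot; cpu_data_alloc() below hands
 * out slots by advancing percpu_base_cur one percpu_section_size() at
 * a time.
 */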
__startup_func
static void
cpu_data_startup_init(void)
{
	vm_size_t size = percpu_section_size() * (ml_get_cpu_count() - 1);

	percpu_base.size = percpu_section_size();
	if (ml_get_cpu_count() == 1) {
		percpu_base.start = VM_MAX_KERNEL_ADDRESS;
		return;
	}

	/*
	 * The memory needs to be physically contiguous because it contains
	 * cpu_data_t structures sometimes accessed during reset
	 * with the MMU off.
	 *
	 * kmem_alloc_contig() can't be used early, at the time STARTUP_SUB_PERCPU
	 * normally runs, so we instead steal the memory for the PERCPU subsystem
	 * even earlier.
	 */
	percpu_base.start = (vm_offset_t)pmap_steal_memory(round_page(size));
	bzero((void *)percpu_base.start, round_page(size));

	percpu_base.start -= percpu_section_start();
	percpu_base.end = percpu_base.start + size - 1;
	percpu_base_cur = percpu_base.start;
}
STARTUP(PMAP_STEAL, STARTUP_RANK_FIRST, cpu_data_startup_init);

cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	cpu_data_t *cpu_data_ptr = NULL;
	vm_address_t base;

	if (is_boot_cpu) {
		cpu_data_ptr = PERCPU_GET_MASTER(cpu_data);
	} else {
		base = os_atomic_add_orig(&percpu_base_cur,
		    percpu_section_size(), relaxed);

		cpu_data_ptr = PERCPU_GET_WITH_BASE(base, cpu_data);
		cpu_stack_alloc(cpu_data_ptr);
	}

	return cpu_data_ptr;
}

ast_t *
ast_pending(void)
{
	return &getCpuDatap()->cpu_pending_ast;
}

cpu_type_t
slot_type(int slot_num)
{
	return cpu_datap(slot_num)->cpu_type;
}

cpu_subtype_t
slot_subtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_subtype;
}

cpu_threadtype_t
slot_threadtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_threadtype;
}

cpu_type_t
cpu_type(void)
{
	return getCpuDatap()->cpu_type;
}

cpu_subtype_t
cpu_subtype(void)
{
	return getCpuDatap()->cpu_subtype;
}

cpu_threadtype_t
cpu_threadtype(void)
{
	return getCpuDatap()->cpu_threadtype;
}

int
cpu_number(void)
{
	return getCpuDatap()->cpu_number;
}

vm_offset_t
current_percpu_base(void)
{
	return current_thread()->machine.pcpu_data_base;
}

uint64_t
ml_get_wake_timebase(void)
{
	return wake_abstime;
}

bool
ml_cpu_signal_is_enabled(void)
{
	return !(getCpuDatap()->cpu_signal & SIGPdisabled);
}

bool
ml_cpu_can_exit(__unused int cpu_id)
{
	/* processor_exit() is always allowed on the S2R path */
	if (ml_is_quiescing()) {
		return true;
	}
#if HAS_CLUSTER && USE_APPLEARMSMP
	/*
	 * Cyprus and newer chips can disable individual non-boot CPUs. The
	 * implementation polls cpuX_IMPL_CPU_STS, which differs on older chips.
	 */
	if (CpuDataEntries[cpu_id].cpu_data_vaddr != &BootCpuData) {
		return true;
	}
#endif
	return false;
}

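/*
 * cpu_state_lock guards CPU online/offline state transitions:
 * ml_cpu_begin/end_state_transition take it exclusive around flipping
 * in_state_transition, while ml_cpu_begin/end_loop take it shared so
 * readers can iterate over CPUs without a transition starting mid-loop.
 */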
void
ml_cpu_init_state(void)
{
	lck_grp_init(&cpu_lck_grp, "cpu_lck_grp", LCK_GRP_ATTR_NULL);
	lck_rw_init(&cpu_state_lock, &cpu_lck_grp, LCK_ATTR_NULL);
}

#ifdef USE_APPLEARMSMP

void
ml_cpu_begin_state_transition(int cpu_id)
{
	lck_rw_lock_exclusive(&cpu_state_lock);
	CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = true;
	lck_rw_unlock_exclusive(&cpu_state_lock);
}

void
ml_cpu_end_state_transition(int cpu_id)
{
	lck_rw_lock_exclusive(&cpu_state_lock);
	CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = false;
	lck_rw_unlock_exclusive(&cpu_state_lock);
}

void
ml_cpu_begin_loop(void)
{
	lck_rw_lock_shared(&cpu_state_lock);
}

void
ml_cpu_end_loop(void)
{
	lck_rw_unlock_shared(&cpu_state_lock);
}

#else /* USE_APPLEARMSMP */

void
ml_cpu_begin_state_transition(__unused int cpu_id)
{
}

void
ml_cpu_end_state_transition(__unused int cpu_id)
{
}

void
ml_cpu_begin_loop(void)
{
}

void
ml_cpu_end_loop(void)
{
}

#endif /* USE_APPLEARMSMP */