/*
 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm/cpu_common.c
 *
 * cpu routines common to all supported arm variants
 */

#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <mach/processor_info.h>
#include <machine/atomic.h>
#include <machine/config.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>
#include <libkern/OSAtomic.h>

#if KPERF
void kperf_signal_handler(unsigned int cpu_number);
#endif

cpu_data_t BootCpuData;
cpu_data_entry_t CpuDataEntries[MAX_CPUS];

struct processor BootProcessor;

unsigned int real_ncpus = 1;
boolean_t idle_enable = FALSE;
uint64_t wake_abstime = 0x0ULL;

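/*
 * Routine: cpu_datap
 * Function: Return the cpu_data_t for the given logical CPU number, or
 *	NULL if that CpuDataEntries slot has not been populated.
 */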
cpu_data_t *
cpu_datap(int cpu)
{
	assert(cpu < MAX_CPUS);
	return CpuDataEntries[cpu].cpu_data_vaddr;
}

kern_return_t
cpu_control(int slot_num,
    processor_info_t info,
    unsigned int count)
{
	printf("cpu_control(%d,%p,%d) not implemented\n",
	    slot_num, info, count);
	return KERN_FAILURE;
}

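/*
 * Routine: cpu_info_count
 * Function: Report the number of processor_info_t words used by the
 *	requested statistics flavor (PROCESSOR_CPU_STAT or
 *	PROCESSOR_CPU_STAT64); unknown flavors set *count to 0 and
 *	return KERN_FAILURE.
 */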
kern_return_t
cpu_info_count(processor_flavor_t flavor,
    unsigned int *count)
{
	switch (flavor) {
	case PROCESSOR_CPU_STAT:
		*count = PROCESSOR_CPU_STAT_COUNT;
		return KERN_SUCCESS;

	case PROCESSOR_CPU_STAT64:
		*count = PROCESSOR_CPU_STAT64_COUNT;
		return KERN_SUCCESS;

	default:
		*count = 0;
		return KERN_FAILURE;
	}
}

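/*
 * Routine: cpu_info
 * Function: Copy the per-CPU interrupt/exception counters for the given
 *	slot into the caller-supplied buffer. The 32-bit flavor truncates
 *	the 64-bit counters; both flavors fail if *count is too small.
 */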
kern_return_t
cpu_info(processor_flavor_t flavor, int slot_num, processor_info_t info,
    unsigned int *count)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr;

	switch (flavor) {
	case PROCESSOR_CPU_STAT:
	{
		if (*count < PROCESSOR_CPU_STAT_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat_t cpu_stat = (processor_cpu_stat_t)info;
		cpu_stat->irq_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = (uint32_t)cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = (uint32_t)cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = (uint32_t)cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = (uint32_t)cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.instr_ex_cnt;

		*count = PROCESSOR_CPU_STAT_COUNT;

		return KERN_SUCCESS;
	}

	case PROCESSOR_CPU_STAT64:
	{
		if (*count < PROCESSOR_CPU_STAT64_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat64_t cpu_stat = (processor_cpu_stat64_t)info;
		cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt;
		cpu_stat->pmi_cnt = cpu_data_ptr->cpu_stat.pmi_cnt;

		*count = PROCESSOR_CPU_STAT64_COUNT;

		return KERN_SUCCESS;
	}

	default:
		return KERN_FAILURE;
	}
}

/*
 * Routine: cpu_doshutdown
 * Function: Invoke the supplied shutdown routine on the given processor.
 */
void
cpu_doshutdown(void (*doshutdown)(processor_t),
    processor_t processor)
{
	doshutdown(processor);
}

/*
 * Routine: cpu_idle_tickle
 * Function: Invoke the registered idle timer callback and, based on the
 *	timeout it returns, either rearm or disarm the idle timer deadline,
 *	then resync this CPU's timer deadlines.
 */
void
cpu_idle_tickle(void)
{
	boolean_t intr;
	cpu_data_t *cpu_data_ptr;
	uint64_t new_idle_timeout_ticks = 0x0ULL;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr->idle_timer_notify != (void *)NULL) {
		((idle_timer_t)cpu_data_ptr->idle_timer_notify)(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
		if (new_idle_timeout_ticks != 0x0ULL) {
			/* if a new idle timeout was requested, set the new idle timer deadline */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		} else {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		}
		timer_resync_deadlines();
	}
	(void) ml_set_interrupts_enabled(intr);
}

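/*
 * Routine: cpu_handle_xcall
 * Function: Run the cross-call posted to this CPU: consume the
 *	function/parameter pair, clear SIGPxcall, then invoke the function.
 *	If the parameters have not been published yet, do nothing; since
 *	SIGPxcall remains set, the caller will come back around.
 */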
static void
cpu_handle_xcall(cpu_data_t *cpu_data_ptr)
{
	broadcastFunc xfunc;
	void *xparam;

	__c11_atomic_thread_fence(memory_order_acquire_smp);
	/* Come back around if cpu_signal_internal is running on another CPU and has just
	 * added SIGPxcall to the pending mask, but hasn't yet assigned the call params. */
	if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_xcall_p0;
		xparam = cpu_data_ptr->cpu_xcall_p1;
		cpu_data_ptr->cpu_xcall_p0 = NULL;
		cpu_data_ptr->cpu_xcall_p1 = NULL;
		__c11_atomic_thread_fence(memory_order_acq_rel_smp);
		hw_atomic_and_noret(&cpu_data_ptr->cpu_signal, ~SIGPxcall);
		xfunc(xparam);
	}
}

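/*
 * Routine: cpu_broadcast_xcall
 * Function: Post SIGPxcall with func/parm to every other populated CPU and,
 *	if self_xcall is TRUE, also run func on the calling CPU. When synch
 *	is non-NULL it is initialized to real_ncpus and used as a rendezvous
 *	counter the caller sleeps on; the remote invocations of func are
 *	expected (by convention, not enforced here) to decrement it.
 *	Returns the number of CPUs on which func was, or will be, run.
 */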
unsigned int
cpu_broadcast_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	boolean_t intr;
	cpu_data_t *cpu_data_ptr;
	cpu_data_t *target_cpu_datap;
	unsigned int failsig;
	int cpu;
	int max_cpu;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	failsig = 0;

	if (synch != NULL) {
		*synch = real_ncpus;
		assert_wait((event_t)synch, THREAD_UNINT);
	}

	max_cpu = ml_get_max_cpu_number();
	for (cpu = 0; cpu <= max_cpu; cpu++) {
		target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

		if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
			continue;
		}

		if (KERN_SUCCESS != cpu_signal(target_cpu_datap, SIGPxcall, (void *)func, parm)) {
			failsig++;
		}
	}


	if (self_xcall) {
		func(parm);
	}

	(void) ml_set_interrupts_enabled(intr);

	if (synch != NULL) {
		if (hw_atomic_sub(synch, (!self_xcall)? failsig + 1 : failsig) == 0) {
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			thread_block(THREAD_CONTINUE_NULL);
		}
	}

	if (!self_xcall) {
		return real_ncpus - failsig - 1;
	} else {
		return real_ncpus - failsig;
	}
}

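/*
 * Routine: cpu_xcall
 * Function: Post SIGPxcall with func/param to a single CPU identified by
 *	cpu_number. Returns KERN_INVALID_ARGUMENT for an out-of-range or
 *	unpopulated CPU, otherwise the result of cpu_signal().
 */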
kern_return_t
cpu_xcall(int cpu_number, broadcastFunc func, void *param)
{
	cpu_data_t *target_cpu_datap;

	if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number())) {
		return KERN_INVALID_ARGUMENT;
	}

	target_cpu_datap = (cpu_data_t*)CpuDataEntries[cpu_number].cpu_data_vaddr;
	if (target_cpu_datap == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return cpu_signal(target_cpu_datap, SIGPxcall, (void*)func, param);
}

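/*
 * Routine: cpu_signal_internal
 * Function: Set the requested signal bit(s) in the target CPU's pending
 *	mask and, unless the target has signals disabled, dispatch an IPI
 *	via PE_cpu_signal() (or PE_cpu_signal_deferred() when defer is TRUE).
 *	For SIGPxcall the p0/p1 call parameters are also published; the
 *	sender spins until any previously posted xcall has been consumed,
 *	so only one xcall is outstanding per target at a time.
 */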
static kern_return_t
cpu_signal_internal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1,
    boolean_t defer)
{
	unsigned int Check_SIGPdisabled;
	int current_signals;
	Boolean swap_success;
	boolean_t interruptible = ml_set_interrupts_enabled(FALSE);
	cpu_data_t *current_proc = getCpuDatap();

	/* We'll mandate that only IPIs meant to kick a core out of idle may ever be deferred. */
	if (defer) {
		assert(signal == SIGPnop);
	}

	if (current_proc != target_proc) {
		Check_SIGPdisabled = SIGPdisabled;
	} else {
		Check_SIGPdisabled = 0;
	}

	if (signal == SIGPxcall) {
		do {
			current_signals = target_proc->cpu_signal;
			if ((current_signals & SIGPdisabled) == SIGPdisabled) {
#if DEBUG || DEVELOPMENT
				target_proc->failed_signal = SIGPxcall;
				target_proc->failed_xcall = p0;
				OSIncrementAtomicLong(&target_proc->failed_signal_count);
#endif
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}
			swap_success = OSCompareAndSwap(current_signals & (~SIGPxcall), current_signals | SIGPxcall,
			    &target_proc->cpu_signal);

			/* Drain pending xcalls on this cpu; the CPU we're trying to xcall may in turn
			 * be trying to xcall us. Because interrupts are disabled here, that can
			 * deadlock, so break the deadlock by draining pending xcalls. */
			if (!swap_success && (current_proc->cpu_signal & SIGPxcall)) {
				cpu_handle_xcall(current_proc);
			}
		} while (!swap_success);

		target_proc->cpu_xcall_p0 = p0;
		target_proc->cpu_xcall_p1 = p1;
	} else {
		do {
			current_signals = target_proc->cpu_signal;
			if ((Check_SIGPdisabled != 0) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) {
#if DEBUG || DEVELOPMENT
				target_proc->failed_signal = signal;
				OSIncrementAtomicLong(&target_proc->failed_signal_count);
#endif
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}

			swap_success = OSCompareAndSwap(current_signals, current_signals | signal,
			    &target_proc->cpu_signal);
		} while (!swap_success);
	}

	/*
	 * Issue DSB here to guarantee: 1) prior stores to pending signal mask and xcall params
	 * will be visible to other cores when the IPI is dispatched, and 2) subsequent
	 * instructions to signal the other cores will not execute until after the barrier.
	 * DMB would be sufficient to guarantee 1) but not 2).
	 */
	__builtin_arm_dsb(DSB_ISH);

	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		if (defer) {
			PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
		} else {
			PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
		}
	}

	ml_set_interrupts_enabled(interruptible);
	return KERN_SUCCESS;
}

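/*
 * Routine: cpu_signal / cpu_signal_deferred
 * Function: Non-deferred and deferred wrappers around cpu_signal_internal().
 *	cpu_signal_deferred() sends SIGPnop, the only signal allowed to be
 *	deferred (see the assert in cpu_signal_internal()), to nudge a core
 *	out of idle.
 */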
kern_return_t
cpu_signal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1)
{
	return cpu_signal_internal(target_proc, signal, p0, p1, FALSE);
}

kern_return_t
cpu_signal_deferred(cpu_data_t *target_proc)
{
	return cpu_signal_internal(target_proc, SIGPnop, NULL, NULL, TRUE);
}

void
cpu_signal_cancel(cpu_data_t *target_proc)
{
	/* TODO: Should we care about the state of a core as far as squashing deferred IPIs goes? */
	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
	}
}

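/*
 * Routine: cpu_signal_handler
 * Function: Service pending inter-processor signals on the current CPU,
 *	re-reading the pending mask and looping until no signals other than
 *	SIGPdisabled remain. The _internal variant can also set or clear
 *	SIGPdisabled, depending on the disable_signal argument, before
 *	servicing the pending signals.
 */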
void
cpu_signal_handler(void)
{
	cpu_signal_handler_internal(FALSE);
}

void
cpu_signal_handler_internal(boolean_t disable_signal)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	unsigned int cpu_signal;


	cpu_data_ptr->cpu_stat.ipi_cnt++;
	cpu_data_ptr->cpu_stat.ipi_cnt_wake++;

	SCHED_STATS_IPI(current_processor());

	cpu_signal = hw_atomic_or(&cpu_data_ptr->cpu_signal, 0);

	if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE)) {
		(void)hw_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled);
	} else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE)) {
		(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdisabled);
	}

	while (cpu_signal & ~SIGPdisabled) {
		if (cpu_signal & SIGPdec) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdec);
			rtclock_intr(FALSE);
		}
#if KPERF
		if (cpu_signal & SIGPkptimer) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPkptimer);
			kperf_signal_handler((unsigned int)cpu_data_ptr->cpu_number);
		}
#endif
		if (cpu_signal & SIGPxcall) {
			cpu_handle_xcall(cpu_data_ptr);
		}
		if (cpu_signal & SIGPast) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPast);
			ast_check(cpu_data_ptr->cpu_processor);
		}
		if (cpu_signal & SIGPdebug) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdebug);
			DebuggerXCall(cpu_data_ptr->cpu_int_state);
		}
#if __ARM_SMP__ && defined(ARMA7)
		if (cpu_signal & SIGPLWFlush) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPLWFlush);
			cache_xcall_handler(LWFlush);
		}
		if (cpu_signal & SIGPLWClean) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPLWClean);
			cache_xcall_handler(LWClean);
		}
#endif

		cpu_signal = hw_atomic_or(&cpu_data_ptr->cpu_signal, 0);
	}
}

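/*
 * Routine: cpu_exit_wait
 * Function: For any CPU other than the master, spin until that CPU reports
 *	ARM_CPU_ON_SLEEP_PATH in its cpu_sleep_token, i.e. until it has
 *	reached the sleep/offline path.
 */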
void
cpu_exit_wait(int cpu)
{
	if (cpu != master_cpu) {
		cpu_data_t *cpu_data_ptr;

		cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
		while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {
		}
	}
}

boolean_t
cpu_can_exit(__unused int cpu)
{
	return TRUE;
}

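/*
 * Routine: cpu_machine_init
 * Function: Per-CPU machine-layer initialization: run platform_cache_init()
 *	if a cache dispatch callback is installed, call PE_cpu_machine_init()
 *	with an indication of whether this CPU had already been started, mark
 *	the CPU StartedState, and initialize interrupts via ml_init_interrupt().
 */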
void
cpu_machine_init(void)
{
	static boolean_t started = FALSE;
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = getCpuDatap();
	started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState);
	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		platform_cache_init();
	}
	PE_cpu_machine_init(cpu_data_ptr->cpu_id, !started);
	cpu_data_ptr->cpu_flags |= StartedState;
	ml_init_interrupt();
}

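/*
 * Routine: cpu_processor_alloc / cpu_processor_free
 * Function: Return the statically allocated BootProcessor for the boot CPU;
 *	otherwise kalloc() and zero a new processor structure. The free
 *	routine releases only dynamically allocated processors.
 */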
processor_t
cpu_processor_alloc(boolean_t is_boot_cpu)
{
	processor_t proc;

	if (is_boot_cpu) {
		return &BootProcessor;
	}

	proc = kalloc(sizeof(*proc));
	if (!proc) {
		return NULL;
	}

	bzero((void *) proc, sizeof(*proc));
	return proc;
}

void
cpu_processor_free(processor_t proc)
{
	if (proc != NULL && proc != &BootProcessor) {
		kfree(proc, sizeof(*proc));
	}
}

processor_t
current_processor(void)
{
	return getCpuDatap()->cpu_processor;
}

processor_t
cpu_to_processor(int cpu)
{
	cpu_data_t *cpu_data = cpu_datap(cpu);
	if (cpu_data != NULL) {
		return cpu_data->cpu_processor;
	} else {
		return NULL;
	}
}

cpu_data_t *
processor_to_cpu_datap(processor_t processor)
{
	cpu_data_t *target_cpu_datap;

	assert(processor->cpu_id < MAX_CPUS);
	assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL);

	target_cpu_datap = (cpu_data_t*)CpuDataEntries[processor->cpu_id].cpu_data_vaddr;
	assert(target_cpu_datap->cpu_processor == processor);

	return target_cpu_datap;
}

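/*
 * Routine: cpu_data_alloc
 * Function: Return the per-CPU data for a CPU: the static BootCpuData for
 *	the boot CPU, or a freshly kmem_alloc'ed and zeroed cpu_data_t plus
 *	CPU stacks (via cpu_stack_alloc()) for a secondary CPU. Also allocates
 *	the associated processor structure; panics on allocation failure.
 */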
cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	cpu_data_t *cpu_data_ptr = NULL;

	if (is_boot_cpu) {
		cpu_data_ptr = &BootCpuData;
	} else {
		if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) {
			goto cpu_data_alloc_error;
		}

		bzero((void *)cpu_data_ptr, sizeof(cpu_data_t));

		cpu_stack_alloc(cpu_data_ptr);
	}

	cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu);
	if (cpu_data_ptr->cpu_processor == (struct processor *)NULL) {
		goto cpu_data_alloc_error;
	}

	return cpu_data_ptr;

cpu_data_alloc_error:
	panic("cpu_data_alloc() failed\n");
	return (cpu_data_t *)NULL;
}

ast_t *
ast_pending(void)
{
	return &getCpuDatap()->cpu_pending_ast;
}

cpu_type_t
slot_type(int slot_num)
{
	return cpu_datap(slot_num)->cpu_type;
}

cpu_subtype_t
slot_subtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_subtype;
}

cpu_threadtype_t
slot_threadtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_threadtype;
}

cpu_type_t
cpu_type(void)
{
	return getCpuDatap()->cpu_type;
}

cpu_subtype_t
cpu_subtype(void)
{
	return getCpuDatap()->cpu_subtype;
}

cpu_threadtype_t
cpu_threadtype(void)
{
	return getCpuDatap()->cpu_threadtype;
}

int
cpu_number(void)
{
	return getCpuDatap()->cpu_number;
}

uint64_t
ml_get_wake_timebase(void)
{
	return wake_abstime;
}