/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm/cpu_common.c
 *
 * cpu routines common to all supported arm variants
 */

#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <mach/processor_info.h>
#include <machine/atomic.h>
#include <machine/config.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>
#include <libkern/OSAtomic.h>
#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#if KPERF
void kperf_signal_handler(unsigned int cpu_number);
#endif

struct processor BootProcessor;

unsigned int real_ncpus = 1;
boolean_t idle_enable = FALSE;
uint64_t wake_abstime = 0x0ULL;

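/*
 * Return the cpu_data_t for the given CPU number
 * (NULL if that slot has not been populated).
 */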
cpu_data_t *
cpu_datap(int cpu)
{
	assert(cpu < MAX_CPUS);
	return (CpuDataEntries[cpu].cpu_data_vaddr);
}

kern_return_t
cpu_control(int slot_num,
            processor_info_t info,
            unsigned int count)
{
	printf("cpu_control(%d,%p,%d) not implemented\n",
	       slot_num, info, count);
	return (KERN_FAILURE);
}

kern_return_t
cpu_info_count(processor_flavor_t flavor,
               unsigned int *count)
{
	switch (flavor) {
	case PROCESSOR_CPU_STAT:
		*count = PROCESSOR_CPU_STAT_COUNT;
		return (KERN_SUCCESS);

	default:
		*count = 0;
		return (KERN_FAILURE);
	}
}

kern_return_t
cpu_info(processor_flavor_t flavor,
         int slot_num,
         processor_info_t info,
         unsigned int *count)
{
	switch (flavor) {
	case PROCESSOR_CPU_STAT:
	{
		processor_cpu_stat_t cpu_stat;
		cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr;

		if (*count < PROCESSOR_CPU_STAT_COUNT)
			return (KERN_FAILURE);

		cpu_stat = (processor_cpu_stat_t) info;
		cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt;

		*count = PROCESSOR_CPU_STAT_COUNT;

		return (KERN_SUCCESS);
	}

	default:
		return (KERN_FAILURE);
	}
}

/*
 * Routine: cpu_doshutdown
 * Function:
 */
void
cpu_doshutdown(void (*doshutdown) (processor_t),
               processor_t processor)
{
	doshutdown(processor);
}

/*
 * Routine: cpu_idle_tickle
 *
 */
void
cpu_idle_tickle(void)
{
	boolean_t intr;
	cpu_data_t *cpu_data_ptr;
	uint64_t new_idle_timeout_ticks = 0x0ULL;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr->idle_timer_notify != (void *)NULL) {
		((idle_timer_t)cpu_data_ptr->idle_timer_notify)(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
		if (new_idle_timeout_ticks != 0x0ULL) {
			/* if a new idle timeout was requested set the new idle timer deadline */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		} else {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		}
		timer_resync_deadlines();
	}
	(void) ml_set_interrupts_enabled(intr);
}

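/*
 * Execute a cross call posted to this CPU: consume the pending
 * function/parameter pair, clear SIGPxcall, then invoke the function.
 */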
static void
cpu_handle_xcall(cpu_data_t *cpu_data_ptr)
{
	broadcastFunc xfunc;
	void *xparam;

	__c11_atomic_thread_fence(memory_order_acquire_smp);
	/* Come back around if cpu_signal_internal is running on another CPU and has just
	 * added SIGPxcall to the pending mask, but hasn't yet assigned the call params. */
	if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_xcall_p0;
		xparam = cpu_data_ptr->cpu_xcall_p1;
		cpu_data_ptr->cpu_xcall_p0 = NULL;
		cpu_data_ptr->cpu_xcall_p1 = NULL;
		__c11_atomic_thread_fence(memory_order_acq_rel_smp);
		hw_atomic_and_noret(&cpu_data_ptr->cpu_signal, ~SIGPxcall);
		xfunc(xparam);
	}
}

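/*
 * Signal every other populated CPU to run func(parm) at IPI context,
 * optionally running it on the calling CPU as well (self_xcall).
 * If synch is non-NULL it is preset to real_ncpus and used as a completion
 * counter that the caller waits on; the caller subtracts here for any CPUs
 * that could not be signalled (and for itself when self_xcall is FALSE).
 * Returns the number of CPUs expected to run func.
 *
 * Usage sketch (illustrative only; my_handler and my_arg are hypothetical,
 * not part of this file): run a handler once on every active CPU without
 * waiting for completion. Note that parm must be non-NULL, since
 * cpu_handle_xcall() treats a NULL parameter as a not-yet-posted call:
 *
 *	static void my_handler(void *arg);
 *	static int my_arg;
 *	...
 *	cpu_broadcast_xcall(NULL, TRUE, my_handler, &my_arg);
 */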
unsigned int
cpu_broadcast_xcall(uint32_t *synch,
                    boolean_t self_xcall,
                    broadcastFunc func,
                    void *parm)
{
	boolean_t intr;
	cpu_data_t *cpu_data_ptr;
	cpu_data_t *target_cpu_datap;
	unsigned int failsig;
	int cpu;
	int max_cpu;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	failsig = 0;

	if (synch != NULL) {
		*synch = real_ncpus;
		assert_wait((event_t)synch, THREAD_UNINT);
	}

	max_cpu = ml_get_max_cpu_number();
	for (cpu = 0; cpu <= max_cpu; cpu++) {
		target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

		if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr))
			continue;

		if (KERN_SUCCESS != cpu_signal(target_cpu_datap, SIGPxcall, (void *)func, parm)) {
			failsig++;
		}
	}

	if (self_xcall) {
		func(parm);
	}

	(void) ml_set_interrupts_enabled(intr);

	if (synch != NULL) {
		if (hw_atomic_sub(synch, (!self_xcall) ? failsig + 1 : failsig) == 0)
			clear_wait(current_thread(), THREAD_AWAKENED);
		else
			thread_block(THREAD_CONTINUE_NULL);
	}

	if (!self_xcall)
		return (real_ncpus - failsig - 1);
	else
		return (real_ncpus - failsig);
}

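/*
 * Cross-call a single CPU: ask it to run func(param) at IPI context.
 * Returns KERN_INVALID_ARGUMENT for an out-of-range or unpopulated CPU,
 * otherwise the result of cpu_signal().
 */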
kern_return_t
cpu_xcall(int cpu_number, broadcastFunc func, void *param)
{
	cpu_data_t *target_cpu_datap;

	if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number()))
		return KERN_INVALID_ARGUMENT;

	target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu_number].cpu_data_vaddr;
	if (target_cpu_datap == NULL)
		return KERN_INVALID_ARGUMENT;

	return cpu_signal(target_cpu_datap, SIGPxcall, (void *)func, param);
}

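/*
 * Post the requested signal bit(s) in the target CPU's pending mask and,
 * unless the target has signalling disabled, raise an IPI (deferred or
 * immediate). For SIGPxcall the function/parameter pair is also published
 * before the IPI is issued. Returns KERN_FAILURE if the target has
 * signalling disabled (always checked for SIGPxcall; for other signals
 * only when the target is a different CPU).
 */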
static kern_return_t
cpu_signal_internal(cpu_data_t *target_proc,
                    unsigned int signal,
                    void *p0,
                    void *p1,
                    boolean_t defer)
{
	unsigned int Check_SIGPdisabled;
	int current_signals;
	Boolean swap_success;
	boolean_t interruptible = ml_set_interrupts_enabled(FALSE);
	cpu_data_t *current_proc = getCpuDatap();

	/* We'll mandate that only IPIs meant to kick a core out of idle may ever be deferred. */
	if (defer) {
		assert(signal == SIGPnop);
	}

	if (current_proc != target_proc)
		Check_SIGPdisabled = SIGPdisabled;
	else
		Check_SIGPdisabled = 0;

	if (signal == SIGPxcall) {
		do {
			current_signals = target_proc->cpu_signal;
			if ((current_signals & SIGPdisabled) == SIGPdisabled) {
#if DEBUG || DEVELOPMENT
				target_proc->failed_signal = SIGPxcall;
				target_proc->failed_xcall = p0;
				OSIncrementAtomicLong(&target_proc->failed_signal_count);
#endif
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}
			swap_success = OSCompareAndSwap(current_signals & (~SIGPxcall), current_signals | SIGPxcall,
			                                &target_proc->cpu_signal);

			/* Drain pending xcalls on this cpu; the CPU we're trying to xcall may in turn
			 * be trying to xcall us. Since we have interrupts disabled, that can deadlock;
			 * break the deadlock by draining pending xcalls. */
			if (!swap_success && (current_proc->cpu_signal & SIGPxcall))
				cpu_handle_xcall(current_proc);

		} while (!swap_success);

		target_proc->cpu_xcall_p0 = p0;
		target_proc->cpu_xcall_p1 = p1;
	} else {
		do {
			current_signals = target_proc->cpu_signal;
			if ((Check_SIGPdisabled != 0) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) {
#if DEBUG || DEVELOPMENT
				target_proc->failed_signal = signal;
				OSIncrementAtomicLong(&target_proc->failed_signal_count);
#endif
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}

			swap_success = OSCompareAndSwap(current_signals, current_signals | signal,
			                                &target_proc->cpu_signal);
		} while (!swap_success);
	}

	/*
	 * Issue DSB here to guarantee: 1) prior stores to pending signal mask and xcall params
	 * will be visible to other cores when the IPI is dispatched, and 2) subsequent
	 * instructions to signal the other cores will not execute until after the barrier.
	 * DMB would be sufficient to guarantee 1) but not 2).
	 */
	__builtin_arm_dsb(DSB_ISH);

	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		if (defer) {
			PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
		} else {
			PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
		}
	}

	ml_set_interrupts_enabled(interruptible);
	return (KERN_SUCCESS);
}

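/*
 * cpu_signal() raises an immediate IPI for the given signal;
 * cpu_signal_deferred() posts only a SIGPnop using the platform's deferred
 * IPI mechanism, which is intended solely for kicking a core out of idle.
 */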
kern_return_t
cpu_signal(cpu_data_t *target_proc,
           unsigned int signal,
           void *p0,
           void *p1)
{
	return cpu_signal_internal(target_proc, signal, p0, p1, FALSE);
}

kern_return_t
cpu_signal_deferred(cpu_data_t *target_proc)
{
	return cpu_signal_internal(target_proc, SIGPnop, NULL, NULL, TRUE);
}

void
cpu_signal_cancel(cpu_data_t *target_proc)
{
	/* TODO: Should we care about the state of a core as far as squashing deferred IPIs goes? */
	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
	}
}

void
cpu_signal_handler(void)
{
	cpu_signal_handler_internal(FALSE);
}

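/*
 * Dispatch every signal pending against this CPU, clearing each bit before
 * handling it, and loop until the pending mask (ignoring SIGPdisabled) is
 * empty. When disable_signal is TRUE, further signalling of this CPU is
 * disabled first; otherwise a previously set SIGPdisabled is cleared.
 */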
void
cpu_signal_handler_internal(boolean_t disable_signal)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	unsigned int cpu_signal;

	cpu_data_ptr->cpu_stat.ipi_cnt++;
	cpu_data_ptr->cpu_stat.ipi_cnt_wake++;

	SCHED_STATS_IPI(current_processor());

	cpu_signal = hw_atomic_or(&cpu_data_ptr->cpu_signal, 0);

	if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE))
		(void)hw_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled);
	else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE))
		(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdisabled);

	while (cpu_signal & ~SIGPdisabled) {
		if (cpu_signal & SIGPdec) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdec);
			rtclock_intr(FALSE);
		}
		if (cpu_signal & SIGPchud) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPchud);
			chudxnu_cpu_signal_handler();
		}
#if KPERF
		if (cpu_signal & SIGPkptimer) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPkptimer);
			kperf_signal_handler((unsigned int)cpu_data_ptr->cpu_number);
		}
#endif
		if (cpu_signal & SIGPxcall) {
			cpu_handle_xcall(cpu_data_ptr);
		}
		if (cpu_signal & SIGPast) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPast);
			ast_check(cpu_data_ptr->cpu_processor);
		}
		if (cpu_signal & SIGPdebug) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdebug);
			DebuggerXCall(cpu_data_ptr->cpu_int_state);
		}
#if __ARM_SMP__ && defined(ARMA7)
		if (cpu_signal & SIGPLWFlush) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPLWFlush);
			cache_xcall_handler(LWFlush);
		}
		if (cpu_signal & SIGPLWClean) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPLWClean);
			cache_xcall_handler(LWClean);
		}
#endif

		cpu_signal = hw_atomic_or(&cpu_data_ptr->cpu_signal, 0);
	}
}

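/*
 * Spin until the given (non-boot) CPU has published its sleep token,
 * indicating that it has reached the sleep/offline path.
 */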
void
cpu_exit_wait(int cpu)
{
	if (cpu != master_cpu) {
		cpu_data_t *cpu_data_ptr;

		cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
		while (!((*(volatile unsigned int *)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {};
	}
}

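/*
 * Per-CPU machine initialization: set up the platform cache dispatch if
 * present, notify the platform expert (passing whether this is the CPU's
 * first start), mark the CPU as started, and initialize interrupts.
 */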
void
cpu_machine_init(void)
{
	static boolean_t started = FALSE;
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = getCpuDatap();
	started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState);
	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
		platform_cache_init();
	PE_cpu_machine_init(cpu_data_ptr->cpu_id, !started);
	cpu_data_ptr->cpu_flags |= StartedState;
	ml_init_interrupt();
}

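/*
 * Return the statically allocated processor structure for the boot CPU,
 * or a zero-filled kalloc'd structure for a secondary CPU (NULL if the
 * allocation fails).
 */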
processor_t
cpu_processor_alloc(boolean_t is_boot_cpu)
{
	processor_t proc;

	if (is_boot_cpu)
		return &BootProcessor;

	proc = kalloc(sizeof(*proc));
	if (!proc)
		return NULL;

	bzero((void *) proc, sizeof(*proc));
	return proc;
}

void
cpu_processor_free(processor_t proc)
{
	if (proc != NULL && proc != &BootProcessor)
		kfree((void *) proc, sizeof(*proc));
}

processor_t
current_processor(void)
{
	return getCpuDatap()->cpu_processor;
}

processor_t
cpu_to_processor(int cpu)
{
	cpu_data_t *cpu_data = cpu_datap(cpu);
	if (cpu_data != NULL)
		return cpu_data->cpu_processor;
	else
		return NULL;
}

cpu_data_t *
processor_to_cpu_datap(processor_t processor)
{
	cpu_data_t *target_cpu_datap;

	assert(processor->cpu_id < MAX_CPUS);
	assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL);

	target_cpu_datap = (cpu_data_t *)CpuDataEntries[processor->cpu_id].cpu_data_vaddr;
	assert(target_cpu_datap->cpu_processor == processor);

	return target_cpu_datap;
}

ast_t *
ast_pending(void)
{
	return (&getCpuDatap()->cpu_pending_ast);
}

cpu_type_t
slot_type(int slot_num)
{
	return (cpu_datap(slot_num)->cpu_type);
}

cpu_subtype_t
slot_subtype(int slot_num)
{
	return (cpu_datap(slot_num)->cpu_subtype);
}

cpu_threadtype_t
slot_threadtype(int slot_num)
{
	return (cpu_datap(slot_num)->cpu_threadtype);
}

cpu_type_t
cpu_type(void)
{
	return (getCpuDatap()->cpu_type);
}

cpu_subtype_t
cpu_subtype(void)
{
	return (getCpuDatap()->cpu_subtype);
}

cpu_threadtype_t
cpu_threadtype(void)
{
	return (getCpuDatap()->cpu_threadtype);
}

int
cpu_number(void)
{
	return (getCpuDatap()->cpu_number);
}

uint64_t
ml_get_wake_timebase(void)
{
	return wake_abstime;
}