/*
 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm/cpu_common.c
 *
 * cpu routines common to all supported arm variants
 */
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <mach/processor_info.h>
#include <machine/atomic.h>
#include <machine/config.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>
#include <libkern/OSAtomic.h>
void kperf_signal_handler(unsigned int cpu_number);

cpu_data_t              BootCpuData;
cpu_data_entry_t        CpuDataEntries[MAX_CPUS];

struct processor        BootProcessor;

unsigned int            real_ncpus = 1;
boolean_t               idle_enable = FALSE;
uint64_t                wake_abstime = 0x0ULL;
cpu_data_t *
cpu_datap(int cpu)
{
	assert(cpu < MAX_CPUS);
	return CpuDataEntries[cpu].cpu_data_vaddr;
}
kern_return_t
cpu_control(int slot_num,
    processor_info_t info,
    unsigned int count)
{
	printf("cpu_control(%d,%p,%d) not implemented\n",
	    slot_num, info, count);
	return KERN_FAILURE;
}
kern_return_t
cpu_info_count(processor_flavor_t flavor,
    unsigned int *count)
{
	switch (flavor) {
	case PROCESSOR_CPU_STAT:
		*count = PROCESSOR_CPU_STAT_COUNT;
		return KERN_SUCCESS;

	case PROCESSOR_CPU_STAT64:
		*count = PROCESSOR_CPU_STAT64_COUNT;
		return KERN_SUCCESS;

	default:
		return KERN_FAILURE;
	}
}
kern_return_t
cpu_info(processor_flavor_t flavor, int slot_num, processor_info_t info,
    unsigned int *count)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr;

	switch (flavor) {
	case PROCESSOR_CPU_STAT:
	{
		if (*count < PROCESSOR_CPU_STAT_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat_t cpu_stat = (processor_cpu_stat_t)info;
		cpu_stat->irq_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = (uint32_t)cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = (uint32_t)cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = (uint32_t)cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = (uint32_t)cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.instr_ex_cnt;

		*count = PROCESSOR_CPU_STAT_COUNT;

		return KERN_SUCCESS;
	}

	case PROCESSOR_CPU_STAT64:
	{
		if (*count < PROCESSOR_CPU_STAT64_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat64_t cpu_stat = (processor_cpu_stat64_t)info;
		cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt;
		cpu_stat->pmi_cnt = cpu_data_ptr->cpu_stat.pmi_cnt;

		*count = PROCESSOR_CPU_STAT64_COUNT;

		return KERN_SUCCESS;
	}

	default:
		return KERN_FAILURE;
	}
}
/*
 * Routine: cpu_doshutdown
 */
void
cpu_doshutdown(void (*doshutdown)(processor_t),
    processor_t processor)
{
	doshutdown(processor);
}
/*
 * Routine: cpu_idle_tickle
 */
void
cpu_idle_tickle(void)
{
	boolean_t       intr;
	cpu_data_t      *cpu_data_ptr;
	uint64_t        new_idle_timeout_ticks = 0x0ULL;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr->idle_timer_notify != (void *)NULL) {
		((idle_timer_t)cpu_data_ptr->idle_timer_notify)(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
		if (new_idle_timeout_ticks != 0x0ULL) {
			/* if a new idle timeout was requested set the new idle timer deadline */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		} else {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		}
		timer_resync_deadlines();
	}
	(void) ml_set_interrupts_enabled(intr);
}
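/*
 * Explanatory sketch (not part of the original source): cpu_idle_tickle()
 * consults the per-CPU idle_timer_notify callback, which either requests a new
 * deadline (in absolute-time ticks) or returns 0 to cancel it.  A hypothetical
 * client installs a callback matching idle_timer_t along with a refcon; the
 * registration shown here writes the cpu_data fields directly and is only an
 * illustration of the contract, not the supported registration path.
 *
 *	static void
 *	my_idle_timer(void *refcon, uint64_t *new_timeout_ticks)
 *	{
 *		// ask to be called again roughly 1 ms from now
 *		nanoseconds_to_absolutetime(NSEC_PER_MSEC, new_timeout_ticks);
 *	}
 *
 *	cpu_data_t *cdp = getCpuDatap();
 *	cdp->idle_timer_refcon = my_refcon;              // hypothetical state pointer
 *	cdp->idle_timer_notify = (void *)my_idle_timer;
 */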
static void
cpu_handle_xcall(cpu_data_t *cpu_data_ptr)
{
	broadcastFunc   xfunc;
	void            *xparam;

	__c11_atomic_thread_fence(memory_order_acquire_smp);
	/* Come back around if cpu_signal_internal is running on another CPU and has just
	 * added SIGPxcall to the pending mask, but hasn't yet assigned the call params.*/
	if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_xcall_p0;
		xparam = cpu_data_ptr->cpu_xcall_p1;
		cpu_data_ptr->cpu_xcall_p0 = NULL;
		cpu_data_ptr->cpu_xcall_p1 = NULL;
		__c11_atomic_thread_fence(memory_order_acq_rel_smp);
		hw_atomic_and_noret(&cpu_data_ptr->cpu_signal, ~SIGPxcall);
		xfunc(xparam);
	}
}
unsigned int
cpu_broadcast_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	boolean_t       intr;
	cpu_data_t      *cpu_data_ptr;
	cpu_data_t      *target_cpu_datap;
	unsigned int    failsig;
	int             cpu;
	int             max_cpu;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	failsig = 0;

	if (synch != NULL) {
		*synch = real_ncpus;
		assert_wait((event_t)synch, THREAD_UNINT);
	}

	max_cpu = ml_get_max_cpu_number();
	for (cpu = 0; cpu <= max_cpu; cpu++) {
		target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

		if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
			continue;
		}

		if (KERN_SUCCESS != cpu_signal(target_cpu_datap, SIGPxcall, (void *)func, parm)) {
			failsig++;
		}
	}

	if (self_xcall) {
		func(parm);
	}

	(void) ml_set_interrupts_enabled(intr);

	if (synch != NULL) {
		if (hw_atomic_sub(synch, (!self_xcall) ? failsig + 1 : failsig) == 0) {
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			thread_block(THREAD_CONTINUE_NULL);
		}
	}

	if (!self_xcall) {
		return real_ncpus - failsig - 1;
	} else {
		return real_ncpus - failsig;
	}
}
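/*
 * Explanatory sketch (not part of the original source): cpu_broadcast_xcall()
 * initializes *synch to real_ncpus and then sleeps on it, subtracting only its
 * own share plus any failed signals.  The countdown logic above therefore
 * implies that the broadcast function itself must decrement the synch word once
 * per CPU it runs on and wake the waiter when it reaches zero.  The callback
 * name below is hypothetical.
 *
 *	static void
 *	count_me_in(void *arg)
 *	{
 *		uint32_t *countp = (uint32_t *)arg;
 *		// ... per-CPU work goes here ...
 *		if (hw_atomic_sub(countp, 1) == 0) {
 *			thread_wakeup((event_t)countp);
 *		}
 *	}
 *
 *	uint32_t sync;   // initialized to real_ncpus inside cpu_broadcast_xcall()
 *	unsigned int ran = cpu_broadcast_xcall(&sync, TRUE, count_me_in, &sync);
 */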
kern_return_t
cpu_xcall(int cpu_number, broadcastFunc func, void *param)
{
	cpu_data_t      *target_cpu_datap;

	if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number())) {
		return KERN_INVALID_ARGUMENT;
	}

	target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu_number].cpu_data_vaddr;
	if (target_cpu_datap == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return cpu_signal(target_cpu_datap, SIGPxcall, (void*)func, param);
}
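/*
 * Illustrative usage (not part of the original source): run a handler on one
 * specific core.  The handler and argument are hypothetical; a non-KERN_SUCCESS
 * return means the cpu number was out of range, the core has no cpu_data yet,
 * or the target currently has signals disabled.
 *
 *	extern void my_percpu_handler(void *arg);	// hypothetical broadcastFunc
 *
 *	if (cpu_xcall(target_cpu, my_percpu_handler, NULL) != KERN_SUCCESS) {
 *		// not delivered; caller decides whether to retry or skip
 *	}
 */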
static kern_return_t
cpu_signal_internal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1,
    boolean_t defer)
{
	unsigned int    Check_SIGPdisabled;
	int             current_signals;
	Boolean         swap_success;
	boolean_t       interruptible = ml_set_interrupts_enabled(FALSE);
	cpu_data_t      *current_proc = getCpuDatap();

	/* We'll mandate that only IPIs meant to kick a core out of idle may ever be deferred. */
	if (defer) {
		assert(signal == SIGPnop);
	}

	if (current_proc != target_proc) {
		Check_SIGPdisabled = SIGPdisabled;
	} else {
		Check_SIGPdisabled = 0;
	}

	if (signal == SIGPxcall) {
		do {
			current_signals = target_proc->cpu_signal;
			if ((current_signals & SIGPdisabled) == SIGPdisabled) {
#if DEBUG || DEVELOPMENT
				target_proc->failed_signal = SIGPxcall;
				target_proc->failed_xcall = p0;
				OSIncrementAtomicLong(&target_proc->failed_signal_count);
#endif
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}
			swap_success = OSCompareAndSwap(current_signals & (~SIGPxcall), current_signals | SIGPxcall,
			    &target_proc->cpu_signal);

			/* Drain pending xcalls on this cpu; the CPU we're trying to xcall may in turn
			 * be trying to xcall us.  Since we have interrupts disabled that can deadlock,
			 * so break the deadlock by draining pending xcalls. */
			if (!swap_success && (current_proc->cpu_signal & SIGPxcall)) {
				cpu_handle_xcall(current_proc);
			}
		} while (!swap_success);

		target_proc->cpu_xcall_p0 = p0;
		target_proc->cpu_xcall_p1 = p1;
	} else {
		do {
			current_signals = target_proc->cpu_signal;
			if ((Check_SIGPdisabled != 0) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) {
#if DEBUG || DEVELOPMENT
				target_proc->failed_signal = signal;
				OSIncrementAtomicLong(&target_proc->failed_signal_count);
#endif
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}

			swap_success = OSCompareAndSwap(current_signals, current_signals | signal,
			    &target_proc->cpu_signal);
		} while (!swap_success);
	}

	/*
	 * Issue DSB here to guarantee: 1) prior stores to pending signal mask and xcall params
	 * will be visible to other cores when the IPI is dispatched, and 2) subsequent
	 * instructions to signal the other cores will not execute until after the barrier.
	 * DMB would be sufficient to guarantee 1) but not 2).
	 */
	__builtin_arm_dsb(DSB_ISH);

	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		if (defer) {
			PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
		} else {
			PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
		}
	}

	ml_set_interrupts_enabled(interruptible);
	return KERN_SUCCESS;
}
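/*
 * Explanatory note (not part of the original source): summary of the SIGPxcall
 * handshake implemented by cpu_signal_internal() and cpu_handle_xcall().
 *
 *	sender (cpu_signal_internal)             receiver (cpu_signal_handler)
 *	1. CAS SIGPxcall into cpu_signal         a. sees SIGPxcall set in cpu_signal
 *	2. store cpu_xcall_p0 / cpu_xcall_p1     b. acquire fence, re-checks that both
 *	3. DSB, then dispatch the IPI               params are non-NULL, else retries
 *	                                         c. clears the params and SIGPxcall,
 *	                                            then invokes the callback
 *
 * The CAS only succeeds while SIGPxcall is clear, so at most one xcall is in
 * flight per target and the non-NULL check on the receiver side is sufficient
 * to detect the window between steps 1 and 2.
 */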
kern_return_t
cpu_signal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1)
{
	return cpu_signal_internal(target_proc, signal, p0, p1, FALSE);
}
kern_return_t
cpu_signal_deferred(cpu_data_t *target_proc)
{
	return cpu_signal_internal(target_proc, SIGPnop, NULL, NULL, TRUE);
}
void
cpu_signal_cancel(cpu_data_t *target_proc)
{
	/* TODO: Should we care about the state of a core as far as squashing deferred IPIs goes? */
	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
	}
}
void
cpu_signal_handler(void)
{
	cpu_signal_handler_internal(FALSE);
}
void
cpu_signal_handler_internal(boolean_t disable_signal)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	unsigned int cpu_signal;

	cpu_data_ptr->cpu_stat.ipi_cnt++;
	cpu_data_ptr->cpu_stat.ipi_cnt_wake++;

	SCHED_STATS_IPI(current_processor());

	cpu_signal = hw_atomic_or(&cpu_data_ptr->cpu_signal, 0);

	if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE)) {
		(void)hw_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled);
	} else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE)) {
		(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdisabled);
	}

	while (cpu_signal & ~SIGPdisabled) {
		if (cpu_signal & SIGPdec) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdec);
			rtclock_intr(FALSE);
		}
		if (cpu_signal & SIGPkptimer) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPkptimer);
			kperf_signal_handler((unsigned int)cpu_data_ptr->cpu_number);
		}
		if (cpu_signal & SIGPxcall) {
			cpu_handle_xcall(cpu_data_ptr);
		}
		if (cpu_signal & SIGPast) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPast);
			ast_check(cpu_data_ptr->cpu_processor);
		}
		if (cpu_signal & SIGPdebug) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdebug);
			DebuggerXCall(cpu_data_ptr->cpu_int_state);
		}
#if __ARM_SMP__ && defined(ARMA7)
		if (cpu_signal & SIGPLWFlush) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPLWFlush);
			cache_xcall_handler(LWFlush);
		}
		if (cpu_signal & SIGPLWClean) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPLWClean);
			cache_xcall_handler(LWClean);
		}
#endif

		cpu_signal = hw_atomic_or(&cpu_data_ptr->cpu_signal, 0);
	}
}
void
cpu_exit_wait(int cpu)
{
	if (cpu != master_cpu) {
		cpu_data_t      *cpu_data_ptr;

		cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
		while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {
		}
	}
}
boolean_t
cpu_can_exit(__unused int cpu)
{
	return TRUE;
}
void
cpu_machine_init(void)
{
	static boolean_t started = FALSE;
	cpu_data_t      *cpu_data_ptr;

	cpu_data_ptr = getCpuDatap();
	started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState);
	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		platform_cache_init();
	}
	PE_cpu_machine_init(cpu_data_ptr->cpu_id, !started);
	cpu_data_ptr->cpu_flags |= StartedState;
	ml_init_interrupt();
}
processor_t
cpu_processor_alloc(boolean_t is_boot_cpu)
{
	processor_t proc;

	if (is_boot_cpu) {
		return &BootProcessor;
	}

	proc = kalloc(sizeof(*proc));
	if (!proc) {
		return NULL;
	}

	bzero((void *) proc, sizeof(*proc));
	return proc;
}
void
cpu_processor_free(processor_t proc)
{
	if (proc != NULL && proc != &BootProcessor) {
		kfree(proc, sizeof(*proc));
	}
}
processor_t
current_processor(void)
{
	return getCpuDatap()->cpu_processor;
}
processor_t
cpu_to_processor(int cpu)
{
	cpu_data_t *cpu_data = cpu_datap(cpu);
	if (cpu_data != NULL) {
		return cpu_data->cpu_processor;
	} else {
		return NULL;
	}
}
cpu_data_t *
processor_to_cpu_datap(processor_t processor)
{
	cpu_data_t *target_cpu_datap;

	assert(processor->cpu_id < MAX_CPUS);
	assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL);

	target_cpu_datap = (cpu_data_t *)CpuDataEntries[processor->cpu_id].cpu_data_vaddr;
	assert(target_cpu_datap->cpu_processor == processor);

	return target_cpu_datap;
}
cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	cpu_data_t *cpu_data_ptr = NULL;

	if (is_boot_cpu) {
		cpu_data_ptr = &BootCpuData;
	} else {
		if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) {
			goto cpu_data_alloc_error;
		}

		bzero((void *)cpu_data_ptr, sizeof(cpu_data_t));

		cpu_stack_alloc(cpu_data_ptr);
	}

	cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu);
	if (cpu_data_ptr->cpu_processor == (struct processor *)NULL) {
		goto cpu_data_alloc_error;
	}

	return cpu_data_ptr;

cpu_data_alloc_error:
	panic("cpu_data_alloc() failed\n");
	return (cpu_data_t *)NULL;
}
ast_t *
ast_pending(void)
{
	return &getCpuDatap()->cpu_pending_ast;
}
cpu_type_t
slot_type(int slot_num)
{
	return cpu_datap(slot_num)->cpu_type;
}

cpu_subtype_t
slot_subtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_subtype;
}

cpu_threadtype_t
slot_threadtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_threadtype;
}

cpu_type_t
cpu_type(void)
{
	return getCpuDatap()->cpu_type;
}

cpu_subtype_t
cpu_subtype(void)
{
	return getCpuDatap()->cpu_subtype;
}

cpu_threadtype_t
cpu_threadtype(void)
{
	return getCpuDatap()->cpu_threadtype;
}

int
cpu_number(void)
{
	return getCpuDatap()->cpu_number;
}

uint64_t
ml_get_wake_timebase(void)
{
	return wake_abstime;
}