/*
 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:	arm/cpu_common.c
 *
 *	cpu routines common to all supported arm variants
 */
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <mach/processor_info.h>
#include <machine/atomic.h>
#include <machine/config.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>
#include <libkern/OSAtomic.h>
void kperf_signal_handler(unsigned int cpu_number);

cpu_data_t BootCpuData;
cpu_data_entry_t CpuDataEntries[MAX_CPUS];

struct processor BootProcessor;

unsigned int real_ncpus = 1;
boolean_t idle_enable = FALSE;
uint64_t wake_abstime = 0x0ULL;

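/*
 * CpuDataEntries maps a logical CPU index to the virtual address of that
 * CPU's cpu_data_t; the boot CPU's entry is backed by the statically
 * allocated BootCpuData above.
 */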
cpu_data_t *
cpu_datap(int cpu)
{
    assert(cpu < MAX_CPUS);
    return CpuDataEntries[cpu].cpu_data_vaddr;
}

kern_return_t
cpu_control(int slot_num,
    processor_info_t info,
    unsigned int count)
{
    printf("cpu_control(%d,%p,%d) not implemented\n",
        slot_num, info, count);
    return KERN_FAILURE;
}

kern_return_t
cpu_info_count(processor_flavor_t flavor,
    unsigned int *count)
{
    switch (flavor) {
    case PROCESSOR_CPU_STAT:
        *count = PROCESSOR_CPU_STAT_COUNT;
        return KERN_SUCCESS;

    case PROCESSOR_CPU_STAT64:
        *count = PROCESSOR_CPU_STAT64_COUNT;
        return KERN_SUCCESS;

    default:
        *count = 0;
        return KERN_FAILURE;
    }
}

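/*
 * cpu_info copies the per-CPU exception and IPI counters from the selected
 * slot's cpu_data_t into the caller-supplied buffer, using either the 32-bit
 * (PROCESSOR_CPU_STAT) or 64-bit (PROCESSOR_CPU_STAT64) layout.
 */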
kern_return_t
cpu_info(processor_flavor_t flavor, int slot_num, processor_info_t info,
    unsigned int *count)
{
    cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr;

    switch (flavor) {
    case PROCESSOR_CPU_STAT:
    {
        if (*count < PROCESSOR_CPU_STAT_COUNT) {
            return KERN_FAILURE;
        }
        processor_cpu_stat_t cpu_stat = (processor_cpu_stat_t)info;
        cpu_stat->irq_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.irq_ex_cnt;
        cpu_stat->ipi_cnt = (uint32_t)cpu_data_ptr->cpu_stat.ipi_cnt;
        cpu_stat->timer_cnt = (uint32_t)cpu_data_ptr->cpu_stat.timer_cnt;
        cpu_stat->undef_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.undef_ex_cnt;
        cpu_stat->unaligned_cnt = (uint32_t)cpu_data_ptr->cpu_stat.unaligned_cnt;
        cpu_stat->vfp_cnt = (uint32_t)cpu_data_ptr->cpu_stat.vfp_cnt;
        cpu_stat->vfp_shortv_cnt = 0;
        cpu_stat->data_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.data_ex_cnt;
        cpu_stat->instr_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.instr_ex_cnt;

        *count = PROCESSOR_CPU_STAT_COUNT;

        return KERN_SUCCESS;
    }
    case PROCESSOR_CPU_STAT64:
    {
        if (*count < PROCESSOR_CPU_STAT64_COUNT) {
            return KERN_FAILURE;
        }
        processor_cpu_stat64_t cpu_stat = (processor_cpu_stat64_t)info;
        cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt;
        cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt;
        cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt;
        cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt;
        cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt;
        cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt;
        cpu_stat->vfp_shortv_cnt = 0;
        cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt;
        cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt;
#if MONOTONIC
        cpu_stat->pmi_cnt = cpu_data_ptr->cpu_monotonic.mtc_npmis;
#endif /* MONOTONIC */

        *count = PROCESSOR_CPU_STAT64_COUNT;

        return KERN_SUCCESS;
    }

    default:
        return KERN_FAILURE;
    }
}

/*
 *	Routine:	cpu_doshutdown
 */
void
cpu_doshutdown(void (*doshutdown)(processor_t),
    processor_t processor)
{
    doshutdown(processor);
}

/*
 *	Routine:	cpu_idle_tickle
 */
void
cpu_idle_tickle(void)
{
    boolean_t intr;
    cpu_data_t *cpu_data_ptr;
    uint64_t new_idle_timeout_ticks = 0x0ULL;

    intr = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    if (cpu_data_ptr->idle_timer_notify != (void *)NULL) {
        ((idle_timer_t)cpu_data_ptr->idle_timer_notify)(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
        if (new_idle_timeout_ticks != 0x0ULL) {
            /* if a new idle timeout was requested set the new idle timer deadline */
            clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
        } else {
            /* turn off the idle timer */
            cpu_data_ptr->idle_timer_deadline = 0x0ULL;
        }
        timer_resync_deadlines();
    }
    (void) ml_set_interrupts_enabled(intr);
}

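/*
 * cpu_handle_xcall runs on the signalled CPU (from the IPI handler, or from
 * the drain path in cpu_signal_internal).  It snapshots the pending cross-call
 * function/parameter pair, clears the slot and the corresponding SIGP bit, and
 * then invokes the function.  Ordinary and immediate cross-calls use separate
 * parameter slots.
 */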
static void
cpu_handle_xcall(cpu_data_t *cpu_data_ptr)
{
    broadcastFunc xfunc;
    void *xparam;

    os_atomic_thread_fence(acquire);
    /* Come back around if cpu_signal_internal is running on another CPU and has just
     * added SIGPxcall to the pending mask, but hasn't yet assigned the call params.*/
    if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) {
        xfunc = cpu_data_ptr->cpu_xcall_p0;
        xparam = cpu_data_ptr->cpu_xcall_p1;
        cpu_data_ptr->cpu_xcall_p0 = NULL;
        cpu_data_ptr->cpu_xcall_p1 = NULL;
        os_atomic_thread_fence(acq_rel);
        os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcall, relaxed);
        xfunc(xparam);
    }
    if (cpu_data_ptr->cpu_imm_xcall_p0 != NULL && cpu_data_ptr->cpu_imm_xcall_p1 != NULL) {
        xfunc = cpu_data_ptr->cpu_imm_xcall_p0;
        xparam = cpu_data_ptr->cpu_imm_xcall_p1;
        cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
        cpu_data_ptr->cpu_imm_xcall_p1 = NULL;
        os_atomic_thread_fence(acq_rel);
        os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcallImm, relaxed);
        xfunc(xparam);
    }
}

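/*
 * cpu_broadcast_xcall_internal asks every other started CPU to run func(parm),
 * counting CPUs that could not be signalled in failsig.  The synch counter is
 * used as a rendezvous: the caller arms a wait on it, the counter is adjusted
 * for the caller and for any failed signals, and the caller either wakes
 * immediately or blocks until the last responder brings the count to zero.
 * The return value is the number of CPUs that were actually signalled.
 */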
static unsigned int
cpu_broadcast_xcall_internal(unsigned int signal,
    uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
    boolean_t intr;
    cpu_data_t *cpu_data_ptr;
    cpu_data_t *target_cpu_datap;
    unsigned int failsig;
    int cpu;
    int max_cpu = ml_get_max_cpu_number() + 1;

    intr = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    failsig = 0;

    assert_wait((event_t)synch, THREAD_UNINT);

    for (cpu = 0; cpu < max_cpu; cpu++) {
        target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

        if (target_cpu_datap == cpu_data_ptr) {
            continue;
        }

        if ((target_cpu_datap == NULL) ||
            KERN_SUCCESS != cpu_signal(target_cpu_datap, signal, (void *)func, parm)) {
            failsig++;
        }
    }

    if (self_xcall) {
        func(parm);
    }

    (void) ml_set_interrupts_enabled(intr);

    if (os_atomic_sub(synch, (!self_xcall) ? failsig + 1 : failsig, relaxed) == 0) {
        clear_wait(current_thread(), THREAD_AWAKENED);
    } else {
        thread_block(THREAD_CONTINUE_NULL);
    }

    if (!self_xcall) {
        return max_cpu - failsig - 1;
    } else {
        return max_cpu - failsig;
    }
}

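/*
 * cpu_broadcast_xcall / cpu_broadcast_immediate_xcall are thin wrappers that
 * select the ordinary or immediate cross-call signal.  Illustrative use only
 * ("flush_fn" and "arg" are hypothetical):
 *
 *	uint32_t synch;
 *	unsigned int n = cpu_broadcast_xcall(&synch, TRUE, flush_fn, &arg);
 */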
unsigned int
cpu_broadcast_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
    return cpu_broadcast_xcall_internal(SIGPxcall, synch, self_xcall, func, parm);
}

unsigned int
cpu_broadcast_immediate_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
    return cpu_broadcast_xcall_internal(SIGPxcallImm, synch, self_xcall, func, parm);
}

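/*
 * Targeted cross-calls: cpu_xcall / cpu_immediate_xcall validate the CPU
 * number and arguments and then signal a single CPU.  Illustrative use only
 * ("target_cpu", "probe_fn" and "probe_arg" are hypothetical):
 *
 *	kern_return_t kr = cpu_xcall(target_cpu, probe_fn, &probe_arg);
 */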
static kern_return_t
cpu_xcall_internal(unsigned int signal, int cpu_number, broadcastFunc func, void *param)
{
    cpu_data_t *target_cpu_datap;

    if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number())) {
        return KERN_INVALID_ARGUMENT;
    }

    if (func == NULL || param == NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu_number].cpu_data_vaddr;
    if (target_cpu_datap == NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return cpu_signal(target_cpu_datap, signal, (void*)func, param);
}

kern_return_t
cpu_xcall(int cpu_number, broadcastFunc func, void *param)
{
    return cpu_xcall_internal(SIGPxcall, cpu_number, func, param);
}

kern_return_t
cpu_immediate_xcall(int cpu_number, broadcastFunc func, void *param)
{
    return cpu_xcall_internal(SIGPxcallImm, cpu_number, func, param);
}

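/*
 * cpu_signal_internal posts a SIGP bit in the target CPU's pending-signal mask
 * and then raises (or defers) the physical IPI.  For cross-call signals the
 * bit is set with a compare-and-swap loop so the xcall parameter slot is only
 * claimed once; while the CAS fails, the sender drains its own pending xcalls
 * to avoid deadlocking against a CPU that is simultaneously trying to xcall it.
 */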
static kern_return_t
cpu_signal_internal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1,
    boolean_t defer)
{
    unsigned int Check_SIGPdisabled;
    int current_signals;
    Boolean swap_success;
    boolean_t interruptible = ml_set_interrupts_enabled(FALSE);
    cpu_data_t *current_proc = getCpuDatap();

    /* We'll mandate that only IPIs meant to kick a core out of idle may ever be deferred. */
    if (defer) {
        assert(signal == SIGPnop);
    }

    if (current_proc != target_proc) {
        Check_SIGPdisabled = SIGPdisabled;
    } else {
        Check_SIGPdisabled = 0;
    }

    if ((signal == SIGPxcall) || (signal == SIGPxcallImm)) {
        do {
            current_signals = target_proc->cpu_signal;
            if ((current_signals & SIGPdisabled) == SIGPdisabled) {
                ml_set_interrupts_enabled(interruptible);
                return KERN_FAILURE;
            }

            swap_success = OSCompareAndSwap(current_signals & (~signal), current_signals | signal,
                &target_proc->cpu_signal);

            if (!swap_success && (signal == SIGPxcallImm) && (target_proc->cpu_signal & SIGPxcallImm)) {
                ml_set_interrupts_enabled(interruptible);
                return KERN_ALREADY_WAITING;
            }

            /* Drain pending xcalls on this cpu; the CPU we're trying to xcall may in turn
             * be trying to xcall us.  Since we have interrupts disabled that can deadlock,
             * so break the deadlock by draining pending xcalls. */
            if (!swap_success && (current_proc->cpu_signal & signal)) {
                cpu_handle_xcall(current_proc);
            }
        } while (!swap_success);

        if (signal == SIGPxcallImm) {
            target_proc->cpu_imm_xcall_p0 = p0;
            target_proc->cpu_imm_xcall_p1 = p1;
        } else {
            target_proc->cpu_xcall_p0 = p0;
            target_proc->cpu_xcall_p1 = p1;
        }
    } else {
        do {
            current_signals = target_proc->cpu_signal;
            if ((Check_SIGPdisabled != 0) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) {
                ml_set_interrupts_enabled(interruptible);
                return KERN_FAILURE;
            }

            swap_success = OSCompareAndSwap(current_signals, current_signals | signal,
                &target_proc->cpu_signal);
        } while (!swap_success);
    }

    /*
     * Issue DSB here to guarantee: 1) prior stores to pending signal mask and xcall params
     * will be visible to other cores when the IPI is dispatched, and 2) subsequent
     * instructions to signal the other cores will not execute until after the barrier.
     * DMB would be sufficient to guarantee 1) but not 2).
     */
    __builtin_arm_dsb(DSB_ISH);
    if (!(target_proc->cpu_signal & SIGPdisabled)) {
        if (defer) {
            PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
        } else {
            PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
        }
    }

    ml_set_interrupts_enabled(interruptible);
    return KERN_SUCCESS;
}

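/*
 * Public entry points: cpu_signal raises the IPI immediately, while
 * cpu_signal_deferred posts a SIGPnop that the platform layer may coalesce and
 * deliver later; per the assert in cpu_signal_internal, only idle-kick IPIs
 * may be deferred.
 */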
kern_return_t
cpu_signal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1)
{
    return cpu_signal_internal(target_proc, signal, p0, p1, FALSE);
}

kern_return_t
cpu_signal_deferred(cpu_data_t *target_proc)
{
    return cpu_signal_internal(target_proc, SIGPnop, NULL, NULL, TRUE);
}

void
cpu_signal_cancel(cpu_data_t *target_proc)
{
    /* TODO: Should we care about the state of a core as far as squashing deferred IPIs goes? */
    if (!(target_proc->cpu_signal & SIGPdisabled)) {
        PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
    }
}

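/*
 * cpu_signal_handler is the IPI entry point for the current CPU.  The internal
 * variant dispatches each pending SIGP bit in turn (decrementer, kperf timer,
 * cross-calls, AST check, debugger, and ARMv7 SMP cache maintenance), then
 * re-reads the mask and loops until no bits other than SIGPdisabled remain.
 */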
void
cpu_signal_handler(void)
{
    cpu_signal_handler_internal(FALSE);
}

void
cpu_signal_handler_internal(boolean_t disable_signal)
{
    cpu_data_t *cpu_data_ptr = getCpuDatap();
    unsigned int cpu_signal;

    cpu_data_ptr->cpu_stat.ipi_cnt++;
    cpu_data_ptr->cpu_stat.ipi_cnt_wake++;

    SCHED_STATS_IPI(current_processor());

    cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);

    if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE)) {
        os_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
    } else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE)) {
        os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
    }

    while (cpu_signal & ~SIGPdisabled) {
        if (cpu_signal & SIGPdec) {
            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdec, relaxed);
            rtclock_intr(FALSE);
        }
        if (cpu_signal & SIGPkptimer) {
            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPkptimer, relaxed);
            kperf_signal_handler((unsigned int)cpu_data_ptr->cpu_number);
        }
        if (cpu_signal & (SIGPxcall | SIGPxcallImm)) {
            cpu_handle_xcall(cpu_data_ptr);
        }
        if (cpu_signal & SIGPast) {
            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPast, relaxed);
            ast_check(cpu_data_ptr->cpu_processor);
        }
        if (cpu_signal & SIGPdebug) {
            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdebug, relaxed);
            DebuggerXCall(cpu_data_ptr->cpu_int_state);
        }
#if __ARM_SMP__ && defined(ARMA7)
        if (cpu_signal & SIGPLWFlush) {
            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWFlush, relaxed);
            cache_xcall_handler(LWFlush);
        }
        if (cpu_signal & SIGPLWClean) {
            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWClean, relaxed);
            cache_xcall_handler(LWClean);
        }
#endif

        cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);
    }
}

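/*
 * cpu_exit_wait spins until the departing CPU publishes ARM_CPU_ON_SLEEP_PATH
 * in its cpu_sleep_token, indicating it has reached the sleep/offline path.
 */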
void
cpu_exit_wait(int cpu)
{
    if (cpu != master_cpu) {
        cpu_data_t *cpu_data_ptr;

        cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
        while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {
        }
    }
}

boolean_t
cpu_can_exit(__unused int cpu)
{
    return TRUE;
}

void
cpu_machine_init(void)
{
    static boolean_t started = FALSE;
    cpu_data_t *cpu_data_ptr;

    cpu_data_ptr = getCpuDatap();
    started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState);
    if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
        platform_cache_init();
    }

    /* Note: this calls IOCPURunPlatformActiveActions when resuming on boot cpu */
    PE_cpu_machine_init(cpu_data_ptr->cpu_id, !started);

    cpu_data_ptr->cpu_flags |= StartedState;
}

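/*
 * Processor structure management: the boot CPU uses the statically allocated
 * BootProcessor, while secondary CPUs get a zero-filled structure from kalloc
 * that cpu_processor_free later returns to the allocator.
 */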
processor_t
cpu_processor_alloc(boolean_t is_boot_cpu)
{
    processor_t proc;

    if (is_boot_cpu) {
        return &BootProcessor;
    }

    proc = kalloc(sizeof(*proc));
    if (!proc) {
        return NULL;
    }

    bzero((void *) proc, sizeof(*proc));
    return proc;
}

void
cpu_processor_free(processor_t proc)
{
    if (proc != NULL && proc != &BootProcessor) {
        kfree(proc, sizeof(*proc));
    }
}

processor_t
current_processor(void)
{
    return getCpuDatap()->cpu_processor;
}

processor_t
cpu_to_processor(int cpu)
{
    cpu_data_t *cpu_data = cpu_datap(cpu);
    if (cpu_data != NULL) {
        return cpu_data->cpu_processor;
    } else {
        return NULL;
    }
}

cpu_data_t *
processor_to_cpu_datap(processor_t processor)
{
    cpu_data_t *target_cpu_datap;

    assert(processor->cpu_id < MAX_CPUS);
    assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL);

    target_cpu_datap = (cpu_data_t *)CpuDataEntries[processor->cpu_id].cpu_data_vaddr;
    assert(target_cpu_datap->cpu_processor == processor);

    return target_cpu_datap;
}

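/*
 * cpu_data_alloc: the boot CPU reuses the static BootCpuData; secondary CPUs
 * get a zeroed cpu_data_t (plus per-CPU stacks via cpu_stack_alloc) from
 * kmem_alloc.  Any failure takes the common error path and panics.
 */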
cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
    cpu_data_t *cpu_data_ptr = NULL;

    if (is_boot_cpu) {
        cpu_data_ptr = &BootCpuData;
    } else {
        if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) {
            goto cpu_data_alloc_error;
        }

        bzero((void *)cpu_data_ptr, sizeof(cpu_data_t));

        cpu_stack_alloc(cpu_data_ptr);
    }

    cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu);
    if (cpu_data_ptr->cpu_processor == (struct processor *)NULL) {
        goto cpu_data_alloc_error;
    }

    return cpu_data_ptr;

cpu_data_alloc_error:
    panic("cpu_data_alloc() failed\n");
    return (cpu_data_t *)NULL;
}

ast_t *
ast_pending(void)
{
    return &getCpuDatap()->cpu_pending_ast;
}

cpu_type_t
slot_type(int slot_num)
{
    return cpu_datap(slot_num)->cpu_type;
}

cpu_subtype_t
slot_subtype(int slot_num)
{
    return cpu_datap(slot_num)->cpu_subtype;
}

cpu_threadtype_t
slot_threadtype(int slot_num)
{
    return cpu_datap(slot_num)->cpu_threadtype;
}

cpu_type_t
cpu_type(void)
{
    return getCpuDatap()->cpu_type;
}

cpu_subtype_t
cpu_subtype(void)
{
    return getCpuDatap()->cpu_subtype;
}

cpu_threadtype_t
cpu_threadtype(void)
{
    return getCpuDatap()->cpu_threadtype;
}

int
cpu_number(void)
{
    return getCpuDatap()->cpu_number;
}

uint64_t
ml_get_wake_timebase(void)
{
    return wake_abstime;
}