/*
 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:	arm/cpu_common.c
 *
 *	cpu routines common to all supported arm variants
 */

#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/percpu.h>
#include <kern/timer_queue.h>
#include <kern/locks.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <mach/processor_info.h>
#include <machine/atomic.h>
#include <machine/config.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>
#include <arm/proc_reg.h>
#include <libkern/OSAtomic.h>
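
/*
 * Per-CPU bookkeeping shared by all ARM variants: the descriptor for the
 * statically-reserved percpu region, the boot CPU's cpu_data, and the table
 * used to look up each CPU's cpu_data by logical CPU number.  cpu_state_lock
 * serializes CPU state transitions against code that iterates over CPUs.
 */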
SECURITY_READ_ONLY_LATE(struct percpu_base) percpu_base;
vm_address_t     percpu_base_cur;
cpu_data_t       PERCPU_DATA(cpu_data);
cpu_data_entry_t CpuDataEntries[MAX_CPUS];

static LCK_GRP_DECLARE(cpu_lck_grp, "cpu_lck_grp");
static LCK_RW_DECLARE(cpu_state_lock, &cpu_lck_grp);

unsigned int    real_ncpus = 1;
boolean_t       idle_enable = FALSE;
uint64_t        wake_abstime = 0x0ULL;

extern uint64_t xcall_ack_timeout_abstime;

#if defined(HAS_IPI)
extern unsigned int gFastIPI;
#endif /* defined(HAS_IPI) */

cpu_data_t *
cpu_datap(int cpu)
{
	assert(cpu <= ml_get_max_cpu_number());
	return CpuDataEntries[cpu].cpu_data_vaddr;
}

kern_return_t
cpu_control(int slot_num,
    processor_info_t info,
    unsigned int count)
{
	printf("cpu_control(%d,%p,%d) not implemented\n",
	    slot_num, info, count);
	return KERN_FAILURE;
}

kern_return_t
cpu_info_count(processor_flavor_t flavor,
    unsigned int *count)
{
	switch (flavor) {
	case PROCESSOR_CPU_STAT:
		*count = PROCESSOR_CPU_STAT_COUNT;
		return KERN_SUCCESS;

	case PROCESSOR_CPU_STAT64:
		*count = PROCESSOR_CPU_STAT64_COUNT;
		return KERN_SUCCESS;

	default:
		*count = 0;
		return KERN_FAILURE;
	}
}

kern_return_t
cpu_info(processor_flavor_t flavor, int slot_num, processor_info_t info,
    unsigned int *count)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr;

	switch (flavor) {
	case PROCESSOR_CPU_STAT:
	{
		if (*count < PROCESSOR_CPU_STAT_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat_t cpu_stat = (processor_cpu_stat_t)info;
		cpu_stat->irq_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = (uint32_t)cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = (uint32_t)cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = (uint32_t)cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = (uint32_t)cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.instr_ex_cnt;

		*count = PROCESSOR_CPU_STAT_COUNT;

		return KERN_SUCCESS;
	}

	case PROCESSOR_CPU_STAT64:
	{
		if (*count < PROCESSOR_CPU_STAT64_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat64_t cpu_stat = (processor_cpu_stat64_t)info;
		cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt;
#if MONOTONIC
		cpu_stat->pmi_cnt = cpu_data_ptr->cpu_monotonic.mtc_npmis;
#endif /* MONOTONIC */

		*count = PROCESSOR_CPU_STAT64_COUNT;

		return KERN_SUCCESS;
	}

	default:
		return KERN_FAILURE;
	}
}

/*
 *	Routine:	cpu_doshutdown
 */
void
cpu_doshutdown(void (*doshutdown)(processor_t),
    processor_t processor)
{
	doshutdown(processor);
}

/*
 *	Routine:	cpu_idle_tickle
 */
void
cpu_idle_tickle(void)
{
	boolean_t       intr;
	cpu_data_t      *cpu_data_ptr;
	uint64_t        new_idle_timeout_ticks = 0x0ULL;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		cpu_data_ptr->idle_timer_notify(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
		if (new_idle_timeout_ticks != 0x0ULL) {
			/* if a new idle timeout was requested set the new idle timer deadline */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		} else {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		}
		timer_resync_deadlines();
	}
	(void) ml_set_interrupts_enabled(intr);
}
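
/*
 * Run any cross-call handlers that cpu_signal_internal() has posted to this
 * CPU.  Both the normal (SIGPxcall) and immediate (SIGPxcallImm) slots are
 * drained; for each slot the call parameters are captured and cleared, the
 * pending-signal bit is dropped, and only then is the handler invoked.
 */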
static void
cpu_handle_xcall(cpu_data_t *cpu_data_ptr)
{
	broadcastFunc   xfunc;
	void            *xparam;

	os_atomic_thread_fence(acquire);
	/* Come back around if cpu_signal_internal is running on another CPU and has just
	 * added SIGPxcall to the pending mask, but hasn't yet assigned the call params. */
	if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_xcall_p0;
		INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI);
		xparam = cpu_data_ptr->cpu_xcall_p1;
		cpu_data_ptr->cpu_xcall_p0 = NULL;
		cpu_data_ptr->cpu_xcall_p1 = NULL;
		os_atomic_thread_fence(acq_rel);
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcall, relaxed);
		xfunc(xparam);
		INTERRUPT_MASKED_DEBUG_END();
	}
	if (cpu_data_ptr->cpu_imm_xcall_p0 != NULL && cpu_data_ptr->cpu_imm_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_imm_xcall_p0;
		INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI);
		xparam = cpu_data_ptr->cpu_imm_xcall_p1;
		cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
		cpu_data_ptr->cpu_imm_xcall_p1 = NULL;
		os_atomic_thread_fence(acq_rel);
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcallImm, relaxed);
		xfunc(xparam);
		INTERRUPT_MASKED_DEBUG_END();
	}
}
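
/*
 * Post a cross-call to every other started CPU, optionally invoking func on
 * the calling CPU as well.  When synch is non-NULL it is preloaded with the
 * CPU count and used as a rendezvous counter: the caller sleeps until the
 * remote invocations (see cpu_broadcast_xcall_simple_cbk below) have dropped
 * it to zero.  The return value is the number of CPUs on which func was
 * dispatched, including the caller when self_xcall is set.
 */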
static unsigned int
cpu_broadcast_xcall_internal(unsigned int signal,
    uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	boolean_t       intr;
	cpu_data_t      *cpu_data_ptr;
	cpu_data_t      *target_cpu_datap;
	unsigned int    failsig;
	int             cpu;
	int             max_cpu = ml_get_max_cpu_number() + 1;

	//yes, param ALSO cannot be NULL
	assert(synch);
	assert(func);
	assert(parm);

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	failsig = 0;

	if (synch != NULL) {
		*synch = max_cpu;
		assert_wait((event_t)synch, THREAD_UNINT);
	}

	for (cpu = 0; cpu < max_cpu; cpu++) {
		target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

		if (target_cpu_datap == cpu_data_ptr) {
			continue;
		}

		if ((target_cpu_datap == NULL) ||
		    KERN_SUCCESS != cpu_signal(target_cpu_datap, signal, (void *)func, parm)) {
			failsig++;
		}
	}

	if (self_xcall) {
		func(parm);
	}

	(void) ml_set_interrupts_enabled(intr);

	if (synch != NULL) {
		if (os_atomic_sub(synch, (!self_xcall) ? failsig + 1 : failsig, relaxed) == 0) {
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			thread_block(THREAD_CONTINUE_NULL);
		}
	}

	if (!self_xcall) {
		return max_cpu - failsig - 1;
	} else {
		return max_cpu - failsig;
	}
}

unsigned int
cpu_broadcast_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_broadcast_xcall_internal(SIGPxcall, synch, self_xcall, func, parm);
}
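
/*
 * Usage sketch (hypothetical caller, not part of this file): broadcast a
 * handler to all CPUs and wait for each one to run it.  The caller-supplied
 * function is expected to decrement the synch counter and wake the
 * initiator, as the simple wrappers below do.
 *
 *	static void
 *	example_flush_handler(void *arg)        // hypothetical callback
 *	{
 *		uint32_t *sync = (uint32_t *)arg;
 *		// ... per-CPU work here ...
 *		if (os_atomic_dec(sync, relaxed) == 0) {
 *			thread_wakeup((event_t)sync);
 *		}
 *	}
 *
 *	uint32_t sync = 0;
 *	(void) cpu_broadcast_xcall(&sync, TRUE, example_flush_handler, &sync);
 */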

struct cpu_broadcast_xcall_simple_data {
	broadcastFunc func;
	void *parm;
	uint32_t sync;
};

static void
cpu_broadcast_xcall_simple_cbk(void *parm)
{
	struct cpu_broadcast_xcall_simple_data *data = (struct cpu_broadcast_xcall_simple_data *)parm;

	data->func(data->parm);

	if (os_atomic_dec(&data->sync, relaxed) == 0) {
		thread_wakeup((event_t)&data->sync);
	}
}

static unsigned int
cpu_xcall_simple(boolean_t self_xcall,
    broadcastFunc func,
    void *parm,
    bool immediate)
{
	struct cpu_broadcast_xcall_simple_data data = {};

	data.func = func;
	data.parm = parm;

	return cpu_broadcast_xcall_internal(immediate ? SIGPxcallImm : SIGPxcall, &data.sync, self_xcall, cpu_broadcast_xcall_simple_cbk, &data);
}

unsigned int
cpu_broadcast_immediate_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_broadcast_xcall_internal(SIGPxcallImm, synch, self_xcall, func, parm);
}

unsigned int
cpu_broadcast_xcall_simple(boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_xcall_simple(self_xcall, func, parm, false);
}

unsigned int
cpu_broadcast_immediate_xcall_simple(boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_xcall_simple(self_xcall, func, parm, true);
}
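
/*
 * Validate the target CPU number and arguments, then post a single
 * cross-call (normal or immediate, depending on signal) to that CPU via
 * cpu_signal().
 */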
static kern_return_t
cpu_xcall_internal(unsigned int signal, int cpu_number, broadcastFunc func, void *param)
{
	cpu_data_t *target_cpu_datap;

	if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number())) {
		return KERN_INVALID_ARGUMENT;
	}

	if (func == NULL || param == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	target_cpu_datap = (cpu_data_t*)CpuDataEntries[cpu_number].cpu_data_vaddr;
	if (target_cpu_datap == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return cpu_signal(target_cpu_datap, signal, (void*)func, param);
}

kern_return_t
cpu_xcall(int cpu_number, broadcastFunc func, void *param)
{
	return cpu_xcall_internal(SIGPxcall, cpu_number, func, param);
}

kern_return_t
cpu_immediate_xcall(int cpu_number, broadcastFunc func, void *param)
{
	return cpu_xcall_internal(SIGPxcallImm, cpu_number, func, param);
}
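
/*
 * Set the requested signal bit in the target CPU's pending mask with a
 * compare-and-swap loop, then kick the target with an IPI (deferred or
 * immediate).  Cross-call signals additionally publish the handler and its
 * parameter, drain our own pending cross-calls while spinning to avoid a
 * mutual-xcall deadlock, and panic if the target fails to acknowledge a
 * previous cross-call within xcall_ack_timeout_abstime.
 */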
static kern_return_t
cpu_signal_internal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1,
    boolean_t defer)
{
	unsigned int    Check_SIGPdisabled;
	unsigned int    current_signals;
	Boolean         swap_success;
	boolean_t       interruptible = ml_set_interrupts_enabled(FALSE);
	cpu_data_t      *current_proc = getCpuDatap();

	/* We'll mandate that only IPIs meant to kick a core out of idle may ever be deferred. */
	if (defer) {
		assert(signal == SIGPnop);
	}

	if (current_proc != target_proc) {
		Check_SIGPdisabled = SIGPdisabled;
	} else {
		Check_SIGPdisabled = 0;
	}

	if ((signal == SIGPxcall) || (signal == SIGPxcallImm)) {
		uint64_t start_mabs_time, max_mabs_time, current_mabs_time;
		current_mabs_time = start_mabs_time = mach_absolute_time();
		max_mabs_time = xcall_ack_timeout_abstime + current_mabs_time;
		assert(max_mabs_time > current_mabs_time);

		do {
			current_signals = target_proc->cpu_signal;
			if ((current_signals & SIGPdisabled) == SIGPdisabled) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}
			swap_success = OSCompareAndSwap(current_signals & (~signal), current_signals | signal,
			    &target_proc->cpu_signal);

			if (!swap_success && (signal == SIGPxcallImm) && (target_proc->cpu_signal & SIGPxcallImm)) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_ALREADY_WAITING;
			}

			/* Drain pending xcalls on this cpu; the CPU we're trying to xcall may in turn
			 * be trying to xcall us.  Since we have interrupts disabled that can deadlock,
			 * so break the deadlock by draining pending xcalls. */
			if (!swap_success && (current_proc->cpu_signal & signal)) {
				cpu_handle_xcall(current_proc);
			}
		} while (!swap_success && ((current_mabs_time = mach_absolute_time()) < max_mabs_time));

		/*
		 * If we time out while waiting for the target CPU to respond, it's possible that no
		 * other CPU is available to handle the watchdog interrupt that would eventually trigger
		 * a panic. To prevent this from happening, we just panic here to flag this condition.
		 */
		if (__improbable(current_mabs_time >= max_mabs_time)) {
			uint64_t end_time_ns, xcall_ack_timeout_ns;
			absolutetime_to_nanoseconds(current_mabs_time - start_mabs_time, &end_time_ns);
			absolutetime_to_nanoseconds(xcall_ack_timeout_abstime, &xcall_ack_timeout_ns);
			panic("CPU%u has failed to respond to cross-call after %llu nanoseconds (timeout = %llu ns)",
			    target_proc->cpu_number, end_time_ns, xcall_ack_timeout_ns);
		}

		if (signal == SIGPxcallImm) {
			target_proc->cpu_imm_xcall_p0 = p0;
			target_proc->cpu_imm_xcall_p1 = p1;
		} else {
			target_proc->cpu_xcall_p0 = p0;
			target_proc->cpu_xcall_p1 = p1;
		}
	} else {
		do {
			current_signals = target_proc->cpu_signal;
			if ((Check_SIGPdisabled != 0) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}

			swap_success = OSCompareAndSwap(current_signals, current_signals | signal,
			    &target_proc->cpu_signal);
		} while (!swap_success);
	}

	/*
	 * Issue DSB here to guarantee: 1) prior stores to pending signal mask and xcall params
	 * will be visible to other cores when the IPI is dispatched, and 2) subsequent
	 * instructions to signal the other cores will not execute until after the barrier.
	 * DMB would be sufficient to guarantee 1) but not 2).
	 */
	__builtin_arm_dsb(DSB_ISH);

	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		if (defer) {
#if defined(HAS_IPI)
			if (gFastIPI) {
				ml_cpu_signal_deferred(target_proc->cpu_phys_id);
			} else {
				PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
			}
#else
			PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
		} else {
#if defined(HAS_IPI)
			if (gFastIPI) {
				ml_cpu_signal(target_proc->cpu_phys_id);
			} else {
				PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
			}
#else
			PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
		}
	}

	ml_set_interrupts_enabled(interruptible);
	return KERN_SUCCESS;
}

kern_return_t
cpu_signal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1)
{
	return cpu_signal_internal(target_proc, signal, p0, p1, FALSE);
}

kern_return_t
cpu_signal_deferred(cpu_data_t *target_proc)
{
	return cpu_signal_internal(target_proc, SIGPnop, NULL, NULL, TRUE);
}

void
cpu_signal_cancel(cpu_data_t *target_proc)
{
	/* TODO: Should we care about the state of a core as far as squashing deferred IPIs goes? */
	if (!(target_proc->cpu_signal & SIGPdisabled)) {
#if defined(HAS_IPI)
		if (gFastIPI) {
			ml_cpu_signal_retract(target_proc->cpu_phys_id);
		} else {
			PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
		}
#else
		PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
	}
}
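
/*
 * IPI entry point.  cpu_signal_handler_internal() snapshots this CPU's
 * pending-signal mask, optionally toggles SIGPdisabled based on
 * disable_signal, and then dispatches each pending signal (decrementer,
 * kperf, cross-calls, AST check, debugger, cache maintenance) until the
 * mask drains.
 */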
void
cpu_signal_handler(void)
{
	cpu_signal_handler_internal(FALSE);
}

void
cpu_signal_handler_internal(boolean_t disable_signal)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	unsigned int cpu_signal;

	cpu_data_ptr->cpu_stat.ipi_cnt++;
	cpu_data_ptr->cpu_stat.ipi_cnt_wake++;
	SCHED_STATS_INC(ipi_count);

	cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);

	if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE)) {
		os_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
	} else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE)) {
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
	}

	while (cpu_signal & ~SIGPdisabled) {
		if (cpu_signal & SIGPdec) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdec, relaxed);
			INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_IPI);
			rtclock_intr(FALSE);
			INTERRUPT_MASKED_DEBUG_END();
		}
		if (cpu_signal & SIGPkppet) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPkppet, relaxed);
			extern void kperf_signal_handler(void);
			INTERRUPT_MASKED_DEBUG_START(kperf_signal_handler, DBG_INTR_TYPE_IPI);
			kperf_signal_handler();
			INTERRUPT_MASKED_DEBUG_END();
		}
		if (cpu_signal & (SIGPxcall | SIGPxcallImm)) {
			cpu_handle_xcall(cpu_data_ptr);
		}
		if (cpu_signal & SIGPast) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPast, relaxed);
			INTERRUPT_MASKED_DEBUG_START(ast_check, DBG_INTR_TYPE_IPI);
			ast_check(current_processor());
			INTERRUPT_MASKED_DEBUG_END();
		}
		if (cpu_signal & SIGPdebug) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdebug, relaxed);
			INTERRUPT_MASKED_DEBUG_START(DebuggerXCall, DBG_INTR_TYPE_IPI);
			DebuggerXCall(cpu_data_ptr->cpu_int_state);
			INTERRUPT_MASKED_DEBUG_END();
		}
		if (cpu_signal & SIGPLWFlush) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWFlush, relaxed);
			INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI);
			cache_xcall_handler(LWFlush);
			INTERRUPT_MASKED_DEBUG_END();
		}
		if (cpu_signal & SIGPLWClean) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWClean, relaxed);
			INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI);
			cache_xcall_handler(LWClean);
			INTERRUPT_MASKED_DEBUG_END();
		}

		cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);
	}
}

void
cpu_exit_wait(int cpu_id)
{
#if USE_APPLEARMSMP
	if (!ml_is_quiescing()) {
		// For runtime disable (non S2R) the CPU will shut down immediately.
		ml_topology_cpu_t *cpu = &ml_get_topology_info()->cpus[cpu_id];
		assert(cpu && cpu->cpu_IMPL_regs);
		volatile uint64_t *cpu_sts = (void *)(cpu->cpu_IMPL_regs + CPU_PIO_CPU_STS_OFFSET);

		// Poll the "CPU running state" field until it is 0 (off)
		while ((*cpu_sts & CPU_PIO_CPU_STS_cpuRunSt_mask) != 0x00) {
			__builtin_arm_dsb(DSB_ISH);
		}
		return;
	}
#endif /* USE_APPLEARMSMP */

	if (cpu_id != master_cpu) {
		// For S2R, ml_arm_sleep() will do some extra polling after setting ARM_CPU_ON_SLEEP_PATH.
		cpu_data_t      *cpu_data_ptr;

		cpu_data_ptr = CpuDataEntries[cpu_id].cpu_data_vaddr;
		while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {
			;
		}
	}
}

bool
cpu_can_exit(__unused int cpu)
{
	return true;
}

void
cpu_machine_init(void)
{
	static boolean_t started = FALSE;
	cpu_data_t      *cpu_data_ptr;

	cpu_data_ptr = getCpuDatap();
	started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState);
	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		platform_cache_init();
	}

	/* Note: this calls IOCPURunPlatformActiveActions when resuming on boot cpu */
	PE_cpu_machine_init(cpu_data_ptr->cpu_id, !started);

	cpu_data_ptr->cpu_flags |= StartedState;
}

processor_t
current_processor(void)
{
	return PERCPU_GET(processor);
}

processor_t
cpu_to_processor(int cpu)
{
	cpu_data_t *cpu_data = cpu_datap(cpu);
	if (cpu_data != NULL) {
		return PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data);
	} else {
		return NULL;
	}
}

cpu_data_t *
processor_to_cpu_datap(processor_t processor)
{
	assert(processor->cpu_id <= ml_get_max_cpu_number());
	assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL);

	return PERCPU_GET_RELATIVE(cpu_data, processor, processor);
}

__startup_func
static void
cpu_data_startup_init(void)
{
	vm_size_t size = percpu_section_size() * (ml_get_cpu_count() - 1);

	percpu_base.size = percpu_section_size();
	if (ml_get_cpu_count() == 1) {
		percpu_base.start = VM_MAX_KERNEL_ADDRESS;
		return;
	}

	/*
	 * The memory needs to be physically contiguous because it contains
	 * cpu_data_t structures sometimes accessed during reset
	 * with the MMU off.
	 *
	 * kmem_alloc_contig() can't be used early, at the time STARTUP_SUB_PERCPU
	 * normally runs, so we instead steal the memory for the PERCPU subsystem
	 * early.
	 */
	percpu_base.start  = (vm_offset_t)pmap_steal_memory(round_page(size));
	bzero((void *)percpu_base.start, round_page(size));

	percpu_base.start -= percpu_section_start();
	percpu_base.end    = percpu_base.start + size - 1;
	percpu_base_cur    = percpu_base.start;
}
STARTUP(PMAP_STEAL, STARTUP_RANK_FIRST, cpu_data_startup_init);
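
/*
 * Return the cpu_data for a CPU being brought up: the boot CPU uses the
 * master percpu slot, while each secondary CPU carves the next slot out of
 * the region reserved by cpu_data_startup_init() and has its kernel stacks
 * allocated here.
 */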
cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	cpu_data_t      *cpu_data_ptr = NULL;
	vm_address_t    base;

	if (is_boot_cpu) {
		cpu_data_ptr = PERCPU_GET_MASTER(cpu_data);
	} else {
		base = os_atomic_add_orig(&percpu_base_cur,
		    percpu_section_size(), relaxed);

		cpu_data_ptr = PERCPU_GET_WITH_BASE(base, cpu_data);
		cpu_stack_alloc(cpu_data_ptr);
	}

	return cpu_data_ptr;
}

ast_t *
ast_pending(void)
{
	return &getCpuDatap()->cpu_pending_ast;
}

cpu_type_t
slot_type(int slot_num)
{
	return cpu_datap(slot_num)->cpu_type;
}

cpu_subtype_t
slot_subtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_subtype;
}

cpu_threadtype_t
slot_threadtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_threadtype;
}

cpu_type_t
cpu_type(void)
{
	return getCpuDatap()->cpu_type;
}

cpu_subtype_t
cpu_subtype(void)
{
	return getCpuDatap()->cpu_subtype;
}

cpu_threadtype_t
cpu_threadtype(void)
{
	return getCpuDatap()->cpu_threadtype;
}

int
cpu_number(void)
{
	return getCpuDatap()->cpu_number;
}

vm_offset_t
current_percpu_base(void)
{
	return current_thread()->machine.pcpu_data_base;
}

uint64_t
ml_get_wake_timebase(void)
{
	return wake_abstime;
}

bool
ml_cpu_signal_is_enabled(void)
{
	return !(getCpuDatap()->cpu_signal & SIGPdisabled);
}

bool
ml_cpu_can_exit(__unused int cpu_id)
{
	/* processor_exit() is always allowed on the S2R path */
	if (ml_is_quiescing()) {
		return true;
	}
#if HAS_CLUSTER && USE_APPLEARMSMP
	/*
	 * Cyprus and newer chips can disable individual non-boot CPUs. The
	 * implementation polls cpuX_IMPL_CPU_STS, which differs on older chips.
	 */
	if (CpuDataEntries[cpu_id].cpu_data_vaddr != &BootCpuData) {
		return true;
	}
#endif /* HAS_CLUSTER && USE_APPLEARMSMP */
	return false;
}
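
/*
 * CPU state transitions below take cpu_state_lock exclusively, while code
 * that iterates over CPUs brackets the iteration with ml_cpu_begin_loop() /
 * ml_cpu_end_loop(), which take it shared; the non-SMP variants are no-ops.
 */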
#ifdef USE_APPLEARMSMP
void
ml_cpu_begin_state_transition(int cpu_id)
{
	lck_rw_lock_exclusive(&cpu_state_lock);
	CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = true;
	lck_rw_unlock_exclusive(&cpu_state_lock);
}

void
ml_cpu_end_state_transition(int cpu_id)
{
	lck_rw_lock_exclusive(&cpu_state_lock);
	CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = false;
	lck_rw_unlock_exclusive(&cpu_state_lock);
}

void
ml_cpu_begin_loop(void)
{
	lck_rw_lock_shared(&cpu_state_lock);
}

void
ml_cpu_end_loop(void)
{
	lck_rw_unlock_shared(&cpu_state_lock);
}

#else /* USE_APPLEARMSMP */

void
ml_cpu_begin_state_transition(__unused int cpu_id)
{
}

void
ml_cpu_end_state_transition(__unused int cpu_id)
{
}

void
ml_cpu_begin_loop(void)
{
}

void
ml_cpu_end_loop(void)
{
}

#endif /* USE_APPLEARMSMP */