/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	cpu specific routines
 */
#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/percpu.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#if HIBERNATION
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOHibernatePrivate.h>
#endif /* HIBERNATION */

#include <libkern/section_keywords.h>
extern boolean_t idle_enable;
extern uint64_t wake_abstime;

void sleep_token_buffer_init(void);

extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;

#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

extern void __attribute__((noreturn)) arm64_prepare_for_sleep(boolean_t deep_sleep);
extern void arm64_force_wfi_clock_gate(void);

#if defined(APPLETYPHOON)
// <rdar://problem/15827409>
extern void typhoon_prepare_for_wfi(void);
extern void typhoon_return_from_wfi(void);
#endif

#if HAS_RETENTION_STATE
extern void arm64_retention_wfi(void);
#endif

vm_address_t start_cpu_paddr;

sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
	.tcr_el1 = TCR_EL1_BOOT,
};
// wfi - wfi mode
//  0 : disabled
//  1 : normal
//  2 : overhead simulation (delay & flags)
static int wfi = 1;

#if DEVELOPMENT || DEBUG

// 1 << 0 : flush L1s
// 1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */

#if DEVELOPMENT || DEBUG
static bool idle_proximate_timer_wfe = true;
static bool idle_proximate_io_wfe = true;
#define CPUPM_IDLE_WFE 0x5310300
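// CPUPM_IDLE_WFE is the kdebug code emitted from cpu_idle() when a
// WFE-bounded idle attempt completes (see the KDBG() call there).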
#else
static const bool idle_proximate_timer_wfe = true;
static const bool idle_proximate_io_wfe = true;
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
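/*
 * These 8-byte tags are what gets copied into the sleep token buffer:
 * the suspend signature is written on the way down in ml_arm_sleep() so
 * that iBoot can recognize the warm-boot (wake) path, and the running
 * signature is written back by cpu_machine_idle_init() once the kernel
 * is running again.
 */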
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;

static boolean_t coresight_debug_enabled = FALSE;
#if defined(CONFIG_XNUPOST)
void arm64_ipi_test_callback(void *);
void arm64_immediate_ipi_test_callback(void *);

void
arm64_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number;
}

void
arm64_immediate_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number + MAX_CPUS;
}
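/*
 * Each callback stamps the per-CPU slot it is handed with the receiving
 * CPU's number (offset by MAX_CPUS for the immediate variant).
 * arm64_ipi_test() sends both flavors of cross-call to every CPU and then
 * spins, with a 100 ms timeout, until the expected values appear.
 */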
uint64_t arm64_ipi_test_data[MAX_CPUS * 2];

void
arm64_ipi_test()
{
	volatile uint64_t *ipi_test_data, *immediate_ipi_test_data;
	uint32_t timeout_ms = 100;
	uint64_t then, now, delta;
	int current_cpu_number = getCpuDatap()->cpu_number;

	/*
	 * probably the only way to have this on most systems is with the
	 * cpus=1 boot-arg, but nonetheless, if we only have 1 CPU active,
	 * IPI is not available
	 */
	if (real_ncpus == 1) {
		return;
	}

	const unsigned int max_cpu_id = ml_get_max_cpu_number();
	for (unsigned int i = 0; i <= max_cpu_id; ++i) {
		ipi_test_data = &arm64_ipi_test_data[i];
		immediate_ipi_test_data = &arm64_ipi_test_data[i + MAX_CPUS];

		kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		then = mach_absolute_time();

		while ((error = cpu_immediate_xcall((int)i, (void *)arm64_immediate_ipi_test_callback,
		    (void *)(uintptr_t)immediate_ipi_test_data)) == KERN_ALREADY_WAITING) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d was unable to immediate-IPI CPU %u within %dms", current_cpu_number, i, timeout_ms);
			}
		}

		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to immediate-IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		then = mach_absolute_time();

		while ((*ipi_test_data != i) || (*immediate_ipi_test_data != (i + MAX_CPUS))) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d tried to IPI CPU %d but didn't get correct responses within %dms, responses: %llx, %llx",
				    current_cpu_number, i, timeout_ms, *ipi_test_data, *immediate_ipi_test_data);
			}
		}
	}
}

#endif /* defined(CONFIG_XNUPOST) */
static void
configure_coresight_registers(cpu_data_t *cdp)
{
	int i;

	vm_offset_t coresight_regs = ml_get_topology_info()->cpus[cdp->cpu_number].coresight_regs;

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide either cpu_regmap_paddr (from the legacy "reg-private" EDT property)
	 * or coresight_regs (from the new "coresight-reg" property), assume that
	 * coresight registers are not supported.
	 */
	if (cdp->cpu_regmap_paddr || coresight_regs) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			if (i == CORESIGHT_CTI) {
				continue;
			}
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) {
				continue;
			}

			if (!cdp->coresight_base[i]) {
				if (coresight_regs) {
					cdp->coresight_base[i] = coresight_regs + CORESIGHT_OFFSET(i);
				} else {
					uint64_t addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
					cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);
				}

				/*
				 * At this point, failing to io map the
				 * registers is considered as an error.
				 */
				if (!cdp->coresight_base[i]) {
					panic("unable to ml_io_map coresight regions");
				}
			}
			/* Unlock EDLAR, CTILAR, PMLAR */
			if (i != CORESIGHT_UTT) {
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			}
		}
	}
}
/*
 *	Routine:	cpu_bootstrap
 *	Function:
 */
void
cpu_bootstrap(void)
{
}

/*
 *	Routine:	cpu_sleep
 *	Function:
 */
void
cpu_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;

#if MONOTONIC
	mt_cpu_down(cpu_data_ptr);
#endif /* MONOTONIC */

#if HIBERNATION
	if (ml_is_quiescing()) {
		PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
	} else {
		bool deep_sleep = PE_cpu_down(cpu_data_ptr->cpu_id);
		cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
		// hang CPU on spurious wakeup
		cpu_data_ptr->cpu_reset_handler = (uintptr_t)0;
		__builtin_arm_dsb(DSB_ISH);
		arm64_prepare_for_sleep(deep_sleep);
	}
#else
	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
#endif /* HIBERNATION */
}
/*
 *	Routine:	cpu_interrupt_is_pending
 *	Function:	Returns the value of ISR. Due to how this register is
 *			implemented, this returns 0 if there are no
 *			interrupts pending, so it can be used as a boolean test.
 */
int
cpu_interrupt_is_pending(void)
{
	uint64_t isr_value;
	isr_value = __builtin_arm_rsr64("ISR_EL1");
	return (int)isr_value;
}

static bool
cpu_proximate_timer(void)
{
	return !SetIdlePop();
}
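/*
 * wfe_to_deadline_or_interrupt() implements the WFE-based shallow idle loop:
 * it issues event-bounded WFEs (assuming the event stream is enabled, per the
 * comment below) until either an interrupt becomes pending or the cluster's
 * WFE recommendation expires, and returns whether an interrupt ended the loop.
 */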
static bool
wfe_to_deadline_or_interrupt(uint32_t cid, uint64_t wfe_deadline, __unused cpu_data_t *cdp)
{
	bool ipending = false;

	while ((ipending = (cpu_interrupt_is_pending() != 0)) == false) {
		/* Assumes event stream enablement
		 * TODO: evaluate temporarily stretching the per-CPU event
		 * interval to a larger value for possible efficiency
		 * improvements.
		 */
		__builtin_arm_wfe();
#if DEVELOPMENT || DEBUG
		cdp->wfe_count++;
#endif
		if (wfe_deadline != ~0ULL) {
#if DEVELOPMENT || DEBUG
			cdp->wfe_deadline_checks++;
#endif
			/* Check if the WFE recommendation has expired.
			 * We do not recompute the deadline here.
			 */
			if ((ml_cluster_wfe_timeout(cid) == 0) ||
			    mach_absolute_time() >= wfe_deadline) {
#if DEVELOPMENT || DEBUG
				cdp->wfe_terminations++;
#endif
				break;
			}
		}
	}
	/* TODO: worth refreshing pending interrupt status? */
	return ipending;
}
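/*
 * cpu_idle() tries the cheap paths first: if the perf controller has posted a
 * WFE recommendation for this cluster, or a timer is about to fire, it polls
 * in wfe_to_deadline_or_interrupt() and returns to machine_idle() via
 * Idle_load_context(). Only when neither applies does it fall through to the
 * deeper WFI path.
 */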
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;
	bool idle_disallowed = false;

	if (__improbable((!idle_enable))) {
		idle_disallowed = true;
	} else if (__improbable(cpu_data_ptr->cpu_signal & SIGPdisabled)) {
		idle_disallowed = true;
	}

	if (__improbable(idle_disallowed)) {
		Idle_load_context();
	}

	bool ipending = false;
	uint32_t cid = ~0U;

	if (__probable(idle_proximate_io_wfe == true)) {
		uint64_t wfe_deadline = 0;
		/* Check for an active perf. controller generated
		 * WFE recommendation for this cluster.
		 */
		cid = cpu_data_ptr->cpu_cluster_id;
		uint64_t wfe_ttd = 0;
		if ((wfe_ttd = ml_cluster_wfe_timeout(cid)) != 0) {
			wfe_deadline = mach_absolute_time() + wfe_ttd;
		}

		if (wfe_deadline != 0) {
			/* Poll issuing event-bounded WFEs until an interrupt
			 * arrives or the WFE recommendation expires
			 */
			ipending = wfe_to_deadline_or_interrupt(cid, wfe_deadline, cpu_data_ptr);
#if DEVELOPMENT || DEBUG
			KDBG(CPUPM_IDLE_WFE, ipending, cpu_data_ptr->wfe_count, wfe_deadline, 0);
#endif
			if (ipending == true) {
				/* Back to machine_idle() */
				Idle_load_context();
			}
		}
	}

	if (__improbable(cpu_proximate_timer())) {
		if (idle_proximate_timer_wfe == true) {
			/* Poll issuing WFEs until the expected
			 * timer FIQ arrives.
			 */
			ipending = wfe_to_deadline_or_interrupt(cid, ~0ULL, cpu_data_ptr);
			assert(ipending == true);
		}
		Idle_load_context();
	}

	lastPop = cpu_data_ptr->rtcPop;

	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug) {
		arm_debug_set(NULL);
	}
	cpu_data_ptr->cpu_user_debug = NULL;

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop) {
			SetIdlePop();
		}
	}

#if MONOTONIC
	mt_cpu_idle(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi) {
#if !defined(APPLE_ARM64_ARCH_FAMILY)
		platform_cache_idle_enter();
#endif

#if DEVELOPMENT || DEBUG
		// When simulating wfi overhead,
		// force wfi to clock gating only
		if (wfi == 2) {
			arm64_force_wfi_clock_gate();
		}
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_prepare_for_wfi();
#endif
		__builtin_arm_dsb(DSB_SY);
#if HAS_RETENTION_STATE
		arm64_retention_wfi();
#else
		__builtin_arm_wfi();
#endif

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
		// Handle wfi overhead simulation
		if (wfi == 2) {
			uint64_t deadline;

			// Calculate wfi delay deadline
			clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

			// Flush L1 caches
			if ((wfi_flags & 1) != 0) {
				InvalidatePoU_Icache();
				FlushPoC_Dcache();
			}

			// Flush TLBs
			if ((wfi_flags & 2) != 0) {
				flush_core_tlb();
			}

			// Wait for the balance of the wfi delay
			clock_delay_until(deadline);
		}
#endif /* DEVELOPMENT || DEBUG */
#if !defined(APPLE_ARM64_ARCH_FAMILY)
		platform_cache_idle_exit();
#endif
	}

	cpu_idle_exit(FALSE);
}
/*
 *	Routine:	cpu_idle_exit
 *	Function:
 */
void
cpu_idle_exit(boolean_t from_reset)
{
	uint64_t new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	if (from_reset) {
		configure_coresight_registers(cpu_data_ptr);
	}

#if MONOTONIC
	mt_cpu_run(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}
void
cpu_init(void)
{
	cpu_data_t *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	assert(exception_stack_pointer() != 0);

	if (cdp->cpu_type != CPU_TYPE_ARM64) {
		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}
		/* ARM_SMP: Assuming identical cpu */

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		case CPU_ARCH_ARMv8E:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64E;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
#if MONOTONIC
	cdp->cpu_stat.pmi_cnt_wake = 0;
#endif /* MONOTONIC */
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */
}
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t irq_stack = 0;
	vm_offset_t exc_stack = 0;

	kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
	    INTSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu interrupt stack\n");
	}

	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;
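	/*
	 * Each stack allocation is its nominal size plus two guard pages,
	 * laid out as [guard page][stack][guard page], so the usable top of
	 * stack sits at base + PAGE_SIZE + stack size. The exception stack
	 * below follows the same pattern with EXCEPSTACK_SIZE.
	 */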
	kr = kernel_memory_allocate(kernel_map, &exc_stack,
	    EXCEPSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu exception stack\n");
	}

	cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
	cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;
}
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if ((cpu_data_ptr == NULL) || (cpu_data_ptr == &BootCpuData)) {
		return;
	}

	int cpu_number = cpu_data_ptr->cpu_number;

	if (CpuDataEntries[cpu_number].cpu_data_vaddr == cpu_data_ptr) {
		CpuDataEntries[cpu_number].cpu_data_vaddr = NULL;
		CpuDataEntries[cpu_number].cpu_data_paddr = 0;
		__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible
	}
	(kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
	(kfree)((void *)(cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE), EXCEPSTACK_SIZE);
}
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	int i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = NULL;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;

	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = NULL;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

	pmap_cpu_data_t *pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;
	pmap_cpu_data_ptr->pv_free.list = NULL;
	pmap_cpu_data_ptr->pv_free.count = 0;
	pmap_cpu_data_ptr->pv_free_tail = NULL;

	bzero(&(pmap_cpu_data_ptr->cpu_sw_asids[0]), sizeof(pmap_cpu_data_ptr->cpu_sw_asids));

	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(HAS_APPLE_PAC)
	cpu_data_ptr->rop_key = 0;
	cpu_data_ptr->jop_key = ml_default_jop_pid();
#endif
}
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu = cpu_data_ptr->cpu_number;

#if KASAN
	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}
#endif

	__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);

	return KERN_SUCCESS;
}
#if defined(KERNEL_INTEGRITY_CTRR)
/* Hibernation needs to reset this state, so data and text are in the hib segment;
 * this allows them to be accessed and executed early.
 */
LCK_GRP_DECLARE(ctrr_cpu_start_lock_grp, "ctrr_cpu_start_lock");
LCK_SPIN_DECLARE(ctrr_cpu_start_lck, &ctrr_cpu_start_lock_grp);
enum ctrr_cluster_states ctrr_cluster_locked[MAX_CPU_CLUSTERS] MARK_AS_HIBERNATE_DATA;

MARK_AS_HIBERNATE_TEXT
void
init_ctrr_cluster_states(void)
{
	for (int i = 0; i < MAX_CPU_CLUSTERS; i++) {
		ctrr_cluster_locked[i] = CTRR_UNLOCKED;
	}
}
#endif
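/*
 * ctrr_cluster_locked[] is consumed by cpu_start(): the first CPU brought up
 * in a cluster moves its entry from CTRR_UNLOCKED to CTRR_LOCKING and goes
 * ahead to lock CTRR for the cluster, while any other CPU in that cluster
 * blocks until the state is no longer CTRR_LOCKING.
 */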
kern_return_t
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;

	kprintf("cpu_start() cpu: %d\n", cpu);

	if (cpu == cpu_number()) {
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;
		processor_t processor;

		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

		cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;

		processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr);
		if (processor->startup_thread != THREAD_NULL) {
			first_thread = processor->startup_thread;
		} else {
			first_thread = processor->idle_thread;
		}
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;
		first_thread->machine.pcpu_data_base =
		    (vm_address_t)cpu_data_ptr - __PERCPU_ADDR(cpu_data);

		configure_coresight_registers(cpu_data_ptr);

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
#if defined(KERNEL_INTEGRITY_CTRR)

		/* First CPU being started within a cluster goes ahead to lock CTRR for cluster;
		 * other CPUs block until cluster is locked. */
		lck_spin_lock(&ctrr_cpu_start_lck);
		switch (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]) {
		case CTRR_UNLOCKED:
			ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKING;
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		case CTRR_LOCKING:
			assert_wait(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id], THREAD_UNINT);
			lck_spin_unlock(&ctrr_cpu_start_lck);
			thread_block(THREAD_CONTINUE_NULL);
			assert(ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKING);
			break;
		default: // CTRR_LOCKED
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		}
#endif
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
	}

	return KERN_SUCCESS;
}
void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData)) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase. It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
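		/*
		 * For example: if wake_abstime was 1000 ticks when we went to
		 * sleep and the hardware clock reads 400 ticks on wake, the
		 * offset becomes 600, so offset + hwclock picks up at exactly
		 * 1000 (illustrative numbers only).
		 */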
	} else if (from_boot) {
		/* On initial boot, initialize time_since_reset to CNTPCT_EL0. */
		ml_set_reset_time(ml_get_hwclock());
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = 0x0UL;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}

int
cpu_cluster_id(void)
{
	return getCpuDatap()->cpu_cluster_id;
}
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		int cpu;
		int max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) {
				continue;
			}

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
				;
			}
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
		ml_set_reset_time(UINT64_MAX);
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoCs
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		} else {
			panic("No sleep token buffer");
		}

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * tell debugger to not prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if HIBERNATION
		uint32_t mode = hibernate_write_image();
		if (mode == kIOHibernatePostWriteHalt) {
			HIBLOG("powering off after writing hibernation image\n");
			int halt_result = -1;
			if (PE_halt_restart) {
				halt_result = (*PE_halt_restart)(kPEHaltCPU);
			}
			panic("can't shutdown: PE_halt_restart returned %d", halt_result);
		}
#endif /* HIBERNATION */

#if MONOTONIC
		mt_sleep();
#endif /* MONOTONIC */
		/* ARM64-specific preparation */
		arm64_prepare_for_sleep(true);
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * With the exception of the CPU revisions listed above, our ARM64 CPUs have a
		 * global register to manage entering deep sleep, as opposed to a per-CPU
		 * register. We cannot update this register until all CPUs are ready to enter
		 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
		 * (by idling), it will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which CPU0 will
		 * manage.
		 */
		while (arm64_stall_sleep) {
			__builtin_arm_wfe();
		}
#endif
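		/*
		 * arm64_stall_sleep is cleared (followed by a DMB) by the boot
		 * CPU in the branch above, once the suspend signature has been
		 * written into the sleep token buffer; that releases the
		 * secondary CPUs waiting here to continue into
		 * arm64_prepare_for_sleep().
		 */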
		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * tell debugger to not prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
		arm64_prepare_for_sleep(true);
	}
}
void
cpu_machine_idle_init(boolean_t from_boot)
{
	static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		uint32_t production = 1;
		DTEntry entry;

		unsigned long jtag = 0;

		if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
			if (jtag != 0) {
				idle_enable = FALSE;
			} else {
				idle_enable = TRUE;
			}
		} else {
			idle_enable = TRUE;
		}

#if DEVELOPMENT || DEBUG
		uint32_t wfe_mode = 0;
		if (PE_parse_boot_argn("wfe_mode", &wfe_mode, sizeof(wfe_mode))) {
			idle_proximate_timer_wfe = ((wfe_mode & 1) == 1);
			idle_proximate_io_wfe = ((wfe_mode & 2) == 2);
		}
#endif

		uint32_t wfi_tmp = 1;
		PE_parse_boot_argn("wfi", &wfi_tmp, sizeof(wfi_tmp));

		// bits 7..0 give the wfi type
		switch (wfi_tmp & 0xff) {
		case 0:
			// disable wfi
			wfi = 0;
			break;

#if DEVELOPMENT || DEBUG
		case 2:
			// wfi overhead simulation
			// 31..16 - wfi delay is us
			// 15..8  - wfi flags
			wfi = 2;
			wfi_flags = (wfi_tmp >> 8) & 0xFF;
			nanoseconds_to_absolutetime(((wfi_tmp >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
			break;
#endif /* DEVELOPMENT || DEBUG */

		case 1:
		default:
			break;
		}
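		// Example (hypothetical boot-arg value): wfi=0x000A0202 selects
		// type 2 (overhead simulation) with wfi_flags = 0x02 (flush TLBs)
		// and a delay field of 0x000A, which is converted to absolute
		// time via NSEC_PER_MSEC above.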

		ResetHandlerData.assist_reset_handler = 0;
		ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);

#if defined(MONITOR)
		monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif /* MONITOR */

		// Determine if we are on production or debug chip
		if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
			unsigned int size;
			void const *prop;

			if (kSuccess == SecureDTGetProperty(entry, "effective-production-status-ap", &prop, &size)) {
				if (size == 4) {
					bcopy(prop, &production, size);
				}
			}
		}
		if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
			// Enable coresight debug registers on debug-fused chips
			coresight_debug_enabled = TRUE;
#endif
		}

		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
		resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

#if WITH_CLASSIC_S2R
	if (cpu_data_ptr == &BootCpuData) {
		static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
		} else {
			panic("No sleep token buffer");
		}

		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
		    SleepToken_low_paddr, sizeof(SleepToken));
		flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
	}
#endif

	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}
_Atomic uint32_t cpu_idle_count = 0;

void
machine_track_platform_idle(boolean_t entry)
{
	if (entry) {
		os_atomic_inc(&cpu_idle_count, relaxed);
	} else {
		os_atomic_dec(&cpu_idle_count, relaxed);
	}
}
#if WITH_CLASSIC_S2R
void
sleep_token_buffer_init(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	DTEntry entry;
	size_t size;
	void const * const *prop;

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the stpage node in the device tree */
		if (kSuccess != SecureDTLookupEntry(0, "stram", &entry)) {
			return;
		}

		if (kSuccess != SecureDTGetProperty(entry, "reg", (const void **)&prop, (unsigned int *)&size)) {
			return;
		}

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t const *)prop)[0], ((vm_size_t const *)prop)[1]);