/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * cpu specific routines
 */
#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */
extern boolean_t idle_enable;
extern uint64_t wake_abstime;

void sleep_token_buffer_init(void);

extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;

#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

extern void __attribute__((noreturn)) arm64_prepare_for_sleep(void);
extern void arm64_force_wfi_clock_gate(void);
#if defined(APPLETYPHOON)
// <rdar://problem/15827409>
extern void typhoon_prepare_for_wfi(void);
extern void typhoon_return_from_wfi(void);
#endif

vm_address_t start_cpu_paddr;
sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
	.tcr_el1 = TCR_EL1_BOOT,
};
// 2 : overhead simulation (delay & flags)

#if DEVELOPMENT || DEBUG

// 1 << 0 : flush L1s
// 1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */
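
/*
 * Controlled by the "idle_wfe_to_deadline" boot-arg (parsed in
 * cpu_machine_idle_init below). When enabled, cpu_idle() spins until an
 * interrupt is pending (subject to arm64_wfe_allowed()) before taking
 * the normal idle path.
 */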
static bool idle_wfe_to_deadline = false;
#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
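
/*
 * These eight-byte tokens are copied into the sleep token buffer mapped
 * below: suspend_signature by ml_arm_sleep() on the way into sleep, and
 * running_signature by cpu_machine_idle_init() once the system is up, so
 * the warm-boot (wake) path can be told apart from a cold boot.
 */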
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
static boolean_t coresight_debug_enabled = FALSE;
#if defined(CONFIG_XNUPOST)
void arm64_ipi_test_callback(void *);
void arm64_immediate_ipi_test_callback(void *);

void
arm64_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number;
}
void
arm64_immediate_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number + MAX_CPUS;
}
uint64_t arm64_ipi_test_data[MAX_CPUS * 2];
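
/*
 * IPI delivery test: for each CPU, send one cpu_xcall and one
 * cpu_immediate_xcall, then spin until both callbacks have written their
 * expected values into arm64_ipi_test_data, panicking if a send fails or
 * a response does not arrive within timeout_ms.
 */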
void
arm64_ipi_test()
{
	volatile uint64_t *ipi_test_data, *immediate_ipi_test_data;
	uint32_t timeout_ms = 100;
	uint64_t then, now, delta;
	int current_cpu_number = getCpuDatap()->cpu_number;

	/*
	 * probably the only way to have this on most systems is with the
	 * cpus=1 boot-arg, but nonetheless, if we only have 1 CPU active,
	 * IPI is not available
	 */
	if (real_ncpus == 1) {
		return;
	}

	for (unsigned int i = 0; i < MAX_CPUS; ++i) {
		ipi_test_data = &arm64_ipi_test_data[i];
		immediate_ipi_test_data = &arm64_ipi_test_data[i + MAX_CPUS];

		kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		while ((error = cpu_immediate_xcall((int)i, (void *)arm64_immediate_ipi_test_callback,
		    (void *)(uintptr_t)immediate_ipi_test_data)) == KERN_ALREADY_WAITING) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d was unable to immediate-IPI CPU %u within %dms", current_cpu_number, i, timeout_ms);
			}
		}

		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to immediate-IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		then = mach_absolute_time();

		while ((*ipi_test_data != i) || (*immediate_ipi_test_data != (i + MAX_CPUS))) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d tried to IPI CPU %d but didn't get correct responses within %dms, responses: %llx, %llx",
				    current_cpu_number, i, timeout_ms, *ipi_test_data, *immediate_ipi_test_data);
			}
		}
	}
}

#endif /* defined(CONFIG_XNUPOST) */
static void
configure_coresight_registers(cpu_data_t *cdp)
{
	uint64_t addr;
	int i;

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide cpu_regmap_paddr, assume that coresight registers are not
	 * supported.
	 */
	if (cdp->cpu_regmap_paddr) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			/* Skip CTI; these registers are debug-only (they are
			 * not present on production hardware), and there is
			 * at least one known Cyclone errata involving CTI
			 * (rdar://12802966). We have no known clients that
			 * need the kernel to unlock CTI, so it is safer
			 * to avoid doing the access.
			 */
			if (i == CORESIGHT_CTI) {
				continue;
			}
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) {
				continue;
			}

			if (!cdp->coresight_base[i]) {
				addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
				cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);

				/*
				 * At this point, failing to io map the
				 * registers is considered as an error.
				 */
				if (!cdp->coresight_base[i]) {
					panic("unable to ml_io_map coresight regions");
				}
			}
			/* Unlock EDLAR, CTILAR, PMLAR */
			if (i != CORESIGHT_UTT) {
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			}
		}
	}
}
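
/*
 * Writing ARM_DBG_LOCK_ACCESS_KEY to a component's lock access register
 * (the DBGLAR offset used above) is the architected CoreSight way to
 * unlock that component's registers for subsequent writes.
 */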
/*
 * Routine: cpu_bootstrap
 */

/*
 * Routine: cpu_sleep
 */
void
cpu_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;
#if MONOTONIC
	mt_cpu_down(cpu_data_ptr);
#endif /* MONOTONIC */

	/* This calls:
	 *
	 * IOCPURunPlatformQuiesceActions when sleeping the boot cpu
	 *
	 * ml_arm_sleep() on all CPUs
	 *
	 * It does not return.
	 */
	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
}
/*
 * Routine: cpu_interrupt_is_pending
 * Function: Returns the value of ISR. Due to how this register is
 *           implemented, this returns 0 if there are no
 *           interrupts pending, so it can be used as a boolean test.
 */
int
cpu_interrupt_is_pending(void)
{
	uint64_t isr_value;

	isr_value = __builtin_arm_rsr64("ISR_EL1");
	return (int)isr_value;
}
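
/*
 * Callers treat the raw ISR_EL1 value as a boolean; for example, the idle
 * path below spins with "while (!cpu_interrupt_is_pending())" until an
 * interrupt becomes pending.
 */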
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;

	if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled)) {
		/* ... */
	}

	/* If a deadline is pending, wait for it to elapse. */
	if (idle_wfe_to_deadline) {
		if (arm64_wfe_allowed()) {
			while (!cpu_interrupt_is_pending()) {
				/* ... */
			}
		}
	}

	lastPop = cpu_data_ptr->rtcPop;

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug) {
		/* ... */
	}
	cpu_data_ptr->cpu_user_debug = NULL;

	if (cpu_data_ptr->cpu_idle_notify) {
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop) {
			/* ... */
		}
	}

#if MONOTONIC
	mt_cpu_idle(cpu_data_ptr);
#endif /* MONOTONIC */

	platform_cache_idle_enter();

#if DEVELOPMENT || DEBUG
	// When simulating wfi overhead,
	// force wfi to clock gating only
	arm64_force_wfi_clock_gate();
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLETYPHOON)
	// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
	typhoon_prepare_for_wfi();
#endif

	__builtin_arm_dsb(DSB_SY);

#if defined(APPLETYPHOON)
	// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
	typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
	// Handle wfi overhead simulation
	uint64_t deadline;

	// Calculate wfi delay deadline
	clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

	if ((wfi_flags & 1) != 0) {
		InvalidatePoU_Icache();
	}

	if ((wfi_flags & 2) != 0) {
		/* ... */
	}

	// Wait for the balance of the wfi delay
	clock_delay_until(deadline);
#endif /* DEVELOPMENT || DEBUG */

	platform_cache_idle_exit();

	cpu_idle_exit(FALSE);
}
/*
 * Routine: cpu_idle_exit
 */
void
cpu_idle_exit(boolean_t from_reset)
{
	uint64_t new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	configure_coresight_registers(cpu_data_ptr);

#if MONOTONIC
	mt_cpu_run(cpu_data_ptr);
#endif /* MONOTONIC */

	pmap_switch_user_ttb(cpu_data_ptr->cpu_active_thread->map->pmap);

	if (cpu_data_ptr->cpu_idle_notify) {
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}
}
void
cpu_init(void)
{
	cpu_data_t *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	assert(exception_stack_pointer() != 0);

	if (cdp->cpu_type != CPU_TYPE_ARM64) {
		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			/* ... */
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}
		/* ARM_SMP: Assuming identical cpu */

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}

	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
	cdp->cpu_stat.timer_cnt_wake = 0;
#if MONOTONIC
	cdp->cpu_stat.pmi_cnt_wake = 0;
#endif /* MONOTONIC */
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if MONOTONIC
	/* ... */
#endif /* MONOTONIC */
}
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t irq_stack = 0;
	vm_offset_t exc_stack = 0;

	kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
	    INTSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu interrupt stack\n");
	}

	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

	kr = kernel_memory_allocate(kernel_map, &exc_stack,
	    EXCEPSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu exception stack\n");
	}

	cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
	cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;
}
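
/*
 * Layout note: each allocation above reserves guard page | usable stack |
 * guard page (KMA_GUARD_FIRST | KMA_GUARD_LAST with 2 * PAGE_SIZE of
 * slack), so the usable top is base + PAGE_SIZE + <stack size> and the
 * initial stack pointer starts at that top.
 */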
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if ((cpu_data_ptr == NULL) || (cpu_data_ptr == &BootCpuData)) {
		return;
	}

	cpu_processor_free( cpu_data_ptr->cpu_processor);
	if (CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_vaddr == cpu_data_ptr) {
		CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_vaddr = NULL;
		CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_paddr = 0;
		__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible
	}
	(kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
	(kfree)((void *)(cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE), EXCEPSTACK_SIZE);
	kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
}
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->interrupts_enabled = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = (void *) 0;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;

	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = (void *) 0;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

	pmap_cpu_data_t *pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;

	for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) {
		pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0;
	}

	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(HAS_APPLE_PAC)
	cpu_data_ptr->rop_key = 0;
#endif /* defined(HAS_APPLE_PAC) */
}
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu = cpu_data_ptr->cpu_number;

	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}

	__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);

	return KERN_SUCCESS;
}
kern_return_t
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;

	kprintf("cpu_start() cpu: %d\n", cpu);

	if (cpu == cpu_number()) {
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;

		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

		cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;

		if (cpu_data_ptr->cpu_processor->startup_thread != THREAD_NULL) {
			first_thread = cpu_data_ptr->cpu_processor->startup_thread;
		} else {
			first_thread = cpu_data_ptr->cpu_processor->idle_thread;
		}
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;

		configure_coresight_registers(cpu_data_ptr);

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
	}

	return KERN_SUCCESS;
}
void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData)) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase. It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
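		/*
		 * Illustrative numbers: if wake_abstime was 1000 ticks when the
		 * system went to sleep and ml_get_hwclock() reads 5000 on wake,
		 * the offset is 1000 - 5000 = -4000, so offset + hwclock == 1000
		 * and mach_absolute_time() picks up where it left off.
		 */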
	} else if (from_boot) {
		/* On initial boot, initialize time_since_reset to CNTPCT_EL0. */
		ml_set_reset_time(ml_get_hwclock());
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = 0x0UL;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}
	return getCpuDatap()->cpu_cluster_id;
void __attribute__((noreturn))
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		int cpu;
		int max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) {
				continue;
			}

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
				;
			}
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
		ml_set_reset_time(UINT64_MAX);
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoC
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		} else {
			panic("No sleep token buffer");
		}

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * tell debugger to not prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if MONOTONIC
		/* ... */
#endif /* MONOTONIC */
		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * With the exception of the CPU revisions listed above, our ARM64 CPUs have a
		 * global register to manage entering deep sleep, as opposed to a per-CPU
		 * register. We cannot update this register until all CPUs are ready to enter
		 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
		 * (by idling), it will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which CPU0 will
		 * manage.
		 */
		while (arm64_stall_sleep) {
			/* ... */
		}
#endif

		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * tell debugger to not prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	}
}
void
cpu_machine_idle_init(boolean_t from_boot)
{
	static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	unsigned long jtag = 0;
	uint32_t production = 1;

	if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
		/* ... */
	}

	PE_parse_boot_argn("wfi", &wfi_tmp, sizeof(wfi_tmp));

	// bits 7..0 give the wfi type
	switch (wfi_tmp & 0xff) {
#if DEVELOPMENT || DEBUG
	case 2:
		// wfi overhead simulation
		//   31..16 - wfi delay is us
		wfi_flags = (wfi_tmp >> 8) & 0xFF;
		nanoseconds_to_absolutetime(((wfi_tmp >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
		break;
#endif /* DEVELOPMENT || DEBUG */
	}
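
	/*
	 * Illustrative decode (hypothetical boot-arg value): wfi=0x00140302
	 * selects wfi type 2 (overhead simulation), wfi_flags = 0x03 (flush
	 * L1s and TLBs after wfi), and a delay field of 0x14 (20) taken from
	 * bits 31..16. Note the conversion above scales that field with
	 * NSEC_PER_MSEC, although the comment describes it as microseconds.
	 */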
	PE_parse_boot_argn("idle_wfe_to_deadline", &idle_wfe_to_deadline, sizeof(idle_wfe_to_deadline));

	ResetHandlerData.assist_reset_handler = 0;
	ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);

#ifdef MONITOR
	monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif

	// Determine if we are on production or debug chip
	if (kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) {
		if (kSuccess == DTGetProperty(entry, "effective-production-status-ap", &prop, &size)) {
			bcopy(prop, &production, size);
		}
	}
	if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		// Enable coresight debug registers on debug-fused chips
		coresight_debug_enabled = TRUE;
#endif
	}

	start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
	resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);

	if (cpu_data_ptr == &BootCpuData) {
		static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
		} else {
			panic("No sleep token buffer");
		}

		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
		    SleepToken_low_paddr, sizeof(SleepToken));
		flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
	}

	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}
_Atomic uint32_t cpu_idle_count = 0;

void
machine_track_platform_idle(boolean_t entry)
{
	if (entry) {
		os_atomic_inc(&cpu_idle_count, relaxed);
	} else {
		os_atomic_dec(&cpu_idle_count, relaxed);
	}
}
void
sleep_token_buffer_init(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the stpage node in the device tree */
		if (kSuccess != DTLookupEntry(0, "stram", &entry)) {
			return;
		}

		if (kSuccess != DTGetProperty(entry, "reg", (void **)&prop, (unsigned int *)&size)) {
			return;
		}

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t *)prop)[0], ((vm_size_t *)prop)[1]);