/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * cpu specific routines
 */
#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */
extern boolean_t idle_enable;
extern uint64_t wake_abstime;

void sleep_token_buffer_init(void);

extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;

#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

extern void __attribute__((noreturn)) arm64_prepare_for_sleep(void);
extern void arm64_force_wfi_clock_gate(void);
#if (defined(APPLECYCLONE) || defined(APPLETYPHOON))
// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
extern void cyclone_typhoon_prepare_for_wfi(void);
extern void cyclone_typhoon_return_from_wfi(void);
#endif

vm_address_t start_cpu_paddr;
sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
	.tcr_el1 = TCR_EL1_BOOT,
};
// wfi - wfi mode
//  0 : disabled
//  1 : normal
//  2 : overhead simulation (delay & flags)
static int wfi = 1;

#if DEVELOPMENT || DEBUG

// wfi_flags
//  1 << 0 : flush L1s
//  1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */
#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
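/*
 * Read back-to-front (little-endian byte order), the token arrays above
 * spell "SUSPMOSX" and "RUNNMOSX"; iBoot appears to compare the sleep
 * token buffer against these magic values to distinguish a warm boot
 * (wake from suspend) from a cold boot.
 */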
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
static boolean_t coresight_debug_enabled = FALSE;
#if defined(CONFIG_XNUPOST)
void arm64_ipi_test_callback(void *);

void
arm64_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number;
}
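/*
 * IPI test protocol: for each CPU i, the slot arm64_ipi_test_data[i] is
 * primed with a value other than i, a cross-call is issued, the callback
 * above overwrites the slot with the responder's cpu_number, and the
 * caller spins until it observes that write (panicking after timeout_ms).
 */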
uint64_t arm64_ipi_test_data[MAX_CPUS];

void
arm64_ipi_test(void)
{
	volatile uint64_t *ipi_test_data;
	uint32_t timeout_ms = 100;
	uint64_t then, now, delta;
	int current_cpu_number = getCpuDatap()->cpu_number;

	/*
	 * probably the only way to have this on most systems is with the
	 * cpus=1 boot-arg, but nonetheless, if we only have 1 CPU active,
	 * IPI is not available
	 */
	if (real_ncpus == 1) {
		return;
	}

	for (unsigned int i = 0; i < MAX_CPUS; ++i) {
		ipi_test_data = &arm64_ipi_test_data[i];
		*ipi_test_data = ~i;
		kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		then = mach_absolute_time();

		while (*ipi_test_data != i) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d tried to IPI CPU %d but didn't get correct response within %dms, response: %llx", current_cpu_number, i, timeout_ms, *ipi_test_data);
			}
		}
	}
}
#endif /* defined(CONFIG_XNUPOST) */
static void
configure_coresight_registers(cpu_data_t *cdp)
{
	uint64_t addr;
	int i;

	assert(cdp);

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide cpu_regmap_paddr, assume that coresight registers are not
	 * supported.
	 */
	if (cdp->cpu_regmap_paddr) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			/* Skip CTI; these registers are debug-only (they are
			 * not present on production hardware), and there is
			 * at least one known Cyclone erratum involving CTI
			 * (rdar://12802966). We have no known clients that
			 * need the kernel to unlock CTI, so it is safer
			 * to avoid doing the access.
			 */
			if (i == CORESIGHT_CTI) {
				continue;
			}
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) {
				continue;
			}

			if (!cdp->coresight_base[i]) {
				addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
				cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);

				/*
				 * At this point, failing to io map the
				 * registers is considered as an error.
				 */
				if (!cdp->coresight_base[i]) {
					panic("unable to ml_io_map coresight regions");
				}
			}
			/* Unlock EDLAR, CTILAR, PMLAR */
			if (i != CORESIGHT_UTT) {
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			}
		}
	}
}
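/*
 * The DBGLAR write above uses ARM_DBG_LOCK_ACCESS_KEY, the architected
 * CoreSight lock access key (0xC5ACCE55); writing it to a component's
 * Lock Access Register unlocks that component's memory-mapped registers
 * for subsequent writes.
 */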
/*
 *	Routine:	cpu_bootstrap
 *	Function:
 */
void
cpu_bootstrap(void)
{
}

/*
 *	Routine:	cpu_sleep
 *	Function:
 */
void
cpu_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;
#if MONOTONIC
	mt_cpu_down(cpu_data_ptr);
#endif /* MONOTONIC */

	CleanPoC_Dcache();

	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
}
/*
 *	Routine:	cpu_idle
 *	Function:
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;

	if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled)) {
		Idle_load_context();
	}
	if (!SetIdlePop()) {
		Idle_load_context();
	}
	lastPop = cpu_data_ptr->rtcPop;

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug) {
		arm_debug_set(NULL);
	}
	cpu_data_ptr->cpu_user_debug = NULL;

	if (cpu_data_ptr->cpu_idle_notify) {
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop) {
			SetIdlePop();
		}
	}

#if MONOTONIC
	mt_cpu_idle(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi) {
		platform_cache_idle_enter();

#if DEVELOPMENT || DEBUG
		// When simulating wfi overhead,
		// force wfi to clock gating only
		if (wfi == 2) {
			arm64_force_wfi_clock_gate();
		}
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		cyclone_typhoon_prepare_for_wfi();
#endif

		__builtin_arm_dsb(DSB_SY);
		__builtin_arm_wfi();

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		cyclone_typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
		// Handle wfi overhead simulation
		if (wfi == 2) {
			uint64_t deadline;

			// Calculate wfi delay deadline
			clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

			// Flush L1 caches
			if ((wfi_flags & 1) != 0) {
				InvalidatePoU_Icache();
				FlushPoC_Dcache();
			}

			// Flush TLBs
			if ((wfi_flags & 2) != 0) {
				flush_core_tlb();
			}

			// Wait for the balance of the wfi delay
			clock_delay_until(deadline);
		}
#endif /* DEVELOPMENT || DEBUG */

		platform_cache_idle_exit();
	}

	ClearIdlePop(TRUE);

	cpu_idle_exit(FALSE);
}
/*
 *	Routine:	cpu_idle_exit
 *	Function:
 */
void
cpu_idle_exit(boolean_t from_reset)
{
	uint64_t new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	if (from_reset) {
		configure_coresight_registers(cpu_data_ptr);
	}

#if MONOTONIC
	mt_cpu_run(cpu_data_ptr);
#endif /* MONOTONIC */

	pmap_switch_user_ttb(cpu_data_ptr->cpu_active_thread->map->pmap);

	if (cpu_data_ptr->cpu_idle_notify) {
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}
void
cpu_init(void)
{
	cpu_data_t *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	assert(exception_stack_pointer() != 0);

	if (cdp->cpu_type != CPU_TYPE_ARM64) {
		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_cacheid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}
		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
	cdp->cpu_stat.timer_cnt_wake = 0;
	cdp->cpu_stat.pmi_cnt_wake = 0;
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if MONOTONIC
	mt_wake_per_core();
#endif /* MONOTONIC */
}
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t irq_stack = 0;
	vm_offset_t exc_stack = 0;

	kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
	    INTSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu interrupt stack\n");
	}

	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

	kr = kernel_memory_allocate(kernel_map, &exc_stack,
	    EXCEPSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu exception stack\n");
	}

	cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
	cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;
}
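/*
 * Each allocation above is laid out as
 *   [guard page][INTSTACK_SIZE or EXCEPSTACK_SIZE bytes][guard page]
 * courtesy of KMA_GUARD_FIRST/KMA_GUARD_LAST, so the usable stack top is
 * base + PAGE_SIZE + stack size, and both overflow and underflow fault.
 */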
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if (cpu_data_ptr == &BootCpuData) {
		return;
	}

	cpu_processor_free(cpu_data_ptr->cpu_processor);
	(kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
	(kfree)((void *)(cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE), EXCEPSTACK_SIZE);
	kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
}
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->interrupts_enabled = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = (void *) 0;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;

	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = (void *) 0;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

#if DEBUG || DEVELOPMENT
	cpu_data_ptr->failed_xcall = NULL;
	cpu_data_ptr->failed_signal = 0;
	cpu_data_ptr->failed_signal_count = 0;
#endif

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

	pmap_cpu_data_t *pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;

	for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) {
		pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0;
	}
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */
}
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu = cpu_data_ptr->cpu_number;

#if KASAN
	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}
#endif

	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;
}
kern_return_t
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;

	kprintf("cpu_start() cpu: %d\n", cpu);

	if (cpu == cpu_number()) {
		cpu_machine_init();
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;

		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

		cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;

		if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL) {
			first_thread = cpu_data_ptr->cpu_processor->next_thread;
		} else {
			first_thread = cpu_data_ptr->cpu_processor->idle_thread;
		}
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;

		configure_coresight_registers(cpu_data_ptr);

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
	}

	return KERN_SUCCESS;
}
void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData)) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase. It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = 0x0UL;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}
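/*
 * Concretely, the base timebase set above makes
 *   mach_absolute_time() = ml_get_hwclock() + rtclock_base_abstime
 *                        = ml_get_hwclock() + (wake_abstime - hwclock_at_wake)
 * which evaluates to wake_abstime at the instant the boot CPU runs this
 * code, whether or not the hardware timebase reset across sleep.
 */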
int
cpu_cluster_id(void)
{
	return getCpuDatap()->cpu_cluster_id;
}
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		int cpu;
		int max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) {
				continue;
			}

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
				;
			}
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
	} else {
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
#if WITH_CLASSIC_S2R
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoCs
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector, which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		} else {
			panic("No sleep token buffer");
		}
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * tell debugger to not prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if MONOTONIC
		mt_sleep();
#endif /* MONOTONIC */
		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * With the exception of the CPU revisions listed above, our ARM64 CPUs have a
		 * global register to manage entering deep sleep, as opposed to a per-CPU
		 * register. We cannot update this register until all CPUs are ready to enter
		 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
		 * (by idling), it will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which CPU0 will
		 * manage.
		 */
		while (arm64_stall_sleep) {
			__builtin_arm_wfe();
		}
#endif
		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * tell debugger to not prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	}
}
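/*
 * Note on the stall loop above: secondary CPUs wait in WFE rather than
 * spinning; any WFE wake-up (the timer event stream included) is treated
 * as spurious and the loop simply re-checks arm64_stall_sleep until the
 * boot CPU clears it under __ARM_GLOBAL_SLEEP_BIT__.
 */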
void
cpu_machine_idle_init(boolean_t from_boot)
{
	static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		unsigned long jtag = 0;
		int wfi_tmp = 1;
		uint32_t production = 1;
		DTEntry entry;

		if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
			if (jtag != 0) {
				idle_enable = FALSE;
			} else {
				idle_enable = TRUE;
			}
		} else {
			idle_enable = TRUE;
		}

		PE_parse_boot_argn("wfi", &wfi_tmp, sizeof(wfi_tmp));

		// bits 7..0 give the wfi type
		switch (wfi_tmp & 0xff) {
		case 0:
			// disable wfi
			wfi = 0;
			break;

#if DEVELOPMENT || DEBUG
		case 2:
			// wfi overhead simulation
			// 31..16 - wfi delay (scaled by NSEC_PER_MSEC, i.e. in ms)
			// 15..8  - flags
			// 7..0   - 2
			wfi = 2;
			wfi_flags = (wfi_tmp >> 8) & 0xFF;
			nanoseconds_to_absolutetime(((wfi_tmp >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
			break;
#endif /* DEVELOPMENT || DEBUG */

		case 1:
		default:
			// do nothing
			break;
		}

		ResetHandlerData.assist_reset_handler = 0;
		ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);

#ifdef MONITOR
		monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif /* MONITOR */

		// Determine if we are on production or debug chip
		if (kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) {
			unsigned int size;
			void *prop;

			if (kSuccess == DTGetProperty(entry, "effective-production-status-ap", &prop, &size)) {
				if (size == 4) {
					bcopy(prop, &production, size);
				}
			}
		}
		if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
			// Enable coresight debug registers on debug-fused chips
			coresight_debug_enabled = TRUE;
#endif
		}

		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
		resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

#if WITH_CLASSIC_S2R
	if (cpu_data_ptr == &BootCpuData) {
		static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
		} else {
			panic("No sleep token buffer");
		}

		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
		    SleepToken_low_paddr, sizeof(SleepToken));
		flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
	}
#endif

	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}
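/*
 * Example of the wfi boot-arg encoding parsed above: wfi=0x00640201
 * selects type 2 (overhead simulation) with wfi_flags = 0x02 (flush TLBs)
 * and a simulated delay of 0x0064 = 100 units, scaled by NSEC_PER_MSEC
 * (i.e. 100ms).
 */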
_Atomic uint32_t cpu_idle_count = 0;

void
machine_track_platform_idle(boolean_t entry)
{
	if (entry) {
		(void)__c11_atomic_fetch_add(&cpu_idle_count, 1, __ATOMIC_RELAXED);
	} else {
		(void)__c11_atomic_fetch_sub(&cpu_idle_count, 1, __ATOMIC_RELAXED);
	}
}
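/*
 * cpu_idle_count appears to be consumed only as a statistic, so relaxed
 * atomics suffice; no ordering with the idle entry/exit paths is implied.
 */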
#if WITH_CLASSIC_S2R
void
sleep_token_buffer_init(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	DTEntry entry;
	size_t size;
	vm_offset_t *prop;

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the stpage node in the device tree */
		if (kSuccess != DTLookupEntry(0, "stram", &entry)) {
			return;
		}

		if (kSuccess != DTGetProperty(entry, "reg", (void **)&prop, (unsigned int *)&size)) {
			return;
		}

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t *)prop)[0], ((vm_size_t *)prop)[1]);
	}
}
#endif