/*
 * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_ldebug.h>

#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <kern/processor.h>
#include <kern/startup.h>
#include <kern/debug.h>
#include <prng/random.h>
#include <machine/machine_routines.h>
#include <machine/commpage.h>
#if HIBERNATION
#include <machine/pal_hibernate.h>
#endif /* HIBERNATION */
/* ARM64_TODO unify boot.h */
#if __arm64__
#include <pexpert/arm64/apple_arm64_common.h>
#include <pexpert/arm64/boot.h>
#elif __arm__
#include <pexpert/arm/boot.h>
#else
#error Unsupported arch
#endif
#include <pexpert/arm/consistent_debug.h>
#include <pexpert/device_tree.h>
#include <arm/proc_reg.h>
#include <arm/caches_internal.h>
#include <arm/cpu_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpuid_internal.h>
#include <arm/io_map_entries.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <vm/vm_map.h>

#include <libkern/kernel_mach_header.h>
#include <libkern/stack_protector.h>
#include <libkern/section_keywords.h>
#include <san/kasan.h>
#include <sys/kdebug.h>

#include <pexpert/pexpert.h>

#include <console/serial_protos.h>

#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#if HIBERNATION
#include <IOKit/IOPlatformExpert.h>
#endif /* HIBERNATION */
extern void patch_low_glo(void);
extern int serial_init(void);
extern void sleep_token_buffer_init(void);

extern vm_offset_t intstack_top;
#if __arm64__
extern vm_offset_t excepstack_top;
#else
extern vm_offset_t fiqstack_top;
#endif

extern const char version[];
extern const char version_variant[];
extern int disableConsoleOutput;

int pc_trace_buf[PC_TRACE_BUF_SIZE] = {0};
int pc_trace_cnt = PC_TRACE_BUF_SIZE;

bool need_wa_rdar_55577508 = false;
SECURITY_READ_ONLY_LATE(bool) static_kernelcache = false;
#if HAS_BP_RET
/* Enable both branch target retention (0x2) and branch direction retention (0x1) across sleep */
uint32_t bp_ret = 3;
extern void set_bp_ret(void);
#endif

#if INTERRUPT_MASKED_DEBUG
boolean_t interrupt_masked_debug = 1;
/* the following are in mach timebase units */
uint64_t interrupt_masked_timeout = 0xd0000;
uint64_t stackshot_interrupt_masked_timeout = 0xf9999;
#endif
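/*
 * For scale (assuming the 24 MHz timebase typical of Apple SoCs, which this
 * file does not itself guarantee): 0xd0000 is 851,968 ticks, roughly 35.5 ms,
 * and 0xf9999 is 1,022,361 ticks, roughly 42.6 ms.
 */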
boot_args const_boot_args __attribute__((section("__DATA, __const")));
boot_args *BootArgs __attribute__((section("__DATA, __const")));

TUNABLE(uint32_t, arm_diag, "diag", 0);
#if __arm64__
static unsigned cpus_defeatures = 0x0;
extern void cpu_defeatures_set(unsigned int);
#endif
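/*
 * As used below, the "cpus_defeatures" boot-arg packs one 4-bit defeature
 * mask per CPU: nibble N applies to CPU N, with nibble 0 consumed by the
 * boot CPU in arm_init().
 */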
#if __arm64__ && __ARM_GLOBAL_SLEEP_BIT__
extern volatile boolean_t arm64_stall_sleep;
#endif

extern boolean_t force_immediate_debug_halt;

#if defined(HAS_APPLE_PAC)
SECURITY_READ_ONLY_LATE(boolean_t) diversify_user_jop = TRUE;
#endif /* defined(HAS_APPLE_PAC) */

SECURITY_READ_ONLY_LATE(uint64_t) gDramBase;
SECURITY_READ_ONLY_LATE(uint64_t) gDramSize;

void arm_init(boot_args *args);

#if __arm64__
unsigned int page_shift_user32; /* for page_size as seen by a 32-bit task */

extern void configure_misc_apple_boot_args(void);
extern void configure_misc_apple_regs(void);
#endif /* __arm64__ */
#define dyldLogFunc(msg, ...)
#include <mach/dyld_kernel_fixups.h>

extern uint32_t __thread_starts_sect_start[] __asm("section$start$__TEXT$__thread_starts");
extern uint32_t __thread_starts_sect_end[] __asm("section$end$__TEXT$__thread_starts");
#if defined(HAS_APPLE_PAC)
extern void OSRuntimeSignStructors(kernel_mach_header_t *header);
extern void OSRuntimeSignStructorsInFileset(kernel_mach_header_t *header);
#endif /* defined(HAS_APPLE_PAC) */

extern vm_offset_t vm_kernel_slide;
extern vm_offset_t segLOWESTKC, segHIGHESTKC, segLOWESTROKC, segHIGHESTROKC;
extern vm_offset_t segLOWESTAuxKC, segHIGHESTAuxKC, segLOWESTROAuxKC, segHIGHESTROAuxKC;
extern vm_offset_t segLOWESTRXAuxKC, segHIGHESTRXAuxKC, segHIGHESTNLEAuxKC;
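/*
 * An empty __TEXT,__thread_starts section means the linker emitted no
 * threaded-rebase starts; a non-empty one marks a static kernelcache whose
 * rebase chains are walked in arm_slide_rebase_and_sign_image() below.
 */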
static void
arm_slide_rebase_and_sign_image(void)
{
	kernel_mach_header_t *k_mh, *kc_mh = NULL;
	kernel_segment_command_t *seg;
	uintptr_t slide;

	k_mh = &_mh_execute_header;
	if (kernel_mach_header_is_in_fileset(k_mh)) {
		/*
		 * The kernel is part of a MH_FILESET kernel collection, determine slide
		 * based on first segment's mach-o vmaddr (requires first kernel load
		 * command to be LC_SEGMENT_64 of the __TEXT segment)
		 */
		seg = (kernel_segment_command_t *)((uintptr_t)k_mh + sizeof(*k_mh));
		assert(seg->cmd == LC_SEGMENT_KERNEL);
		slide = (uintptr_t)k_mh - seg->vmaddr;

		/*
		 * The kernel collection linker guarantees that the boot collection mach
		 * header vmaddr is the hardcoded kernel link address (as specified to
		 * ld64 when linking the kernel).
		 */
		kc_mh = (kernel_mach_header_t *)(VM_KERNEL_LINK_ADDRESS + slide);
		assert(kc_mh->filetype == MH_FILESET);

		/*
		 * rebase and sign jops
		 * Note that we can't call any functions before this point, so
		 * we have to hard-code the knowledge that the base of the KC
		 * is the KC's mach-o header. This would change if any
		 * segment's VA started *before* the text segment
		 * (as the HIB segment does on x86).
		 */
		const void *collection_base_pointers[KCNumKinds] = {[0] = kc_mh, };
		kernel_collection_slide((struct mach_header_64 *)kc_mh, collection_base_pointers);

		PE_set_kc_header(KCKindPrimary, kc_mh, slide);

		/*
		 * iBoot doesn't slide load command vmaddrs in an MH_FILESET kernel
		 * collection, so adjust them now, and determine the vmaddr range
		 * covered by read-only segments for the CTRR rorgn.
		 */
		kernel_collection_adjust_mh_addrs((struct mach_header_64 *)kc_mh, slide, false,
		    (uintptr_t *)&segLOWESTKC, (uintptr_t *)&segHIGHESTKC,
		    (uintptr_t *)&segLOWESTROKC, (uintptr_t *)&segHIGHESTROKC,
		    NULL, NULL, NULL);
#if defined(HAS_APPLE_PAC)
		OSRuntimeSignStructorsInFileset(kc_mh);
#endif /* defined(HAS_APPLE_PAC) */
	} else {
		/*
		 * Static kernelcache: iBoot slid kernel MachO vmaddrs, determine slide
		 * using hardcoded kernel link address
		 */
		slide = (uintptr_t)k_mh - VM_KERNEL_LINK_ADDRESS;

		/* rebase and sign jops */
		static_kernelcache = &__thread_starts_sect_end[0] != &__thread_starts_sect_start[0];
		if (static_kernelcache) {
			rebase_threaded_starts(&__thread_starts_sect_start[0],
			    &__thread_starts_sect_end[0],
			    (uintptr_t)k_mh, (uintptr_t)k_mh - slide, slide);
		}
#if defined(HAS_APPLE_PAC)
		OSRuntimeSignStructors(&_mh_execute_header);
#endif /* defined(HAS_APPLE_PAC) */
	}
	/*
	 * Initialize the slide global here to avoid duplicating this logic
	 * in other places.
	 */
	vm_kernel_slide = slide;
}
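/*
 * Worked example with hypothetical addresses: if VM_KERNEL_LINK_ADDRESS is
 * 0xfffffff007004000 and iBoot placed the image's Mach-O header at
 * 0xfffffff00f004000, both branches above compute slide = 0x8000000.
 */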
void
arm_auxkc_init(void *mh, void *base)
{
	/*
	 * The kernel collection linker guarantees that the lowest vmaddr in an
	 * AuxKC collection is 0 (but note that the mach header is higher up since
	 * RW segments precede RO segments in the AuxKC).
	 */
	uintptr_t slide = (uintptr_t)base;
	kernel_mach_header_t *akc_mh = (kernel_mach_header_t *)mh;

	assert(akc_mh->filetype == MH_FILESET);
	PE_set_kc_header_and_base(KCKindAuxiliary, akc_mh, base, slide);

	/* rebase and sign jops */
	const void *collection_base_pointers[KCNumKinds];
	memcpy(collection_base_pointers, PE_get_kc_base_pointers(), sizeof(collection_base_pointers));
	kernel_collection_slide((struct mach_header_64 *)akc_mh, collection_base_pointers);

	kernel_collection_adjust_mh_addrs((struct mach_header_64 *)akc_mh, slide, false,
	    (uintptr_t *)&segLOWESTAuxKC, (uintptr_t *)&segHIGHESTAuxKC, (uintptr_t *)&segLOWESTROAuxKC,
	    (uintptr_t *)&segHIGHESTROAuxKC, (uintptr_t *)&segLOWESTRXAuxKC, (uintptr_t *)&segHIGHESTRXAuxKC,
	    (uintptr_t *)&segHIGHESTNLEAuxKC);
#if defined(HAS_APPLE_PAC)
	OSRuntimeSignStructorsInFileset(akc_mh);
#endif /* defined(HAS_APPLE_PAC) */
}
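/*
 * Because the AuxKC is linked with its lowest vmaddr at 0, the collection's
 * load base doubles as its slide, which is why arm_auxkc_init() passes `base`
 * for both the base and slide arguments of PE_set_kc_header_and_base().
 */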
/*
 *	Routine:	arm_init
 *	Function:	Runs on the boot CPU, once, on entry from iBoot.
 */
void
arm_init(
	boot_args	*args)
{
	unsigned int	maxmem;
	uint32_t	memsize;
	uint64_t	xmaxmem;
	thread_t	thread;

	arm_slide_rebase_and_sign_image();

	/* If kernel integrity is supported, use a constant copy of the boot args. */
	const_boot_args = *args;
	BootArgs = args = &const_boot_args;

	cpu_data_init(&BootCpuData);
#if defined(HAS_APPLE_PAC)
	/* bootstrap cpu process dependent key for kernel has been loaded by start.s */
	BootCpuData.rop_key = KERNEL_ROP_ID;
	BootCpuData.jop_key = ml_default_jop_pid();
#endif /* defined(HAS_APPLE_PAC) */

	PE_init_platform(FALSE, args); /* Get platform expert set up */
#if __arm64__
	wfe_timeout_configure();

	configure_misc_apple_boot_args();
	configure_misc_apple_regs();

	/*
	 * Select the advertised kernel page size.
	 */
	if (args->memSize > 1ULL * 1024 * 1024 * 1024) {
		/*
		 * arm64 device with > 1GB of RAM:
		 * kernel uses 16KB pages.
		 */
		PAGE_SHIFT_CONST = PAGE_MAX_SHIFT;
	} else {
		/*
		 * arm64 device with <= 1GB of RAM:
		 * kernel uses hardware page size
		 * (4KB for H6/H7, 16KB for H8+).
		 */
		PAGE_SHIFT_CONST = ARM_PGSHIFT;
	}

	/* 32-bit apps always see 16KB page size */
	page_shift_user32 = PAGE_MAX_SHIFT;
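	/*
	 * PAGE_MAX_SHIFT is 14 on arm64, so the advertised page size here is
	 * 1 << 14 = 16384 bytes; ARM_PGSHIFT is the hardware TTE page shift
	 * (12 on 4KB-page configurations).
	 */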
	if (PE_parse_boot_argn("cpus_defeatures", &cpus_defeatures, sizeof(cpus_defeatures))) {
		if ((cpus_defeatures & 0xF) != 0) {
			cpu_defeatures_set(cpus_defeatures & 0xF);
		}
	}
#endif /* __arm64__ */

	ml_parse_cpu_topology();
	master_cpu = ml_get_boot_cpu_number();
	assert(master_cpu >= 0 && master_cpu <= ml_get_max_cpu_number());

	BootCpuData.cpu_number = (unsigned short)master_cpu;
#if __arm64__
	BootCpuData.cpu_exc_vectors = (vm_offset_t)&ExceptionVectorsTable;
#endif
	BootCpuData.intstack_top = (vm_offset_t) &intstack_top;
	BootCpuData.istackptr = BootCpuData.intstack_top;
#if __arm64__
	BootCpuData.excepstack_top = (vm_offset_t) &excepstack_top;
	BootCpuData.excepstackptr = BootCpuData.excepstack_top;
#else
	BootCpuData.fiqstack_top = (vm_offset_t) &fiqstack_top;
	BootCpuData.fiqstackptr = BootCpuData.fiqstack_top;
#endif
	BootCpuData.cpu_console_buf = (void *)NULL;
	CpuDataEntries[master_cpu].cpu_data_vaddr = &BootCpuData;
	CpuDataEntries[master_cpu].cpu_data_paddr = (void *)((uintptr_t)(args->physBase)
	    + ((uintptr_t)&BootCpuData
	    - (uintptr_t)(args->virtBase)));
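	/*
	 * The physical address above is derived by rebasing &BootCpuData from
	 * the kernel's virtual window onto the physical window described by
	 * iBoot: paddr = physBase + (vaddr - virtBase).
	 */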
	thread = thread_bootstrap();
	thread->machine.CpuDatap = &BootCpuData;
	thread->machine.pcpu_data_base = (vm_offset_t)0;
	machine_set_current_thread(thread);

	/*
	 * Preemption is enabled for this thread so that it can lock mutexes without
	 * tripping the preemption check. In reality scheduling is not enabled until
	 * this thread completes, and there are no other threads to switch to, so
	 * preemption level is not really meaningful for the bootstrap thread.
	 */
	thread->machine.preemption_count = 0;
#if __arm__ && __ARM_USER_PROTECT__
	{
		unsigned int ttbr0_val, ttbr1_val;
		__asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val));
		__asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val));
		thread->machine.uptw_ttb = ttbr0_val;
		thread->machine.kptw_ttb = ttbr1_val;
	}
#endif
	processor_t boot_processor = PERCPU_GET_MASTER(processor);
	boot_processor->kernel_timer = &thread->system_timer;
	boot_processor->thread_timer = &thread->system_timer;

	rtclock_early_init();

	kernel_debug_string_early("kernel_startup_bootstrap");
	kernel_startup_bootstrap();
	/*
	 * Initialize the timer callout world
	 */
	timer_call_init();

	processor_bootstrap();
	if (PE_parse_boot_argn("maxmem", &maxmem, sizeof(maxmem))) {
		xmaxmem = (uint64_t) maxmem * (1024 * 1024);
	} else if (PE_get_default("hw.memsize", &memsize, sizeof(memsize))) {
		xmaxmem = (uint64_t) memsize;
	} else {
		xmaxmem = 0;
	}
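	/* maxmem is in MB, so e.g. maxmem=1024 yields xmaxmem = 1 GB (0x40000000). */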
#if INTERRUPT_MASKED_DEBUG
	int wdt_boot_arg = 0;
	/* Disable if WDT is disabled or no_interrupt_mask_debug in boot-args */
	if (PE_parse_boot_argn("no_interrupt_masked_debug", &interrupt_masked_debug,
	    sizeof(interrupt_masked_debug)) || (PE_parse_boot_argn("wdt", &wdt_boot_arg,
	    sizeof(wdt_boot_arg)) && (wdt_boot_arg == -1)) || kern_feature_override(KF_INTERRUPT_MASKED_DEBUG_OVRD)) {
		interrupt_masked_debug = 0;
	}

	PE_parse_boot_argn("interrupt_masked_debug_timeout", &interrupt_masked_timeout, sizeof(interrupt_masked_timeout));
#endif
#if HAS_BP_RET
	PE_parse_boot_argn("bpret", &bp_ret, sizeof(bp_ret));
	set_bp_ret(); // Apply branch predictor retention settings to boot CPU
#endif
	PE_parse_boot_argn("immediate_NMI", &force_immediate_debug_halt, sizeof(force_immediate_debug_halt));
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif /* __ARM_PAN_AVAILABLE__ */
	arm_vm_init(xmaxmem, args);

	if (debug_boot_arg) {
		patch_low_glo();
	}
#if __arm64__ && WITH_CLASSIC_S2R
	sleep_token_buffer_init();
#endif

	PE_consistent_debug_inherit();
	/*
	 * rdar://54622819 Insufficient HSP purge window can cause incorrect translation when ASID and TTBR base address is changed at same time
	 * (original info on HSP purge window issues can be found in rdar://55577508)
	 * We need a flag to check for this, so calculate and set it here. We'll use it in machine_switch_amx_context().
	 */
	need_wa_rdar_55577508 = cpuid_get_cpufamily() == CPUFAMILY_ARM_LIGHTNING_THUNDER;
#ifndef RC_HIDE_XNU_FIRESTORM
	need_wa_rdar_55577508 |= (cpuid_get_cpufamily() == CPUFAMILY_ARM_FIRESTORM_ICESTORM && get_arm_cpu_version() == CPU_VERSION_A0);
#endif /* !RC_HIDE_XNU_FIRESTORM */
	/* setup debugging output if one has been chosen */
	kernel_startup_initialize_upto(STARTUP_SUB_KPRINTF);
	kprintf("kprintf initialized\n");
	serialmode = 0;
	if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) {
		/* Do we want a serial keyboard and/or console? */
		kprintf("Serial mode specified: %08X\n", serialmode);
		int force_sync = serialmode & SERIALMODE_SYNCDRAIN;
		if (force_sync || PE_parse_boot_argn("drain_uart_sync", &force_sync, sizeof(force_sync))) {
			if (force_sync) {
				serialmode |= SERIALMODE_SYNCDRAIN;
				kprintf(
					"WARNING: Forcing uart driver to output synchronously."
					"printf()s/IOLogs will impact kernel performance.\n"
					"You are advised to avoid using 'drain_uart_sync' boot-arg.\n");
			}
		}
	}
	if (kern_feature_override(KF_SERIAL_OVRD)) {
		serialmode = 0;
	}
	if (serialmode & SERIALMODE_OUTPUT) { /* Start serial if requested */
		(void)switch_to_serial_console(); /* Switch into serial mode */
		disableConsoleOutput = FALSE; /* Allow printfs to happen */
	}
	/* setup console output */
	PE_init_printf(FALSE);
	cpu_machine_idle_init(TRUE);
#if (__ARM_ARCH__ == 7)
	if (arm_diag & 0x8000) {
		set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC);
	}
#endif
	PE_init_platform(TRUE, &BootCpuData);

	cpu_timebase_init(TRUE);

	fiq_context_init(TRUE);

#if HIBERNATION
	pal_hib_init();
#endif /* HIBERNATION */
	/*
	 * gPhysBase/Size only represent kernel-managed memory. These globals represent
	 * the actual DRAM base address and size as reported by iBoot through the
	 * device tree.
	 */
	DTEntry chosen;
	unsigned int dt_entry_size;
	unsigned long const *dram_base;
	unsigned long const *dram_size;
	if (SecureDTLookupEntry(NULL, "/chosen", &chosen) != kSuccess) {
		panic("%s: Unable to find 'chosen' DT node", __FUNCTION__);
	}

	if (SecureDTGetProperty(chosen, "dram-base", (void const **)&dram_base, &dt_entry_size) != kSuccess) {
		panic("%s: Unable to find 'dram-base' entry in the 'chosen' DT node", __FUNCTION__);
	}

	if (SecureDTGetProperty(chosen, "dram-size", (void const **)&dram_size, &dt_entry_size) != kSuccess) {
		panic("%s: Unable to find 'dram-size' entry in the 'chosen' DT node", __FUNCTION__);
	}

	gDramBase = *dram_base;
	gDramSize = *dram_size;
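	/*
	 * With these set, a physical address pa lies in DRAM exactly when
	 * (pa - gDramBase) < gDramSize (a sketch of the intended range check;
	 * the actual consumers of these globals live elsewhere).
	 */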
	/*
	 * Initialize the stack protector for all future calls
	 * to C code. Since kernel_bootstrap() eventually
	 * switches stack context without returning through this
	 * function, we do not risk failing the check even though
	 * we mutate the guard word during execution.
	 */
	__stack_chk_guard = (unsigned long)early_random();
	/* Zero a byte of the protector to guard
	 * against string vulnerabilities
	 */
	__stack_chk_guard &= ~(0xFFULL << 8);
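	/*
	 * Keeping one guard byte zero means an unbounded string copy cannot
	 * reproduce the full guard value, since the copy terminates at the NUL
	 * (standard stack-canary hardening).
	 */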
	machine_startup(args);
}
/*
 *	Routine:	arm_init_cpu
 *	Function:
 *		Runs on S2R resume (all CPUs) and SMP boot (non-boot CPUs only).
 */
void
arm_init_cpu(
	cpu_data_t *cpu_data_ptr)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif

#if __arm64__
	configure_misc_apple_regs();
#endif
	cpu_data_ptr->cpu_flags &= ~SleepState;
#if defined(ARMA7)
	cpu_data_ptr->cpu_CLW_active = 1;
#endif

	machine_set_current_thread(cpu_data_ptr->cpu_active_thread);
#if HIBERNATION
	if ((cpu_data_ptr == &BootCpuData) && (gIOHibernateState == kIOHibernateStateWakingFromHibernate)) {
		// the "normal" S2R code captures wake_abstime too early, so on a hibernation resume we fix it up here
		extern uint64_t wake_abstime;
		wake_abstime = gIOHibernateCurrentHeader->lastHibAbsTime;

		// since the hw clock stops ticking across hibernation, we need to apply an offset;
		// iBoot computes this offset for us and passes it via the hibernation header
		extern uint64_t hwclock_conttime_offset;
		hwclock_conttime_offset = gIOHibernateCurrentHeader->hwClockOffset;

		// during hibernation, we captured the idle thread's state from inside the PPL context, so we have to
		// fix up its preemption count
		unsigned int expected_preemption_count = (gEnforceQuiesceSafety ? 2 : 1);
		if (cpu_data_ptr->cpu_active_thread->machine.preemption_count != expected_preemption_count) {
			panic("unexpected preemption count %u on boot cpu thread (should be %u)\n",
			    cpu_data_ptr->cpu_active_thread->machine.preemption_count,
			    expected_preemption_count);
		}
		cpu_data_ptr->cpu_active_thread->machine.preemption_count--;
	}
#endif /* HIBERNATION */
#if __arm64__
	pmap_clear_user_ttb();
#endif

	cpu_machine_idle_init(FALSE);
#if (__ARM_ARCH__ == 7)
	if (arm_diag & 0x8000) {
		set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC);
	}
#endif
#if __arm64__
	if ((cpus_defeatures & (0xF << 4 * cpu_data_ptr->cpu_number)) != 0) {
		cpu_defeatures_set((cpus_defeatures >> 4 * cpu_data_ptr->cpu_number) & 0xF);
	}
#endif
	/* Initialize the timebase before serial_init, as some serial
	 * drivers use mach_absolute_time() to implement rate control
	 */
	cpu_timebase_init(FALSE);
	if (cpu_data_ptr == &BootCpuData) {
#if __arm64__ && __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * Prevent CPUs from going into deep sleep until all
		 * CPUs are ready to do so.
		 */
		arm64_stall_sleep = TRUE;
#endif
		(void) serial_init();
		PE_init_platform(TRUE, NULL);
		commpage_update_timebase();
	}
	fiq_context_init(TRUE);
	cpu_data_ptr->rtcPop = EndOfAllTime;
	timer_resync_deadlines();

#if DEVELOPMENT || DEBUG
	PE_arm_debug_enable_trace();
#endif
	kprintf("arm_cpu_init(): cpu %d online\n", cpu_data_ptr->cpu_number);
	if (cpu_data_ptr == &BootCpuData) {
		if (kdebug_enable == 0) {
			__kdebug_only uint64_t elapsed = kdebug_wake();
			KDBG(IOKDBG_CODE(DBG_HIBERNATE, 15), mach_absolute_time() - elapsed);
		}

#if CONFIG_TELEMETRY
		bootprofile_wake_from_sleep();
#endif /* CONFIG_TELEMETRY */
	}

#if MONOTONIC && defined(__arm64__)
	mt_wake_per_core();
#endif /* MONOTONIC && defined(__arm64__) */
#if defined(KERNEL_INTEGRITY_CTRR)
	if (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKED) {
		lck_spin_lock(&ctrr_cpu_start_lck);
		ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKED;
		thread_wakeup(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]);
		lck_spin_unlock(&ctrr_cpu_start_lck);
	}
#endif
}
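/*
 * In the CTRR block above, the first CPU of a cluster to come back up records
 * that the cluster's CTRR lockdown is complete and wakes any thread blocked
 * waiting on that transition; later CPUs in the same cluster skip the update.
 */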
/*
 *	Routine:	arm_init_idle_cpu
 *	Function:	Resume from non-retention WFI. Called from the reset vector.
 */
void __attribute__((noreturn))
arm_init_idle_cpu(
	cpu_data_t *cpu_data_ptr)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif
#if defined(ARMA7)
	cpu_data_ptr->cpu_CLW_active = 1;
#endif

	machine_set_current_thread(cpu_data_ptr->cpu_active_thread);

#if __arm64__
	pmap_clear_user_ttb();

	/* Enable asynchronous exceptions */
	__builtin_arm_wsr("DAIFClr", DAIFSC_ASYNCF);
#endif
#if (__ARM_ARCH__ == 7)
	if (arm_diag & 0x8000) {
		set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC);
	}
#endif
#if __arm64__
	if ((cpus_defeatures & (0xF << 4 * cpu_data_ptr->cpu_number)) != 0) {
		cpu_defeatures_set((cpus_defeatures >> 4 * cpu_data_ptr->cpu_number) & 0xF);
	}
#endif

	fiq_context_init(FALSE);