/*
 * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_ldebug.h>

#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <kern/processor.h>
#include <kern/startup.h>
#include <kern/debug.h>
#include <prng/random.h>
#include <machine/machine_routines.h>
#include <machine/commpage.h>
#if HIBERNATION
#include <machine/pal_hibernate.h>
#endif /* HIBERNATION */

/* ARM64_TODO unify boot.h */
#if __arm64__
#include <pexpert/arm64/apple_arm64_common.h>
#include <pexpert/arm64/boot.h>
#elif __arm__
#include <pexpert/arm/boot.h>
#else
#error Unsupported arch
#endif
#include <pexpert/arm/consistent_debug.h>
#include <pexpert/device_tree.h>
#include <arm/proc_reg.h>
#include <arm/caches_internal.h>
#include <arm/cpu_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpuid_internal.h>
#include <arm/io_map_entries.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <vm/vm_map.h>

#include <libkern/kernel_mach_header.h>
#include <libkern/stack_protector.h>
#include <libkern/section_keywords.h>
#include <san/kasan.h>
#include <sys/kdebug.h>

#include <pexpert/pexpert.h>

#include <console/serial_protos.h>
#include <kern/telemetry.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#if HIBERNATION
#include <IOKit/IOPlatformExpert.h>
#endif /* HIBERNATION */
extern void patch_low_glo(void);
extern int serial_init(void);
extern void sleep_token_buffer_init(void);

extern vm_offset_t intstack_top;
extern vm_offset_t excepstack_top;
extern vm_offset_t fiqstack_top;

extern const char version[];
extern const char version_variant[];
extern int disableConsoleOutput;

int pc_trace_buf[PC_TRACE_BUF_SIZE] = {0};
int pc_trace_cnt = PC_TRACE_BUF_SIZE;

bool need_wa_rdar_55577508 = false;
SECURITY_READ_ONLY_LATE(bool) static_kernelcache = false;
/* Enable both branch target retention (0x2) and branch direction retention (0x1) across sleep */
extern void set_bp_ret(void);
#if INTERRUPT_MASKED_DEBUG
boolean_t interrupt_masked_debug = 1;
/* the following are in mach timebase units */
uint64_t interrupt_masked_timeout = 0xd0000;
uint64_t stackshot_interrupt_masked_timeout = 0xf9999;
#endif /* INTERRUPT_MASKED_DEBUG */
boot_args const_boot_args __attribute__((section("__DATA, __const")));
boot_args *BootArgs __attribute__((section("__DATA, __const")));

TUNABLE(uint32_t, arm_diag, "diag", 0);

static unsigned cpus_defeatures = 0x0;
extern void cpu_defeatures_set(unsigned int);
#if __arm64__ && __ARM_GLOBAL_SLEEP_BIT__
extern volatile boolean_t arm64_stall_sleep;
#endif /* __arm64__ && __ARM_GLOBAL_SLEEP_BIT__ */

extern boolean_t force_immediate_debug_halt;
SECURITY_READ_ONLY_LATE(boolean_t) diversify_user_jop = TRUE;

SECURITY_READ_ONLY_LATE(uint64_t) gDramBase;
SECURITY_READ_ONLY_LATE(uint64_t) gDramSize;

void arm_init(boot_args *args);
#if __arm64__
unsigned int page_shift_user32; /* for page_size as seen by a 32-bit task */
#endif /* __arm64__ */
#define dyldLogFunc(msg, ...)
#include <mach/dyld_kernel_fixups.h>

extern uint32_t __thread_starts_sect_start[] __asm("section$start$__TEXT$__thread_starts");
extern uint32_t __thread_starts_sect_end[] __asm("section$end$__TEXT$__thread_starts");
#if defined(HAS_APPLE_PAC)
extern void OSRuntimeSignStructors(kernel_mach_header_t *header);
extern void OSRuntimeSignStructorsInFileset(kernel_mach_header_t *header);
#endif /* defined(HAS_APPLE_PAC) */

extern vm_offset_t vm_kernel_slide;
extern vm_offset_t segLOWESTKC, segHIGHESTKC, segLOWESTROKC, segHIGHESTROKC;
extern vm_offset_t segLOWESTAuxKC, segHIGHESTAuxKC, segLOWESTROAuxKC, segHIGHESTROAuxKC;
extern vm_offset_t segLOWESTRXAuxKC, segHIGHESTRXAuxKC, segHIGHESTNLEAuxKC;
static void
arm_slide_rebase_and_sign_image(void)
{
	kernel_mach_header_t *k_mh, *kc_mh = NULL;
	kernel_segment_command_t *seg;
	uintptr_t slide;

	k_mh = &_mh_execute_header;
	if (kernel_mach_header_is_in_fileset(k_mh)) {
		/*
		 * The kernel is part of a MH_FILESET kernel collection, determine slide
		 * based on first segment's mach-o vmaddr (requires first kernel load
		 * command to be LC_SEGMENT_64 of the __TEXT segment)
		 */
		seg = (kernel_segment_command_t *)((uintptr_t)k_mh + sizeof(*k_mh));
		assert(seg->cmd == LC_SEGMENT_KERNEL);
		slide = (uintptr_t)k_mh - seg->vmaddr;

		/*
		 * The kernel collection linker guarantees that the boot collection mach
		 * header vmaddr is the hardcoded kernel link address (as specified to
		 * ld64 when linking the kernel).
		 */
		kc_mh = (kernel_mach_header_t *)(VM_KERNEL_LINK_ADDRESS + slide);
		assert(kc_mh->filetype == MH_FILESET);

		/*
		 * rebase and sign jops
		 * Note that we can't call any functions before this point, so
		 * we have to hard-code the knowledge that the base of the KC
		 * is the KC's mach-o header. This would change if any
		 * segment's VA started *before* the text segment
		 * (as the HIB segment does on x86).
		 */
		const void *collection_base_pointers[KCNumKinds] = {[0] = kc_mh, };
		kernel_collection_slide((struct mach_header_64 *)kc_mh, collection_base_pointers);

		PE_set_kc_header(KCKindPrimary, kc_mh, slide);

		/*
		 * iBoot doesn't slide load command vmaddrs in an MH_FILESET kernel
		 * collection, so adjust them now, and determine the vmaddr range
		 * covered by read-only segments for the CTRR rorgn.
		 */
		kernel_collection_adjust_mh_addrs((struct mach_header_64 *)kc_mh, slide, false,
		    (uintptr_t *)&segLOWESTKC, (uintptr_t *)&segHIGHESTKC,
		    (uintptr_t *)&segLOWESTROKC, (uintptr_t *)&segHIGHESTROKC,
		    NULL, NULL, NULL);
#if defined(HAS_APPLE_PAC)
		OSRuntimeSignStructorsInFileset(kc_mh);
#endif /* defined(HAS_APPLE_PAC) */
	} else {
		/*
		 * Static kernelcache: iBoot slid kernel MachO vmaddrs, determine slide
		 * using hardcoded kernel link address
		 */
		slide = (uintptr_t)k_mh - VM_KERNEL_LINK_ADDRESS;

		/* rebase and sign jops */
		static_kernelcache = &__thread_starts_sect_end[0] != &__thread_starts_sect_start[0];
		if (static_kernelcache) {
			rebase_threaded_starts(&__thread_starts_sect_start[0],
			    &__thread_starts_sect_end[0],
			    (uintptr_t)k_mh, (uintptr_t)k_mh - slide, slide);
		}
#if defined(HAS_APPLE_PAC)
		OSRuntimeSignStructors(&_mh_execute_header);
#endif /* defined(HAS_APPLE_PAC) */
	}

	/*
	 * Initialize slide global here to avoid duplicating this logic in
	 * arm_vm_init().
	 */
	vm_kernel_slide = slide;
}
void
arm_auxkc_init(void *mh, void *base)
{
	/*
	 * The kernel collection linker guarantees that the lowest vmaddr in an
	 * AuxKC collection is 0 (but note that the mach header is higher up since
	 * RW segments precede RO segments in the AuxKC).
	 */
	uintptr_t slide = (uintptr_t)base;
	kernel_mach_header_t *akc_mh = (kernel_mach_header_t *)mh;

	assert(akc_mh->filetype == MH_FILESET);
	PE_set_kc_header_and_base(KCKindAuxiliary, akc_mh, base, slide);

	/* rebase and sign jops */
	const void *collection_base_pointers[KCNumKinds];
	memcpy(collection_base_pointers, PE_get_kc_base_pointers(), sizeof(collection_base_pointers));
	kernel_collection_slide((struct mach_header_64 *)akc_mh, collection_base_pointers);

	kernel_collection_adjust_mh_addrs((struct mach_header_64 *)akc_mh, slide, false,
	    (uintptr_t *)&segLOWESTAuxKC, (uintptr_t *)&segHIGHESTAuxKC, (uintptr_t *)&segLOWESTROAuxKC,
	    (uintptr_t *)&segHIGHESTROAuxKC, (uintptr_t *)&segLOWESTRXAuxKC, (uintptr_t *)&segHIGHESTRXAuxKC,
	    (uintptr_t *)&segHIGHESTNLEAuxKC);
#if defined(HAS_APPLE_PAC)
	OSRuntimeSignStructorsInFileset(akc_mh);
#endif /* defined(HAS_APPLE_PAC) */
}
#if HAS_IC_INVAL_FILTERS
static void
configure_misc_apple_regs(void)
{
	uint64_t actlr, __unused acfg, __unused ahcr;

	actlr = get_aux_control();

#if HAS_IC_INVAL_FILTERS
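	/*
	 * Qualify broadcast I-cache invalidates (IC IVAU / IC IALLU) by translation
	 * regime and VMID, per the AHCR "IC inval filter" enable bits set below.
	 */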
	ahcr = __builtin_arm_rsr64(ARM64_REG_AHCR_EL2);
	ahcr |= AHCR_IC_IVAU_EnRegime;
	ahcr |= AHCR_IC_IVAU_EnVMID;
	ahcr |= AHCR_IC_IALLU_EnRegime;
	ahcr |= AHCR_IC_IALLU_EnVMID;
	__builtin_arm_wsr64(ARM64_REG_AHCR_EL2, ahcr);
#endif /* HAS_IC_INVAL_FILTERS */

#if HAS_IC_INVAL_FILTERS
	actlr |= ACTLR_EL1_IC_IVAU_EnASID;
#endif /* HAS_IC_INVAL_FILTERS */

	set_aux_control(actlr);
}
#endif /* HAS_IC_INVAL_FILTERS */
/*
 *	Routine:	arm_init
 *	Function:	Runs on the boot CPU, once, on entry from iBoot.
 */
void
arm_init(
	boot_args *args)
{
	unsigned int maxmem;
	uint32_t memsize;
	uint64_t xmaxmem;
	thread_t thread;

	arm_slide_rebase_and_sign_image();

	/* If kernel integrity is supported, use a constant copy of the boot args. */
	const_boot_args = *args;
	BootArgs = args = &const_boot_args;

	cpu_data_init(&BootCpuData);
#if defined(HAS_APPLE_PAC)
	/* bootstrap cpu process dependent key for kernel has been loaded by start.s */
	BootCpuData.rop_key = KERNEL_ROP_ID;
	BootCpuData.jop_key = ml_default_jop_pid();
#endif /* defined(HAS_APPLE_PAC) */
	PE_init_platform(FALSE, args); /* Get platform expert set up */
	wfe_timeout_configure();
#if HAS_IC_INVAL_FILTERS
	configure_misc_apple_regs();
#endif /* HAS_IC_INVAL_FILTERS */
#if defined(HAS_APPLE_PAC)
#if DEVELOPMENT || DEBUG
	boolean_t user_jop = TRUE;
	PE_parse_boot_argn("user_jop", &user_jop, sizeof(user_jop));
	if (!user_jop) {
		args->bootFlags |= kBootFlagsDisableUserJOP;
	}
#endif /* DEVELOPMENT || DEBUG */
	boolean_t user_ts_jop = TRUE;
	PE_parse_boot_argn("user_ts_jop", &user_ts_jop, sizeof(user_ts_jop));
	if (!user_ts_jop) {
		args->bootFlags |= kBootFlagsDisableUserThreadStateJOP;
	}
	PE_parse_boot_argn("diversify_user_jop", &diversify_user_jop, sizeof(diversify_user_jop));
#endif /* defined(HAS_APPLE_PAC) */
	/*
	 * Select the advertised kernel page size.
	 */
	if (args->memSize > 1ULL * 1024 * 1024 * 1024) {
		/*
		 * arm64 device with > 1GB of RAM:
		 * kernel uses 16KB pages.
		 */
		PAGE_SHIFT_CONST = PAGE_MAX_SHIFT;
	} else {
		/*
		 * arm64 device with <= 1GB of RAM:
		 * kernel uses hardware page size
		 * (4KB for H6/H7, 16KB for H8+).
		 */
		PAGE_SHIFT_CONST = ARM_PGSHIFT;
	}

	/* 32-bit apps always see 16KB page size */
	page_shift_user32 = PAGE_MAX_SHIFT;
	if (PE_parse_boot_argn("cpus_defeatures", &cpus_defeatures, sizeof(cpus_defeatures))) {
		if ((cpus_defeatures & 0xF) != 0) {
			cpu_defeatures_set(cpus_defeatures & 0xF);
		}
	}
	ml_parse_cpu_topology();

	master_cpu = ml_get_boot_cpu_number();
	assert(master_cpu >= 0 && master_cpu <= ml_get_max_cpu_number());

	BootCpuData.cpu_number = (unsigned short)master_cpu;
	BootCpuData.cpu_exc_vectors = (vm_offset_t)&ExceptionVectorsTable;
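	/* Point the boot CPU at its statically allocated interrupt, exception, and FIQ stacks. */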
	BootCpuData.intstack_top = (vm_offset_t) &intstack_top;
	BootCpuData.istackptr = BootCpuData.intstack_top;
	BootCpuData.excepstack_top = (vm_offset_t) &excepstack_top;
	BootCpuData.excepstackptr = BootCpuData.excepstack_top;
	BootCpuData.fiqstack_top = (vm_offset_t) &fiqstack_top;
	BootCpuData.fiqstackptr = BootCpuData.fiqstack_top;
	BootCpuData.cpu_console_buf = (void *)NULL;
	CpuDataEntries[master_cpu].cpu_data_vaddr = &BootCpuData;
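	/* Also record the physical address of BootCpuData, derived from iBoot's phys/virt base offsets. */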
	CpuDataEntries[master_cpu].cpu_data_paddr = (void *)((uintptr_t)(args->physBase)
	    + ((uintptr_t)&BootCpuData
	    - (uintptr_t)(args->virtBase)));
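	/* Create the bootstrap thread and install it as the current thread so current_thread() works from here on. */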
	thread = thread_bootstrap();
	thread->machine.CpuDatap = &BootCpuData;
	thread->machine.pcpu_data_base = (vm_offset_t)0;
	machine_set_current_thread(thread);

	/*
	 * Preemption is enabled for this thread so that it can lock mutexes without
	 * tripping the preemption check. In reality scheduling is not enabled until
	 * this thread completes, and there are no other threads to switch to, so
	 * preemption level is not really meaningful for the bootstrap thread.
	 */
	thread->machine.preemption_count = 0;
#if __arm__ && __ARM_USER_PROTECT__
	{
		unsigned int ttbr0_val, ttbr1_val;
		__asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val));
		__asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val));
		thread->machine.uptw_ttb = ttbr0_val;
		thread->machine.kptw_ttb = ttbr1_val;
	}
#endif /* __arm__ && __ARM_USER_PROTECT__ */
	processor_t boot_processor = PERCPU_GET_MASTER(processor);
	boot_processor->kernel_timer = &thread->system_timer;
	boot_processor->thread_timer = &thread->system_timer;

	rtclock_early_init();

	kernel_debug_string_early("kernel_startup_bootstrap");
	kernel_startup_bootstrap();
	/*
	 * Initialize the timer callout world
	 */
	timer_call_init();
	processor_bootstrap();
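	/* Determine the memory limit: an explicit "maxmem" boot-arg (in MB) wins, otherwise the platform's hw.memsize default. */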
	if (PE_parse_boot_argn("maxmem", &maxmem, sizeof(maxmem))) {
		xmaxmem = (uint64_t) maxmem * (1024 * 1024);
	} else if (PE_get_default("hw.memsize", &memsize, sizeof(memsize))) {
		xmaxmem = (uint64_t) memsize;
	} else {
		xmaxmem = 0;
	}
#if INTERRUPT_MASKED_DEBUG
	int wdt_boot_arg = 0;
	/* Disable if WDT is disabled or no_interrupt_mask_debug in boot-args */
	if (PE_parse_boot_argn("no_interrupt_masked_debug", &interrupt_masked_debug,
	    sizeof(interrupt_masked_debug)) || (PE_parse_boot_argn("wdt", &wdt_boot_arg,
	    sizeof(wdt_boot_arg)) && (wdt_boot_arg == -1)) || kern_feature_override(KF_INTERRUPT_MASKED_DEBUG_OVRD)) {
		interrupt_masked_debug = 0;
	}

	PE_parse_boot_argn("interrupt_masked_debug_timeout", &interrupt_masked_timeout, sizeof(interrupt_masked_timeout));
#endif /* INTERRUPT_MASKED_DEBUG */
	PE_parse_boot_argn("bpret", &bp_ret, sizeof(bp_ret));
	set_bp_ret(); // Apply branch predictor retention settings to boot CPU

	PE_parse_boot_argn("immediate_NMI", &force_immediate_debug_halt, sizeof(force_immediate_debug_halt));

#if __ARM_PAN_AVAILABLE__
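	/* Enable Privileged Access Never so stray kernel accesses to user addresses fault. */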
	__builtin_arm_wsr("pan", 1);
#endif /* __ARM_PAN_AVAILABLE__ */
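	/* Build the kernel virtual memory map and page tables, capped at xmaxmem if a limit was set. */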
	arm_vm_init(xmaxmem, args);
	if (debug_boot_arg) {
		patch_low_glo();
	}
#if __arm64__ && WITH_CLASSIC_S2R
	sleep_token_buffer_init();
#endif /* __arm64__ && WITH_CLASSIC_S2R */
	PE_consistent_debug_inherit();
	/*
	 * rdar://54622819 Insufficient HSP purge window can cause incorrect translation when ASID and TTBR base address is changed at same time)
	 * (original info on HSP purge window issues can be found in rdar://55577508)
	 * We need a flag to check for this, so calculate and set it here. We'll use it in machine_switch_amx_context().
	 */
	need_wa_rdar_55577508 = cpuid_get_cpufamily() == CPUFAMILY_ARM_LIGHTNING_THUNDER;

	/* setup debugging output if one has been chosen */
	kernel_startup_initialize_upto(STARTUP_SUB_KPRINTF);
	kprintf("kprintf initialized\n");
	if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) {
		/* Do we want a serial keyboard and/or console? */
		kprintf("Serial mode specified: %08X\n", serialmode);
		int force_sync = serialmode & SERIALMODE_SYNCDRAIN;
		if (force_sync || PE_parse_boot_argn("drain_uart_sync", &force_sync, sizeof(force_sync))) {
			serialmode |= SERIALMODE_SYNCDRAIN;
			kprintf(
				"WARNING: Forcing uart driver to output synchronously."
				"printf()s/IOLogs will impact kernel performance.\n"
				"You are advised to avoid using 'drain_uart_sync' boot-arg.\n");
		}
	}
	if (kern_feature_override(KF_SERIAL_OVRD)) {
		serialmode = 0;
	}
	if (serialmode & SERIALMODE_OUTPUT) {     /* Start serial if requested */
		(void)switch_to_serial_console(); /* Switch into serial mode */
		disableConsoleOutput = FALSE;     /* Allow printfs to happen */
	}
	/* setup console output */
	PE_init_printf(FALSE);

	cpu_machine_idle_init(TRUE);
#if (__ARM_ARCH__ == 7)
	if (arm_diag & 0x8000) {
		set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC);
	}
#endif /* (__ARM_ARCH__ == 7) */
	PE_init_platform(TRUE, &BootCpuData);

	cpu_timebase_init(TRUE);

	fiq_context_init(TRUE);
#if HIBERNATION
	pal_hib_init();
#endif /* HIBERNATION */
	/*
	 * gPhysBase/Size only represent kernel-managed memory. These globals represent
	 * the actual DRAM base address and size as reported by iBoot through the
	 * device tree.
	 */
	DTEntry chosen;
	unsigned int dt_entry_size;
	unsigned long const *dram_base;
	unsigned long const *dram_size;
	if (SecureDTLookupEntry(NULL, "/chosen", &chosen) != kSuccess) {
		panic("%s: Unable to find 'chosen' DT node", __FUNCTION__);
	}
	if (SecureDTGetProperty(chosen, "dram-base", (void const **)&dram_base, &dt_entry_size) != kSuccess) {
		panic("%s: Unable to find 'dram-base' entry in the 'chosen' DT node", __FUNCTION__);
	}

	if (SecureDTGetProperty(chosen, "dram-size", (void const **)&dram_size, &dt_entry_size) != kSuccess) {
		panic("%s: Unable to find 'dram-size' entry in the 'chosen' DT node", __FUNCTION__);
	}

	gDramBase = *dram_base;
	gDramSize = *dram_size;
	/*
	 * Initialize the stack protector for all future calls
	 * to C code. Since kernel_bootstrap() eventually
	 * switches stack context without returning through this
	 * function, we do not risk failing the check even though
	 * we mutate the guard word during execution.
	 */
	__stack_chk_guard = (unsigned long)early_random();
	/* Zero a byte of the protector to guard
	 * against string vulnerabilities
	 */
	__stack_chk_guard &= ~(0xFFULL << 8);

	machine_startup(args);
}
/*
 *	Routine:	arm_init_cpu
 *	Function:	Runs on S2R resume (all CPUs) and SMP boot (non-boot CPUs only).
 */
void
arm_init_cpu(
	cpu_data_t *cpu_data_ptr)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif /* __ARM_PAN_AVAILABLE__ */

#if HAS_IC_INVAL_FILTERS
	configure_misc_apple_regs();
#endif /* HAS_IC_INVAL_FILTERS */
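	/* This CPU is returning from reset or sleep: clear its SleepState flag before resuming normal operation. */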
	cpu_data_ptr->cpu_flags &= ~SleepState;
	cpu_data_ptr->cpu_CLW_active = 1;

	machine_set_current_thread(cpu_data_ptr->cpu_active_thread);
#if HIBERNATION
	if ((cpu_data_ptr == &BootCpuData) && (gIOHibernateState == kIOHibernateStateWakingFromHibernate)) {
		// the "normal" S2R code captures wake_abstime too early, so on a hibernation resume we fix it up here
		extern uint64_t wake_abstime;
		wake_abstime = gIOHibernateCurrentHeader->lastHibAbsTime;

		// since the hw clock stops ticking across hibernation, we need to apply an offset;
		// iBoot computes this offset for us and passes it via the hibernation header
		extern uint64_t hwclock_conttime_offset;
		hwclock_conttime_offset = gIOHibernateCurrentHeader->hwClockOffset;

		// during hibernation, we captured the idle thread's state from inside the PPL context, so we have to
		// fix up its preemption count
		unsigned int expected_preemption_count = (gEnforceQuiesceSafety ? 2 : 1);
		if (cpu_data_ptr->cpu_active_thread->machine.preemption_count != expected_preemption_count) {
			panic("unexpected preemption count %u on boot cpu thread (should be %u)\n",
			    cpu_data_ptr->cpu_active_thread->machine.preemption_count,
			    expected_preemption_count);
		}
		cpu_data_ptr->cpu_active_thread->machine.preemption_count--;
	}
#endif /* HIBERNATION */
	pmap_clear_user_ttb();

	cpu_machine_idle_init(FALSE);
#if (__ARM_ARCH__ == 7)
	if (arm_diag & 0x8000) {
		set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC);
	}
#endif /* (__ARM_ARCH__ == 7) */
	if ((cpus_defeatures & (0xF << 4 * cpu_data_ptr->cpu_number)) != 0) {
		cpu_defeatures_set((cpus_defeatures >> 4 * cpu_data_ptr->cpu_number) & 0xF);
	}
	/* Initialize the timebase before serial_init, as some serial
	 * drivers use mach_absolute_time() to implement rate control
	 */
	cpu_timebase_init(FALSE);
	if (cpu_data_ptr == &BootCpuData) {
#if __arm64__ && __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * Prevent CPUs from going into deep sleep until all
		 * CPUs are ready to do so.
		 */
		arm64_stall_sleep = TRUE;
#endif /* __arm64__ && __ARM_GLOBAL_SLEEP_BIT__ */
		serial_init();
		PE_init_platform(TRUE, NULL);
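		/* Republish timebase info to the commpage now that the timebase has been re-initialized. */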
		commpage_update_timebase();
	}
	fiq_context_init(TRUE);
	cpu_data_ptr->rtcPop = EndOfAllTime;
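	/* rtcPop was just reset to EndOfAllTime, so recompute this CPU's next timer deadline. */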
	timer_resync_deadlines();
#if DEVELOPMENT || DEBUG
	PE_arm_debug_enable_trace();
#endif /* DEVELOPMENT || DEBUG */
708 kprintf("arm_cpu_init(): cpu %d online\n", cpu_data_ptr
->cpu_number
);
	if (cpu_data_ptr == &BootCpuData) {
		if (kdebug_enable == 0) {
			__kdebug_only uint64_t elapsed = kdebug_wake();
			KDBG(IOKDBG_CODE(DBG_HIBERNATE, 15), mach_absolute_time() - elapsed);
		}
#if CONFIG_TELEMETRY
		bootprofile_wake_from_sleep();
#endif /* CONFIG_TELEMETRY */
	}
#if MONOTONIC && defined(__arm64__)
	mt_wake_per_core();
#endif /* MONOTONIC && defined(__arm64__) */
#if defined(KERNEL_INTEGRITY_CTRR)
	if (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKED) {
		lck_spin_lock(&ctrr_cpu_start_lck);
		ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKED;
		thread_wakeup(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]);
		lck_spin_unlock(&ctrr_cpu_start_lck);
	}
#endif /* defined(KERNEL_INTEGRITY_CTRR) */
}
/*
 *	Routine:	arm_init_idle_cpu
 *	Function:	Resume from non-retention WFI. Called from the reset vector.
 */
void __attribute__((noreturn))
arm_init_idle_cpu(
	cpu_data_t *cpu_data_ptr)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif /* __ARM_PAN_AVAILABLE__ */
	cpu_data_ptr->cpu_CLW_active = 1;

	machine_set_current_thread(cpu_data_ptr->cpu_active_thread);

	pmap_clear_user_ttb();

	/* Enable asynchronous exceptions */
	__builtin_arm_wsr("DAIFClr", DAIFSC_ASYNCF);
#if (__ARM_ARCH__ == 7)
	if (arm_diag & 0x8000) {
		set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC);
	}
#endif /* (__ARM_ARCH__ == 7) */
	if ((cpus_defeatures & (0xF << 4 * cpu_data_ptr->cpu_number)) != 0) {
		cpu_defeatures_set((cpus_defeatures >> 4 * cpu_data_ptr->cpu_number) & 0xF);
	}
	fiq_context_init(FALSE);