/* apple/xnu.git: osfmk/arm/arm_init.c @ 80a448ecb2cb4d4805fd43a81f656534a547f4f4 */
/*
 * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <debug.h>
#include <mach_ldebug.h>
#include <mach_kdp.h>

#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <kern/processor.h>
#include <kern/startup.h>
#include <kern/debug.h>
#include <prng/random.h>
#include <machine/machine_routines.h>
#include <machine/commpage.h>
#if HIBERNATION
#include <machine/pal_hibernate.h>
#endif /* HIBERNATION */
/* ARM64_TODO unify boot.h */
#if __arm64__
#include <pexpert/arm64/apple_arm64_common.h>
#include <pexpert/arm64/boot.h>
#elif __arm__
#include <pexpert/arm/boot.h>
#else
#error Unsupported arch
#endif
#include <pexpert/arm/consistent_debug.h>
#include <pexpert/device_tree.h>
#include <arm/proc_reg.h>
#include <arm/pmap.h>
#include <arm/caches_internal.h>
#include <arm/cpu_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpuid_internal.h>
#include <arm/io_map_entries.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <vm/vm_map.h>

#include <libkern/kernel_mach_header.h>
#include <libkern/stack_protector.h>
#include <libkern/section_keywords.h>
#include <san/kasan.h>
#include <sys/kdebug.h>

#include <pexpert/pexpert.h>

#include <console/serial_protos.h>

#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#if HIBERNATION
#include <IOKit/IOPlatformExpert.h>
#endif /* HIBERNATION */

extern void patch_low_glo(void);
extern int serial_init(void);
extern void sleep_token_buffer_init(void);

extern vm_offset_t intstack_top;
#if __arm64__
extern vm_offset_t excepstack_top;
#else
extern vm_offset_t fiqstack_top;
#endif

extern const char version[];
extern const char version_variant[];
extern int disableConsoleOutput;

int pc_trace_buf[PC_TRACE_BUF_SIZE] = {0};
int pc_trace_cnt = PC_TRACE_BUF_SIZE;
int debug_task;

bool need_wa_rdar_55577508 = false;
SECURITY_READ_ONLY_LATE(bool) static_kernelcache = false;

#if HAS_BP_RET
/* Enable both branch target retention (0x2) and branch direction retention (0x1) across sleep */
uint32_t bp_ret = 3;
extern void set_bp_ret(void);
#endif

#if INTERRUPT_MASKED_DEBUG
boolean_t interrupt_masked_debug = 1;
/* the following are in mach timebase units */
uint64_t interrupt_masked_timeout = 0xd0000;
uint64_t stackshot_interrupt_masked_timeout = 0xf9999;
#endif

boot_args const_boot_args __attribute__((section("__DATA, __const")));
boot_args *BootArgs __attribute__((section("__DATA, __const")));

TUNABLE(uint32_t, arm_diag, "diag", 0);
#ifdef APPLETYPHOON
static unsigned cpus_defeatures = 0x0;
extern void cpu_defeatures_set(unsigned int);
#endif

#if __arm64__ && __ARM_GLOBAL_SLEEP_BIT__
extern volatile boolean_t arm64_stall_sleep;
#endif

extern boolean_t force_immediate_debug_halt;

#if HAS_APPLE_PAC
SECURITY_READ_ONLY_LATE(boolean_t) diversify_user_jop = TRUE;
#endif

SECURITY_READ_ONLY_LATE(uint64_t) gDramBase;
SECURITY_READ_ONLY_LATE(uint64_t) gDramSize;

/*
 * Forward declaration
 */
void arm_init(boot_args * args);

#if __arm64__
unsigned int page_shift_user32; /* for page_size as seen by a 32-bit task */

extern void configure_misc_apple_boot_args(void);
extern void configure_misc_apple_regs(void);
#endif /* __arm64__ */


/*
 * JOP rebasing
 */

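/*
 * dyld_kernel_fixups.h reports diagnostics through a dyldLogFunc() macro;
 * define it to nothing here, since these fixups run before any console
 * output path is available.
 */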
#define dyldLogFunc(msg, ...)
#include <mach/dyld_kernel_fixups.h>

extern uint32_t __thread_starts_sect_start[] __asm("section$start$__TEXT$__thread_starts");
extern uint32_t __thread_starts_sect_end[] __asm("section$end$__TEXT$__thread_starts");
#if defined(HAS_APPLE_PAC)
extern void OSRuntimeSignStructors(kernel_mach_header_t * header);
extern void OSRuntimeSignStructorsInFileset(kernel_mach_header_t * header);
#endif /* defined(HAS_APPLE_PAC) */

extern vm_offset_t vm_kernel_slide;
extern vm_offset_t segLOWESTKC, segHIGHESTKC, segLOWESTROKC, segHIGHESTROKC;
extern vm_offset_t segLOWESTAuxKC, segHIGHESTAuxKC, segLOWESTROAuxKC, segHIGHESTROAuxKC;
extern vm_offset_t segLOWESTRXAuxKC, segHIGHESTRXAuxKC, segHIGHESTNLEAuxKC;

static void
arm_slide_rebase_and_sign_image(void)
{
	kernel_mach_header_t *k_mh, *kc_mh = NULL;
	kernel_segment_command_t *seg;
	uintptr_t slide;

	k_mh = &_mh_execute_header;
	if (kernel_mach_header_is_in_fileset(k_mh)) {
		/*
		 * The kernel is part of an MH_FILESET kernel collection; determine the
		 * slide from the first segment's Mach-O vmaddr (this requires the
		 * kernel's first load command to be LC_SEGMENT_64 for the __TEXT
		 * segment).
		 */
		seg = (kernel_segment_command_t *)((uintptr_t)k_mh + sizeof(*k_mh));
		assert(seg->cmd == LC_SEGMENT_KERNEL);
		slide = (uintptr_t)k_mh - seg->vmaddr;

		/*
		 * The kernel collection linker guarantees that the boot collection mach
		 * header vmaddr is the hardcoded kernel link address (as specified to
		 * ld64 when linking the kernel).
		 */
		kc_mh = (kernel_mach_header_t*)(VM_KERNEL_LINK_ADDRESS + slide);
		assert(kc_mh->filetype == MH_FILESET);

		/*
		 * rebase and sign jops
		 * Note that we can't call any functions before this point, so
		 * we have to hard-code the knowledge that the base of the KC
		 * is the KC's mach-o header. This would change if any
		 * segment's VA started *before* the text segment
		 * (as the HIB segment does on x86).
		 */
		const void *collection_base_pointers[KCNumKinds] = {[0] = kc_mh, };
		kernel_collection_slide((struct mach_header_64 *)kc_mh, collection_base_pointers);

		PE_set_kc_header(KCKindPrimary, kc_mh, slide);

		/*
		 * iBoot doesn't slide load command vmaddrs in an MH_FILESET kernel
		 * collection, so adjust them now, and determine the vmaddr range
		 * covered by read-only segments for the CTRR rorgn.
		 */
		kernel_collection_adjust_mh_addrs((struct mach_header_64 *)kc_mh, slide, false,
		    (uintptr_t *)&segLOWESTKC, (uintptr_t *)&segHIGHESTKC,
		    (uintptr_t *)&segLOWESTROKC, (uintptr_t *)&segHIGHESTROKC,
		    NULL, NULL, NULL);
#if defined(HAS_APPLE_PAC)
		OSRuntimeSignStructorsInFileset(kc_mh);
#endif /* defined(HAS_APPLE_PAC) */
	} else {
		/*
		 * Static kernelcache: iBoot already slid the kernel's Mach-O vmaddrs,
		 * so determine the slide from the hardcoded kernel link address.
		 */
		slide = (uintptr_t)k_mh - VM_KERNEL_LINK_ADDRESS;

		/* rebase and sign jops */
		static_kernelcache = &__thread_starts_sect_end[0] != &__thread_starts_sect_start[0];
		if (static_kernelcache) {
			rebase_threaded_starts(&__thread_starts_sect_start[0],
			    &__thread_starts_sect_end[0],
			    (uintptr_t)k_mh, (uintptr_t)k_mh - slide, slide);
		}
#if defined(HAS_APPLE_PAC)
		OSRuntimeSignStructors(&_mh_execute_header);
#endif /* defined(HAS_APPLE_PAC) */
	}


	/*
	 * Initialize slide global here to avoid duplicating this logic in
	 * arm_vm_init()
	 */
	vm_kernel_slide = slide;
}

void
arm_auxkc_init(void *mh, void *base)
{
	/*
	 * The kernel collection linker guarantees that the lowest vmaddr in an
	 * AuxKC collection is 0 (but note that the mach header is higher up since
	 * RW segments precede RO segments in the AuxKC).
	 */
	uintptr_t slide = (uintptr_t)base;
	kernel_mach_header_t *akc_mh = (kernel_mach_header_t*)mh;

	assert(akc_mh->filetype == MH_FILESET);
	PE_set_kc_header_and_base(KCKindAuxiliary, akc_mh, base, slide);

	/* rebase and sign jops */
	const void *collection_base_pointers[KCNumKinds];
	memcpy(collection_base_pointers, PE_get_kc_base_pointers(), sizeof(collection_base_pointers));
	kernel_collection_slide((struct mach_header_64 *)akc_mh, collection_base_pointers);

	kernel_collection_adjust_mh_addrs((struct mach_header_64 *)akc_mh, slide, false,
	    (uintptr_t *)&segLOWESTAuxKC, (uintptr_t *)&segHIGHESTAuxKC, (uintptr_t *)&segLOWESTROAuxKC,
	    (uintptr_t *)&segHIGHESTROAuxKC, (uintptr_t *)&segLOWESTRXAuxKC, (uintptr_t *)&segHIGHESTRXAuxKC,
	    (uintptr_t *)&segHIGHESTNLEAuxKC);
#if defined(HAS_APPLE_PAC)
	OSRuntimeSignStructorsInFileset(akc_mh);
#endif /* defined(HAS_APPLE_PAC) */
}

/*
 * Routine: arm_init
 * Function: Runs on the boot CPU, once, on entry from iBoot.
 */

__startup_func
void
arm_init(
	boot_args *args)
{
	unsigned int maxmem;
	uint32_t memsize;
	uint64_t xmaxmem;
	thread_t thread;

	arm_slide_rebase_and_sign_image();

	/* If kernel integrity is supported, use a constant copy of the boot args. */
	const_boot_args = *args;
	BootArgs = args = &const_boot_args;
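	/*
	 * From this point on, boot args are read through the kernel-owned copy,
	 * insulating later code from changes to iBoot's original buffer.
	 */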

	cpu_data_init(&BootCpuData);
#if defined(HAS_APPLE_PAC)
	/* The bootstrap CPU's process-dependent key for the kernel has already been loaded by start.s. */
	BootCpuData.rop_key = KERNEL_ROP_ID;
	BootCpuData.jop_key = ml_default_jop_pid();
#endif /* defined(HAS_APPLE_PAC) */

	PE_init_platform(FALSE, args); /* Get platform expert set up */

#if __arm64__
	wfe_timeout_configure();

	configure_misc_apple_boot_args();
	configure_misc_apple_regs();


	{
		/*
		 * Select the advertised kernel page size.
		 */
		if (args->memSize > 1ULL * 1024 * 1024 * 1024) {
			/*
			 * arm64 device with > 1GB of RAM:
			 * kernel uses 16KB pages.
			 */
			PAGE_SHIFT_CONST = PAGE_MAX_SHIFT;
		} else {
			/*
			 * arm64 device with <= 1GB of RAM:
			 * kernel uses hardware page size
			 * (4KB for H6/H7, 16KB for H8+).
			 */
			PAGE_SHIFT_CONST = ARM_PGSHIFT;
		}

		/* 32-bit apps always see 16KB page size */
		page_shift_user32 = PAGE_MAX_SHIFT;
#ifdef APPLETYPHOON
		if (PE_parse_boot_argn("cpus_defeatures", &cpus_defeatures, sizeof(cpus_defeatures))) {
			if ((cpus_defeatures & 0xF) != 0) {
				cpu_defeatures_set(cpus_defeatures & 0xF);
			}
		}
#endif
	}
#endif

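	/*
	 * Parse the CPU/cluster topology from the device tree; the boot CPU
	 * number and max CPU number queried below come from this pass.
	 */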
	ml_parse_cpu_topology();

	master_cpu = ml_get_boot_cpu_number();
	assert(master_cpu >= 0 && master_cpu <= ml_get_max_cpu_number());

	BootCpuData.cpu_number = (unsigned short)master_cpu;
#if __arm__
	BootCpuData.cpu_exc_vectors = (vm_offset_t)&ExceptionVectorsTable;
#endif
	BootCpuData.intstack_top = (vm_offset_t) &intstack_top;
	BootCpuData.istackptr = BootCpuData.intstack_top;
#if __arm64__
	BootCpuData.excepstack_top = (vm_offset_t) &excepstack_top;
	BootCpuData.excepstackptr = BootCpuData.excepstack_top;
#else
	BootCpuData.fiqstack_top = (vm_offset_t) &fiqstack_top;
	BootCpuData.fiqstackptr = BootCpuData.fiqstack_top;
#endif
	BootCpuData.cpu_console_buf = (void *)NULL;
	CpuDataEntries[master_cpu].cpu_data_vaddr = &BootCpuData;
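	/*
	 * Derive the physical address of BootCpuData by rebasing its virtual
	 * address from the kernel's virtual base onto the physical base that
	 * iBoot reported in the boot args.
	 */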
	CpuDataEntries[master_cpu].cpu_data_paddr = (void *)((uintptr_t)(args->physBase)
	    + ((uintptr_t)&BootCpuData
	    - (uintptr_t)(args->virtBase)));

	thread = thread_bootstrap();
	thread->machine.CpuDatap = &BootCpuData;
	thread->machine.pcpu_data_base = (vm_offset_t)0;
	machine_set_current_thread(thread);

	/*
	 * Preemption is enabled for this thread so that it can lock mutexes without
	 * tripping the preemption check. In reality scheduling is not enabled until
	 * this thread completes, and there are no other threads to switch to, so
	 * preemption level is not really meaningful for the bootstrap thread.
	 */
	thread->machine.preemption_count = 0;
#if __arm__ && __ARM_USER_PROTECT__
	{
		unsigned int ttbr0_val, ttbr1_val;
		__asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val));
		__asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val));
		thread->machine.uptw_ttb = ttbr0_val;
		thread->machine.kptw_ttb = ttbr1_val;
	}
#endif
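	/*
	 * Both of the boot processor's timer pointers are aimed at the bootstrap
	 * thread's system timer, so all early boot time is charged to it.
	 */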
	processor_t boot_processor = PERCPU_GET_MASTER(processor);
	boot_processor->kernel_timer = &thread->system_timer;
	boot_processor->thread_timer = &thread->system_timer;

	cpu_bootstrap();

	rtclock_early_init();

	kernel_debug_string_early("kernel_startup_bootstrap");
	kernel_startup_bootstrap();

	/*
	 * Initialize the timer callout world
	 */
	timer_call_init();

	cpu_init();

	processor_bootstrap();

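	/*
	 * Memory-limit precedence: a "maxmem" boot-arg (specified in MB) wins,
	 * else the platform default "hw.memsize" (in bytes); otherwise xmaxmem
	 * stays 0 and no explicit limit is passed to arm_vm_init().
	 */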
	if (PE_parse_boot_argn("maxmem", &maxmem, sizeof(maxmem))) {
		xmaxmem = (uint64_t) maxmem * (1024 * 1024);
	} else if (PE_get_default("hw.memsize", &memsize, sizeof(memsize))) {
		xmaxmem = (uint64_t) memsize;
	} else {
		xmaxmem = 0;
	}

#if INTERRUPT_MASKED_DEBUG
	int wdt_boot_arg = 0;
	/* Disable if the WDT is disabled or "no_interrupt_masked_debug" is in boot-args */
	if (PE_parse_boot_argn("no_interrupt_masked_debug", &interrupt_masked_debug,
	    sizeof(interrupt_masked_debug)) || (PE_parse_boot_argn("wdt", &wdt_boot_arg,
	    sizeof(wdt_boot_arg)) && (wdt_boot_arg == -1)) || kern_feature_override(KF_INTERRUPT_MASKED_DEBUG_OVRD)) {
		interrupt_masked_debug = 0;
	}

	PE_parse_boot_argn("interrupt_masked_debug_timeout", &interrupt_masked_timeout, sizeof(interrupt_masked_timeout));
#endif

#if HAS_BP_RET
	PE_parse_boot_argn("bpret", &bp_ret, sizeof(bp_ret));
	set_bp_ret(); // Apply branch predictor retention settings to boot CPU
#endif

	PE_parse_boot_argn("immediate_NMI", &force_immediate_debug_halt, sizeof(force_immediate_debug_halt));

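	/*
	 * Where PAN is available, set Privileged Access Never so kernel-mode
	 * accesses to user-accessible memory fault unless explicitly permitted.
	 */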
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif /* __ARM_PAN_AVAILABLE__ */

	arm_vm_init(xmaxmem, args);

	if (debug_boot_arg) {
		patch_low_glo();
	}

#if __arm64__ && WITH_CLASSIC_S2R
	sleep_token_buffer_init();
#endif

	PE_consistent_debug_inherit();

	/*
	 * rdar://54622819: an insufficient HSP purge window can cause incorrect
	 * translation when the ASID and TTBR base address are changed at the same
	 * time (original info on HSP purge window issues is in rdar://55577508).
	 * We need a flag to check for this, so calculate and set it here. We'll
	 * use it in machine_switch_amx_context().
	 */
#if __arm64__
	need_wa_rdar_55577508 = cpuid_get_cpufamily() == CPUFAMILY_ARM_LIGHTNING_THUNDER;
#ifndef RC_HIDE_XNU_FIRESTORM
	need_wa_rdar_55577508 |= (cpuid_get_cpufamily() == CPUFAMILY_ARM_FIRESTORM_ICESTORM && get_arm_cpu_version() == CPU_VERSION_A0);
#endif
#endif

	/* setup debugging output if one has been chosen */
	kernel_startup_initialize_upto(STARTUP_SUB_KPRINTF);
	kprintf("kprintf initialized\n");

	serialmode = 0;
	if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) {
		/* Do we want a serial keyboard and/or console? */
		kprintf("Serial mode specified: %08X\n", serialmode);
		int force_sync = serialmode & SERIALMODE_SYNCDRAIN;
		if (force_sync || PE_parse_boot_argn("drain_uart_sync", &force_sync, sizeof(force_sync))) {
			if (force_sync) {
				serialmode |= SERIALMODE_SYNCDRAIN;
				kprintf(
					"WARNING: Forcing uart driver to output synchronously. "
					"printf()s/IOLogs will impact kernel performance.\n"
					"You are advised to avoid using 'drain_uart_sync' boot-arg.\n");
			}
		}
	}
	if (kern_feature_override(KF_SERIAL_OVRD)) {
		serialmode = 0;
	}

	if (serialmode & SERIALMODE_OUTPUT) {           /* Start serial if requested */
		(void)switch_to_serial_console();       /* Switch into serial mode */
		disableConsoleOutput = FALSE;           /* Allow printfs to happen */
	}
	PE_create_console();

	/* setup console output */
	PE_init_printf(FALSE);

#if __arm64__
#if DEBUG
	dump_kva_space();
#endif
#endif

	cpu_machine_idle_init(TRUE);

#if (__ARM_ARCH__ == 7)
	if (arm_diag & 0x8000) {
		set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC);
	}
#endif

	PE_init_platform(TRUE, &BootCpuData);

#if __arm64__
	ml_map_cpu_pio();
#endif

	cpu_timebase_init(TRUE);
	PE_init_cpu();
	fiq_context_init(TRUE);


#if HIBERNATION
	pal_hib_init();
#endif /* HIBERNATION */

	/*
	 * gPhysBase/Size only represent kernel-managed memory. These globals represent
	 * the actual DRAM base address and size as reported by iBoot through the
	 * device tree.
	 */
	DTEntry chosen;
	unsigned int dt_entry_size;
	unsigned long const *dram_base;
	unsigned long const *dram_size;
	if (SecureDTLookupEntry(NULL, "/chosen", &chosen) != kSuccess) {
		panic("%s: Unable to find 'chosen' DT node", __FUNCTION__);
	}

	if (SecureDTGetProperty(chosen, "dram-base", (void const **)&dram_base, &dt_entry_size) != kSuccess) {
		panic("%s: Unable to find 'dram-base' entry in the 'chosen' DT node", __FUNCTION__);
	}

	if (SecureDTGetProperty(chosen, "dram-size", (void const **)&dram_size, &dt_entry_size) != kSuccess) {
		panic("%s: Unable to find 'dram-size' entry in the 'chosen' DT node", __FUNCTION__);
	}

	gDramBase = *dram_base;
	gDramSize = *dram_size;

	/*
	 * Initialize the stack protector for all future calls
	 * to C code. Since kernel_bootstrap() eventually
	 * switches stack context without returning through this
	 * function, we do not risk failing the check even though
	 * we mutate the guard word during execution.
	 */
	__stack_chk_guard = (unsigned long)early_random();
	/* Zero a byte of the protector to guard
	 * against string vulnerabilities
	 */
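	/*
	 * A zero byte in the guard means str*-style overflows cannot reproduce
	 * it, since a string copy terminates at the first NUL.
	 */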
	__stack_chk_guard &= ~(0xFFULL << 8);
	machine_startup(args);
}

/*
 * Routine: arm_init_cpu
 * Function:
 *	Runs on S2R resume (all CPUs) and SMP boot (non-boot CPUs only).
 */

void
arm_init_cpu(
	cpu_data_t *cpu_data_ptr)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif

#ifdef __arm64__
	configure_misc_apple_regs();
#endif

	cpu_data_ptr->cpu_flags &= ~SleepState;
#if defined(ARMA7)
	cpu_data_ptr->cpu_CLW_active = 1;
#endif

	machine_set_current_thread(cpu_data_ptr->cpu_active_thread);

#if HIBERNATION
	if ((cpu_data_ptr == &BootCpuData) && (gIOHibernateState == kIOHibernateStateWakingFromHibernate)) {
		// the "normal" S2R code captures wake_abstime too early, so on a hibernation resume we fix it up here
		extern uint64_t wake_abstime;
		wake_abstime = gIOHibernateCurrentHeader->lastHibAbsTime;

		// since the hw clock stops ticking across hibernation, we need to apply an offset;
		// iBoot computes this offset for us and passes it via the hibernation header
		extern uint64_t hwclock_conttime_offset;
		hwclock_conttime_offset = gIOHibernateCurrentHeader->hwClockOffset;

		// during hibernation, we captured the idle thread's state from inside the PPL context, so we have to
		// fix up its preemption count
		unsigned int expected_preemption_count = (gEnforceQuiesceSafety ? 2 : 1);
		if (cpu_data_ptr->cpu_active_thread->machine.preemption_count != expected_preemption_count) {
			panic("unexpected preemption count %u on boot cpu thread (should be %u)\n",
			    cpu_data_ptr->cpu_active_thread->machine.preemption_count,
			    expected_preemption_count);
		}
		cpu_data_ptr->cpu_active_thread->machine.preemption_count--;
	}
#endif /* HIBERNATION */

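	/*
	 * On arm64, translation state left over from before sleep may be stale:
	 * reset the user TTB and flush the TLB before resuming normal operation.
	 */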
#if __arm64__
	wfe_timeout_init();
	pmap_clear_user_ttb();
	flush_mmu_tlb();
#endif

	cpu_machine_idle_init(FALSE);

	cpu_init();

#if (__ARM_ARCH__ == 7)
	if (arm_diag & 0x8000) {
		set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC);
	}
#endif
#ifdef APPLETYPHOON
	if ((cpus_defeatures & (0xF << 4 * cpu_data_ptr->cpu_number)) != 0) {
		cpu_defeatures_set((cpus_defeatures >> 4 * cpu_data_ptr->cpu_number) & 0xF);
	}
#endif
	/* Initialize the timebase before serial_init, as some serial
	 * drivers use mach_absolute_time() to implement rate control
	 */
	cpu_timebase_init(FALSE);

	if (cpu_data_ptr == &BootCpuData) {
#if __arm64__ && __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * Prevent CPUs from going into deep sleep until all
		 * CPUs are ready to do so.
		 */
		arm64_stall_sleep = TRUE;
#endif
		serial_init();
		PE_init_platform(TRUE, NULL);
		commpage_update_timebase();
	}
	PE_init_cpu();

	fiq_context_init(TRUE);
	cpu_data_ptr->rtcPop = EndOfAllTime;
	timer_resync_deadlines();

#if DEVELOPMENT || DEBUG
	PE_arm_debug_enable_trace();
#endif


	kprintf("arm_init_cpu(): cpu %d online\n", cpu_data_ptr->cpu_number);

	if (cpu_data_ptr == &BootCpuData) {
		if (kdebug_enable == 0) {
			__kdebug_only uint64_t elapsed = kdebug_wake();
			KDBG(IOKDBG_CODE(DBG_HIBERNATE, 15), mach_absolute_time() - elapsed);
		}

#if CONFIG_TELEMETRY
		bootprofile_wake_from_sleep();
#endif /* CONFIG_TELEMETRY */
	}
#if MONOTONIC && defined(__arm64__)
	mt_wake_per_core();
#endif /* MONOTONIC && defined(__arm64__) */

#if defined(KERNEL_INTEGRITY_CTRR)
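	/*
	 * Mark this CPU's cluster as CTRR-locked and wake any threads waiting
	 * for the lock-down to complete.
	 */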
	if (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKED) {
		lck_spin_lock(&ctrr_cpu_start_lck);
		ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKED;
		thread_wakeup(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]);
		lck_spin_unlock(&ctrr_cpu_start_lck);
	}
#endif

	slave_main(NULL);
}

/*
 * Routine: arm_init_idle_cpu
 * Function: Resume from non-retention WFI. Called from the reset vector.
 */
void __attribute__((noreturn))
arm_init_idle_cpu(
	cpu_data_t *cpu_data_ptr)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif
#if defined(ARMA7)
	cpu_data_ptr->cpu_CLW_active = 1;
#endif

	machine_set_current_thread(cpu_data_ptr->cpu_active_thread);

#if __arm64__
	wfe_timeout_init();
	pmap_clear_user_ttb();
	flush_mmu_tlb();
	/* Enable asynchronous exceptions */
	__builtin_arm_wsr("DAIFClr", DAIFSC_ASYNCF);
#endif

#if (__ARM_ARCH__ == 7)
	if (arm_diag & 0x8000) {
		set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC);
	}
#endif
#ifdef APPLETYPHOON
	if ((cpus_defeatures & (0xF << 4 * cpu_data_ptr->cpu_number)) != 0) {
		cpu_defeatures_set((cpus_defeatures >> 4 * cpu_data_ptr->cpu_number) & 0xF);
	}
#endif

	fiq_context_init(FALSE);

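	/* Resume the idle loop; arm_init_idle_cpu is noreturn, so this call never returns here. */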
	cpu_idle_exit(TRUE);
}