1 /*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * File: arm64/cpu.c
30 *
31 * CPU-specific routines
32 */
33
34 #include <pexpert/arm64/board_config.h>
35 #include <kern/kalloc.h>
36 #include <kern/machine.h>
37 #include <kern/cpu_number.h>
38 #include <kern/thread.h>
39 #include <kern/timer_queue.h>
40 #include <arm/cpu_data.h>
41 #include <arm/cpuid.h>
42 #include <arm/caches_internal.h>
43 #include <arm/cpu_data_internal.h>
44 #include <arm/cpu_internal.h>
45 #include <arm/misc_protos.h>
46 #include <arm/machine_cpu.h>
47 #include <arm/rtclock.h>
48 #include <arm64/proc_reg.h>
49 #include <mach/processor_info.h>
50 #include <vm/pmap.h>
51 #include <vm/vm_kern.h>
52 #include <vm/vm_map.h>
53 #include <pexpert/arm/protos.h>
54 #include <pexpert/device_tree.h>
55 #include <sys/kdebug.h>
56 #include <arm/machine_routines.h>
57
58 #include <machine/atomic.h>
59
60 #include <san/kasan.h>
61
62 #if KPC
63 #include <kern/kpc.h>
64 #endif
65
66 #if MONOTONIC
67 #include <kern/monotonic.h>
68 #endif /* MONOTONIC */
69
70 extern boolean_t idle_enable;
71 extern uint64_t wake_abstime;
72
73 #if WITH_CLASSIC_S2R
74 void sleep_token_buffer_init(void);
75 #endif
76
77
78 extern uintptr_t resume_idle_cpu;
79 extern uintptr_t start_cpu;
80
81 #if __ARM_KERNEL_PROTECT__
82 extern void exc_vectors_table;
83 #endif /* __ARM_KERNEL_PROTECT__ */
84
85 extern void __attribute__((noreturn)) arm64_prepare_for_sleep(void);
86 extern void arm64_force_wfi_clock_gate(void);
87 #if defined(APPLETYPHOON)
88 // <rdar://problem/15827409>
89 extern void typhoon_prepare_for_wfi(void);
90 extern void typhoon_return_from_wfi(void);
91 #endif
92
93
94 vm_address_t start_cpu_paddr;
95
96 sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
97 .tcr_el1 = TCR_EL1_BOOT,
98 };
99
100
101 // wfi - wfi mode
102 // 0 : disabled
103 // 1 : normal
104 // 2 : overhead simulation (delay & flags)
105 static int wfi = 1;
106
107 #if DEVELOPMENT || DEBUG
108
109 // wfi_flags
110 // 1 << 0 : flush L1s
111 // 1 << 1 : flush TLBs
112 static int wfi_flags = 0;
113
114 // wfi_delay - delay ticks after wfi exit
115 static uint64_t wfi_delay = 0;
116
117 #endif /* DEVELOPMENT || DEBUG */
118
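// idle_wfe_to_deadline - settable via the "idle_wfe_to_deadline" boot-arg
// parsed in cpu_machine_idle_init(); when true and SetIdlePop() declines a
// full WFI idle, cpu_idle() spins in WFE until an interrupt is pending
// instead of returning to the idle loop right away.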
119 static bool idle_wfe_to_deadline = false;
120
121 #if __ARM_GLOBAL_SLEEP_BIT__
122 volatile boolean_t arm64_stall_sleep = TRUE;
123 #endif
124
125 #if WITH_CLASSIC_S2R
126 /*
127 * These must be aligned to avoid issues with calling bcopy_phys on them before
128 * we are done with pmap initialization.
129 */
130 static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
131 static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
132 #endif
133
134 #if WITH_CLASSIC_S2R
135 static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
136 #endif
137 static boolean_t coresight_debug_enabled = FALSE;
138
139 #if defined(CONFIG_XNUPOST)
140 void arm64_ipi_test_callback(void *);
141 void arm64_immediate_ipi_test_callback(void *);
142
143 void
144 arm64_ipi_test_callback(void *parm)
145 {
146 volatile uint64_t *ipi_test_data = parm;
147 cpu_data_t *cpu_data;
148
149 cpu_data = getCpuDatap();
150
151 *ipi_test_data = cpu_data->cpu_number;
152 }
153
154 void
155 arm64_immediate_ipi_test_callback(void *parm)
156 {
157 volatile uint64_t *ipi_test_data = parm;
158 cpu_data_t *cpu_data;
159
160 cpu_data = getCpuDatap();
161
162 *ipi_test_data = cpu_data->cpu_number + MAX_CPUS;
163 }
164
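/*
 * Scratch results for the IPI self-test: slot i receives CPU i's cpu_number
 * from the normal cross-call callback, and slot i + MAX_CPUS receives
 * cpu_number + MAX_CPUS from the immediate cross-call callback above.
 */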
165 uint64_t arm64_ipi_test_data[MAX_CPUS * 2];
166
167 void
168 arm64_ipi_test()
169 {
170 volatile uint64_t *ipi_test_data, *immediate_ipi_test_data;
171 uint32_t timeout_ms = 100;
172 uint64_t then, now, delta;
173 int current_cpu_number = getCpuDatap()->cpu_number;
174
175 /*
176 * On most systems the only way to reach this state is with the
177 * cpus=1 boot-arg; regardless, with only one CPU active there is
178 * no other CPU to IPI, so skip the test.
179 */
180 if (real_ncpus == 1) {
181 return;
182 }
183
184 for (unsigned int i = 0; i < MAX_CPUS; ++i) {
185 ipi_test_data = &arm64_ipi_test_data[i];
186 immediate_ipi_test_data = &arm64_ipi_test_data[i + MAX_CPUS];
187 *ipi_test_data = ~i;
188 kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
189 if (error != KERN_SUCCESS) {
190 panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
191 }
192
/* Set the timeout reference before the retry loop below reads 'then'. */
then = mach_absolute_time();
193 while ((error = cpu_immediate_xcall((int)i, (void *)arm64_immediate_ipi_test_callback,
194 (void *)(uintptr_t)immediate_ipi_test_data)) == KERN_ALREADY_WAITING) {
195 now = mach_absolute_time();
196 absolutetime_to_nanoseconds(now - then, &delta);
197 if ((delta / NSEC_PER_MSEC) > timeout_ms) {
198 panic("CPU %d was unable to immediate-IPI CPU %u within %dms", current_cpu_number, i, timeout_ms);
199 }
200 }
201
202 if (error != KERN_SUCCESS) {
203 panic("CPU %d was unable to immediate-IPI CPU %u: error %d", current_cpu_number, i, error);
204 }
205
206 then = mach_absolute_time();
207
208 while ((*ipi_test_data != i) || (*immediate_ipi_test_data != (i + MAX_CPUS))) {
209 now = mach_absolute_time();
210 absolutetime_to_nanoseconds(now - then, &delta);
211 if ((delta / NSEC_PER_MSEC) > timeout_ms) {
212 panic("CPU %d tried to IPI CPU %d but didn't get correct responses within %dms, responses: %llx, %llx",
213 current_cpu_number, i, timeout_ms, *ipi_test_data, *immediate_ipi_test_data);
214 }
215 }
216 }
217 }
218 #endif /* defined(CONFIG_XNUPOST) */
219
220 static void
221 configure_coresight_registers(cpu_data_t *cdp)
222 {
223 uint64_t addr;
224 int i;
225
226 assert(cdp);
227
228 /*
229 * ARMv8 coresight registers are optional. If the device tree did not
230 * provide cpu_regmap_paddr, assume that coresight registers are not
231 * supported.
232 */
233 if (cdp->cpu_regmap_paddr) {
234 for (i = 0; i < CORESIGHT_REGIONS; ++i) {
235 /* Skip CTI; these registers are debug-only (they are
236 * not present on production hardware), and there is
237 * at least one known Cyclone erratum involving CTI
238 * (rdar://12802966). We have no known clients that
239 * need the kernel to unlock CTI, so it is safer
240 * to avoid doing the access.
241 */
242 if (i == CORESIGHT_CTI) {
243 continue;
244 }
245 /* Skip debug-only registers on production chips */
246 if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) {
247 continue;
248 }
249
250 if (!cdp->coresight_base[i]) {
251 addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
252 cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);
253
254 /*
255 * At this point, failing to io map the
256 * registers is treated as a fatal error.
257 */
258 if (!cdp->coresight_base[i]) {
259 panic("unable to ml_io_map coresight regions");
260 }
261 }
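/*
 * Writing the CoreSight lock access key (ARM_DBG_LOCK_ACCESS_KEY,
 * 0xC5ACCE55) to a region's lock access register (the DBGLAR offset)
 * unlocks memory-mapped writes to that region's debug registers.
 */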
262 /* Unlock EDLAR, CTILAR, PMLAR */
263 if (i != CORESIGHT_UTT) {
264 *(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
265 }
266 }
267 }
268 }
269
270
271 /*
272 * Routine: cpu_bootstrap
273 * Function:
274 */
275 void
276 cpu_bootstrap(void)
277 {
278 }
279
280 /*
281 * Routine: cpu_sleep
282 * Function: Prepare the calling CPU for system sleep and quiesce it; does not return.
283 */
284 void
285 cpu_sleep(void)
286 {
287 cpu_data_t *cpu_data_ptr = getCpuDatap();
288
289 pmap_switch_user_ttb(kernel_pmap);
290 cpu_data_ptr->cpu_active_thread = current_thread();
291 cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
292 cpu_data_ptr->cpu_flags |= SleepState;
293 cpu_data_ptr->cpu_user_debug = NULL;
294 #if KPC
295 kpc_idle();
296 #endif /* KPC */
297 #if MONOTONIC
298 mt_cpu_down(cpu_data_ptr);
299 #endif /* MONOTONIC */
300
301 CleanPoC_Dcache();
302
303 /* This calls:
304 *
305 * IOCPURunPlatformQuiesceActions when sleeping the boot cpu
306 * ml_arm_sleep() on all CPUs
307 *
308 * It does not return.
309 */
310 PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
311 /*NOTREACHED*/
312 }
313
314 /*
315 * Routine: cpu_interrupt_is_pending
316 * Function: Returns the value of ISR_EL1. Due to how this register
317 * is implemented, this returns 0 if there are no
318 * interrupts pending, so it can be used as a boolean test.
319 */
320 static int
321 cpu_interrupt_is_pending(void)
322 {
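/*
 * ISR_EL1 carries only the A/I/F pending bits, so any nonzero value
 * indicates a pending SError, IRQ, or FIQ.
 */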
323 uint64_t isr_value;
324 isr_value = __builtin_arm_rsr64("ISR_EL1");
325 return (int)isr_value;
326 }
327
328 /*
329 * Routine: cpu_idle
330 * Function: Per-CPU idle loop entry; does not return (exits via Idle_load_context()).
331 */
332 void __attribute__((noreturn))
333 cpu_idle(void)
334 {
335 cpu_data_t *cpu_data_ptr = getCpuDatap();
336 uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;
337
338 if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled)) {
339 Idle_load_context();
340 }
341
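/*
 * SetIdlePop() is expected to arm the timer for the next deadline and
 * return FALSE when that deadline is too near to make a full WFI idle
 * worthwhile; in that case optionally ride out the gap in WFE below.
 */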
342 if (!SetIdlePop()) {
343 /* If a deadline is pending, wait for it to elapse. */
344 if (idle_wfe_to_deadline) {
345 if (arm64_wfe_allowed()) {
346 while (!cpu_interrupt_is_pending()) {
347 __builtin_arm_wfe();
348 }
349 }
350 }
351
352 Idle_load_context();
353 }
354
355 lastPop = cpu_data_ptr->rtcPop;
356
357 pmap_switch_user_ttb(kernel_pmap);
358 cpu_data_ptr->cpu_active_thread = current_thread();
359 if (cpu_data_ptr->cpu_user_debug) {
360 arm_debug_set(NULL);
361 }
362 cpu_data_ptr->cpu_user_debug = NULL;
363
364 if (cpu_data_ptr->cpu_idle_notify) {
365 ((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
366 }
367
368 if (cpu_data_ptr->idle_timer_notify != 0) {
369 if (new_idle_timeout_ticks == 0x0ULL) {
370 /* turn off the idle timer */
371 cpu_data_ptr->idle_timer_deadline = 0x0ULL;
372 } else {
373 /* set the new idle timeout */
374 clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
375 }
376 timer_resync_deadlines();
377 if (cpu_data_ptr->rtcPop != lastPop) {
378 SetIdlePop();
379 }
380 }
381
382 #if KPC
383 kpc_idle();
384 #endif
385 #if MONOTONIC
386 mt_cpu_idle(cpu_data_ptr);
387 #endif /* MONOTONIC */
388
389 if (wfi) {
390 platform_cache_idle_enter();
391
392 #if DEVELOPMENT || DEBUG
393 // When simulating wfi overhead,
394 // force wfi to clock gating only
395 if (wfi == 2) {
396 arm64_force_wfi_clock_gate();
397 }
398 #endif /* DEVELOPMENT || DEBUG */
399
400 #if defined(APPLETYPHOON)
401 // <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
402 typhoon_prepare_for_wfi();
403 #endif
404 __builtin_arm_dsb(DSB_SY);
405 __builtin_arm_wfi();
406
407 #if defined(APPLETYPHOON)
408 // <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
409 typhoon_return_from_wfi();
410 #endif
411
412 #if DEVELOPMENT || DEBUG
413 // Handle wfi overhead simulation
414 if (wfi == 2) {
415 uint64_t deadline;
416
417 // Calculate wfi delay deadline
418 clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);
419
420 // Flush L1 caches
421 if ((wfi_flags & 1) != 0) {
422 InvalidatePoU_Icache();
423 FlushPoC_Dcache();
424 }
425
426 // Flush TLBs
427 if ((wfi_flags & 2) != 0) {
428 flush_core_tlb();
429 }
430
431 // Wait for the balance of the wfi delay
432 clock_delay_until(deadline);
433 }
434 #endif /* DEVELOPMENT || DEBUG */
435
436 platform_cache_idle_exit();
437 }
438
439 ClearIdlePop(TRUE);
440
441 cpu_idle_exit(FALSE);
442 }
443
444 /*
445 * Routine: cpu_idle_exit
446 * Function:
447 */
448 void
449 cpu_idle_exit(boolean_t from_reset)
450 {
451 uint64_t new_idle_timeout_ticks = 0x0ULL;
452 cpu_data_t *cpu_data_ptr = getCpuDatap();
453
454 assert(exception_stack_pointer() != 0);
455
456 /* Back from WFI, unlock OSLAR and EDLAR. */
457 if (from_reset) {
458 configure_coresight_registers(cpu_data_ptr);
459 }
460
461 #if KPC
462 kpc_idle_exit();
463 #endif
464
465 #if MONOTONIC
466 mt_cpu_run(cpu_data_ptr);
467 #endif /* MONOTONIC */
468
469 pmap_switch_user_ttb(cpu_data_ptr->cpu_active_thread->map->pmap);
470
471 if (cpu_data_ptr->cpu_idle_notify) {
472 ((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
473 }
474
475 if (cpu_data_ptr->idle_timer_notify != 0) {
476 if (new_idle_timeout_ticks == 0x0ULL) {
477 /* turn off the idle timer */
478 cpu_data_ptr->idle_timer_deadline = 0x0ULL;
479 } else {
480 /* set the new idle timeout */
481 clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
482 }
483 timer_resync_deadlines();
484 }
485
486 Idle_load_context();
487 }
488
489 void
490 cpu_init(void)
491 {
492 cpu_data_t *cdp = getCpuDatap();
493 arm_cpu_info_t *cpu_info_p;
494
495 assert(exception_stack_pointer() != 0);
496
497 if (cdp->cpu_type != CPU_TYPE_ARM64) {
498 cdp->cpu_type = CPU_TYPE_ARM64;
499
500 timer_call_queue_init(&cdp->rtclock_timer.queue);
501 cdp->rtclock_timer.deadline = EndOfAllTime;
502
503 if (cdp == &BootCpuData) {
504 do_cpuid();
505 do_cacheid();
506 do_mvfpid();
507 } else {
508 /*
509 * We initialize non-boot CPUs here; the boot CPU is
510 * dealt with as part of pmap_bootstrap.
511 */
512 pmap_cpu_data_init();
513 }
514 /* ARM_SMP: Assuming identical cpu */
515 do_debugid();
516
517 cpu_info_p = cpuid_info();
518
519 /* switch based on CPU's reported architecture */
520 switch (cpu_info_p->arm_info.arm_arch) {
521 case CPU_ARCH_ARMv8:
522 cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
523 break;
524 default:
525 //cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
526 /* this panic doesn't work this early in startup */
527 panic("Unknown CPU subtype...");
528 break;
529 }
530
531 cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
532 }
533 cdp->cpu_stat.irq_ex_cnt_wake = 0;
534 cdp->cpu_stat.ipi_cnt_wake = 0;
535 cdp->cpu_stat.timer_cnt_wake = 0;
536 #if MONOTONIC
537 cdp->cpu_stat.pmi_cnt_wake = 0;
538 #endif /* MONOTONIC */
539 cdp->cpu_running = TRUE;
540 cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
541 cdp->cpu_sleep_token = 0x0UL;
542 #if KPC
543 kpc_idle_exit();
544 #endif /* KPC */
545 #if MONOTONIC
546 mt_cpu_up(cdp);
547 #endif /* MONOTONIC */
548 }
549
550 void
551 cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
552 {
553 vm_offset_t irq_stack = 0;
554 vm_offset_t exc_stack = 0;
555
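/*
 * Each stack is allocated with one extra page on either side and
 * KMA_GUARD_FIRST/KMA_GUARD_LAST, giving a [guard][stack][guard]
 * layout; the usable top is therefore base + PAGE_SIZE + stack size.
 */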
556 kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
557 INTSTACK_SIZE + (2 * PAGE_SIZE),
558 PAGE_MASK,
559 KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
560 VM_KERN_MEMORY_STACK);
561 if (kr != KERN_SUCCESS) {
562 panic("Unable to allocate cpu interrupt stack\n");
563 }
564
565 cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
566 cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;
567
568 kr = kernel_memory_allocate(kernel_map, &exc_stack,
569 EXCEPSTACK_SIZE + (2 * PAGE_SIZE),
570 PAGE_MASK,
571 KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
572 VM_KERN_MEMORY_STACK);
573 if (kr != KERN_SUCCESS) {
574 panic("Unable to allocate cpu exception stack\n");
575 }
576
577 cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
578 cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;
579 }
580
581 void
582 cpu_data_free(cpu_data_t *cpu_data_ptr)
583 {
584 if ((cpu_data_ptr == NULL) || (cpu_data_ptr == &BootCpuData)) {
585 return;
586 }
587
588 cpu_processor_free( cpu_data_ptr->cpu_processor);
589 if (CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_vaddr == cpu_data_ptr) {
590 CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_vaddr = NULL;
591 CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_paddr = 0;
592 __builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible
593 }
594 (kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
595 (kfree)((void *)(cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE), EXCEPSTACK_SIZE);
596 kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
597 }
598
599 void
600 cpu_data_init(cpu_data_t *cpu_data_ptr)
601 {
602 uint32_t i;
603
604 cpu_data_ptr->cpu_flags = 0;
605 cpu_data_ptr->interrupts_enabled = 0;
606 cpu_data_ptr->cpu_int_state = 0;
607 cpu_data_ptr->cpu_pending_ast = AST_NONE;
608 cpu_data_ptr->cpu_cache_dispatch = (void *) 0;
609 cpu_data_ptr->rtcPop = EndOfAllTime;
610 cpu_data_ptr->rtclock_datap = &RTClockData;
611 cpu_data_ptr->cpu_user_debug = NULL;
612
613
614 cpu_data_ptr->cpu_base_timebase = 0;
615 cpu_data_ptr->cpu_idle_notify = (void *) 0;
616 cpu_data_ptr->cpu_idle_latency = 0x0ULL;
617 cpu_data_ptr->cpu_idle_pop = 0x0ULL;
618 cpu_data_ptr->cpu_reset_type = 0x0UL;
619 cpu_data_ptr->cpu_reset_handler = 0x0UL;
620 cpu_data_ptr->cpu_reset_assist = 0x0UL;
621 cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
622 cpu_data_ptr->cpu_phys_id = 0x0UL;
623 cpu_data_ptr->cpu_l2_access_penalty = 0;
624 cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
625 cpu_data_ptr->cpu_cluster_id = 0;
626 cpu_data_ptr->cpu_l2_id = 0;
627 cpu_data_ptr->cpu_l2_size = 0;
628 cpu_data_ptr->cpu_l3_id = 0;
629 cpu_data_ptr->cpu_l3_size = 0;
630
631 cpu_data_ptr->cpu_signal = SIGPdisabled;
632
633 cpu_data_ptr->cpu_get_fiq_handler = NULL;
634 cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
635 cpu_data_ptr->cpu_tbd_hardware_val = NULL;
636 cpu_data_ptr->cpu_get_decrementer_func = NULL;
637 cpu_data_ptr->cpu_set_decrementer_func = NULL;
638 cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
639 cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
640 cpu_data_ptr->cpu_xcall_p0 = NULL;
641 cpu_data_ptr->cpu_xcall_p1 = NULL;
642 cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
643 cpu_data_ptr->cpu_imm_xcall_p1 = NULL;
644
645 for (i = 0; i < CORESIGHT_REGIONS; ++i) {
646 cpu_data_ptr->coresight_base[i] = 0;
647 }
648
649 pmap_cpu_data_t * pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;
650
651 pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
652 pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;
653
654 for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) {
655 pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0;
656 }
657 cpu_data_ptr->halt_status = CPU_NOT_HALTED;
658 #if __ARM_KERNEL_PROTECT__
659 cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
660 #endif /* __ARM_KERNEL_PROTECT__ */
661
662 #if defined(HAS_APPLE_PAC)
663 cpu_data_ptr->rop_key = 0;
664 #endif
665 }
666
667 kern_return_t
668 cpu_data_register(cpu_data_t *cpu_data_ptr)
669 {
670 int cpu = cpu_data_ptr->cpu_number;
671
672 #if KASAN
673 for (int i = 0; i < CPUWINDOWS_MAX; i++) {
674 kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
675 }
676 #endif
677
678 __builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
679 CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
680 CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
681 return KERN_SUCCESS;
682 }
683
684
685 kern_return_t
686 cpu_start(int cpu)
687 {
688 cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
689
690 kprintf("cpu_start() cpu: %d\n", cpu);
691
692 if (cpu == cpu_number()) {
693 cpu_machine_init();
694 configure_coresight_registers(cpu_data_ptr);
695 } else {
696 thread_t first_thread;
697
698 cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;
699
700 cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;
701
702 if (cpu_data_ptr->cpu_processor->startup_thread != THREAD_NULL) {
703 first_thread = cpu_data_ptr->cpu_processor->startup_thread;
704 } else {
705 first_thread = cpu_data_ptr->cpu_processor->idle_thread;
706 }
707 cpu_data_ptr->cpu_active_thread = first_thread;
708 first_thread->machine.CpuDatap = cpu_data_ptr;
709
710 configure_coresight_registers(cpu_data_ptr);
711
712 flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
713 flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
714 (void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
715 }
716
717 return KERN_SUCCESS;
718 }
719
720
721 void
722 cpu_timebase_init(boolean_t from_boot)
723 {
724 cpu_data_t *cdp = getCpuDatap();
725
726 if (cdp->cpu_get_fiq_handler == NULL) {
727 cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
728 cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
729 cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
730 cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
731 cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
732 }
733
734 if (!from_boot && (cdp == &BootCpuData)) {
735 /*
736 * When we wake from sleep, we have no guarantee about the state
737 * of the hardware timebase. It may have kept ticking across sleep, or
738 * it may have reset.
739 *
740 * To deal with this, we calculate an offset to the clock that will
741 * produce a timebase value wake_abstime at the point the boot
742 * CPU calls cpu_timebase_init on wake.
743 *
744 * This ensures that mach_absolute_time() stops ticking across sleep.
745 */
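/*
 * For example (illustrative numbers): if wake_abstime was captured as
 * 1000 ticks at sleep and the hardware clock reads 40 on wake, the base
 * becomes 960, so the timebase resumes from 1000 rather than jumping.
 */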
746 rtclock_base_abstime = wake_abstime - ml_get_hwclock();
747 } else if (from_boot) {
748 /* On initial boot, initialize time_since_reset to CNTPCT_EL0. */
749 ml_set_reset_time(ml_get_hwclock());
750 }
751
752 cdp->cpu_decrementer = 0x7FFFFFFFUL;
753 cdp->cpu_timebase = 0x0UL;
754 cdp->cpu_base_timebase = rtclock_base_abstime;
755 }
756
757 int
758 cpu_cluster_id(void)
759 {
760 return getCpuDatap()->cpu_cluster_id;
761 }
762
763 __attribute__((noreturn))
764 void
765 ml_arm_sleep(void)
766 {
767 cpu_data_t *cpu_data_ptr = getCpuDatap();
768
769 if (cpu_data_ptr == &BootCpuData) {
770 cpu_data_t *target_cdp;
771 int cpu;
772 int max_cpu;
773
774 max_cpu = ml_get_max_cpu_number();
775 for (cpu = 0; cpu <= max_cpu; cpu++) {
776 target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
777
778 if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) {
779 continue;
780 }
781
782 while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
783 ;
784 }
785 }
786
787 /*
788 * Now that the other cores have entered the sleep path, set
789 * the abstime value we'll use when we resume.
790 */
791 wake_abstime = ml_get_timebase();
792 ml_set_reset_time(UINT64_MAX);
793 } else {
794 CleanPoU_Dcache();
795 }
796
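/*
 * Publish this CPU's sleep token; the boot CPU spins above until every
 * secondary CPU has set ARM_CPU_ON_SLEEP_PATH before it quiesces.
 */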
797 cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
798
799 if (cpu_data_ptr == &BootCpuData) {
800 #if WITH_CLASSIC_S2R
801 // Classic suspend to RAM writes the suspend signature into the
802 // sleep token buffer so that iBoot knows that it's on the warm
803 // boot (wake) path (as opposed to the cold boot path). Newer SoCs
804 // do not go through SecureROM/iBoot on the warm boot path. The
805 // reconfig engine script brings the CPU out of reset at the kernel's
806 // reset vector which points to the warm boot initialization code.
807 if (sleepTokenBuffer != (vm_offset_t) NULL) {
808 platform_cache_shutdown();
809 bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
810 } else {
811 panic("No sleep token buffer");
812 }
813 #endif
814
815 #if __ARM_GLOBAL_SLEEP_BIT__
816 /* Allow other CPUs to go to sleep. */
817 arm64_stall_sleep = FALSE;
818 __builtin_arm_dmb(DMB_ISH);
819 #endif
820
821 /* Architectural debug state: <rdar://problem/12390433>:
822 * Grab debug lock EDLAR and clear bit 0 in EDPRCR to
823 * tell the debugger not to prevent power gating.
824 */
825 if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
826 *(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
827 *(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
828 }
829
830 #if MONOTONIC
831 mt_sleep();
832 #endif /* MONOTONIC */
833 /* ARM64-specific preparation */
834 arm64_prepare_for_sleep();
835 } else {
836 #if __ARM_GLOBAL_SLEEP_BIT__
837 /*
838 * With the exception of the CPU revisions listed above, our ARM64 CPUs have a
839 * global register to manage entering deep sleep, as opposed to a per-CPU
840 * register. We cannot update this register until all CPUs are ready to enter
841 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
842 * (by idling), it will hang (due to the side effects of enabling deep sleep),
843 * which can hang the sleep process or cause memory corruption on wake.
844 *
845 * To avoid these issues, we'll stall on this global value, which CPU0 will
846 * manage.
847 */
848 while (arm64_stall_sleep) {
849 __builtin_arm_wfe();
850 }
851 #endif
852 CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));
853
854 /* Architectural debug state: <rdar://problem/12390433>:
855 * Grab debug lock EDLAR and clear bit 0 in EDPRCR to
856 * tell the debugger not to prevent power gating.
857 */
858 if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
859 *(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
860 *(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
861 }
862
863 /* ARM64-specific preparation */
864 arm64_prepare_for_sleep();
865 }
866 }
867
868 void
869 cpu_machine_idle_init(boolean_t from_boot)
870 {
871 static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
872 cpu_data_t *cpu_data_ptr = getCpuDatap();
873
874 if (from_boot) {
875 unsigned long jtag = 0;
876 int wfi_tmp = 1;
877 uint32_t production = 1;
878 DTEntry entry;
879
880 if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
881 if (jtag != 0) {
882 idle_enable = FALSE;
883 } else {
884 idle_enable = TRUE;
885 }
886 } else {
887 idle_enable = TRUE;
888 }
889
890 PE_parse_boot_argn("wfi", &wfi_tmp, sizeof(wfi_tmp));
891
892 // bits 7..0 give the wfi type
893 switch (wfi_tmp & 0xff) {
894 case 0:
895 // disable wfi
896 wfi = 0;
897 break;
898
899 #if DEVELOPMENT || DEBUG
900 case 2:
901 // wfi overhead simulation
902 // 31..16 - wfi delay in us
903 // 15..8 - flags
904 // 7..0 - 2
905 wfi = 2;
906 wfi_flags = (wfi_tmp >> 8) & 0xFF;
907 nanoseconds_to_absolutetime(((wfi_tmp >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
908 break;
909 #endif /* DEVELOPMENT || DEBUG */
910
911 case 1:
912 default:
913 // do nothing
914 break;
915 }
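/*
 * Illustrative example (DEVELOPMENT/DEBUG kernels only): the boot-arg
 * wfi=0x00640302 selects type 2 (overhead simulation) from bits 7..0,
 * sets wfi_flags to 0x03 from bits 15..8 (flush L1 caches and TLBs),
 * and supplies a simulated delay field of 0x0064 (100) from bits 31..16.
 */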
916
917 PE_parse_boot_argn("idle_wfe_to_deadline", &idle_wfe_to_deadline, sizeof(idle_wfe_to_deadline));
918
919 ResetHandlerData.assist_reset_handler = 0;
920 ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);
921
922 #ifdef MONITOR
923 monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
924 #elif !defined(NO_MONITOR)
925 #error MONITOR undefined, WFI power gating may not operate correctly
926 #endif /* MONITOR */
927
928 // Determine whether this is a production or a debug-fused chip
929 if (kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) {
930 unsigned int size;
931 void *prop;
932
933 if (kSuccess == DTGetProperty(entry, "effective-production-status-ap", &prop, &size)) {
934 if (size == 4) {
935 bcopy(prop, &production, size);
936 }
937 }
938 }
939 if (!production) {
940 #if defined(APPLE_ARM64_ARCH_FAMILY)
941 // Enable coresight debug registers on debug-fused chips
942 coresight_debug_enabled = TRUE;
943 #endif
944 }
945
946 start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
947 resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
948 }
949
950 #if WITH_CLASSIC_S2R
951 if (cpu_data_ptr == &BootCpuData) {
952 static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
953 if (sleepTokenBuffer != (vm_offset_t) NULL) {
954 SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
955 } else {
956 panic("No sleep token buffer");
957 }
958
959 bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
960 SleepToken_low_paddr, sizeof(SleepToken));
961 flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
962 }
963 ;
964 #endif
965
966 cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
967 clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
968 }
969
970 _Atomic uint32_t cpu_idle_count = 0;
971
972 void
973 machine_track_platform_idle(boolean_t entry)
974 {
975 if (entry) {
976 os_atomic_inc(&cpu_idle_count, relaxed);
977 } else {
978 os_atomic_dec(&cpu_idle_count, relaxed);
979 }
980 }
981
982 #if WITH_CLASSIC_S2R
983 void
984 sleep_token_buffer_init(void)
985 {
986 cpu_data_t *cpu_data_ptr = getCpuDatap();
987 DTEntry entry;
988 size_t size;
989 void **prop;
990
991 if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
992 /* Find the sleep token page ("stram") node in the device tree */
993 if (kSuccess != DTLookupEntry(0, "stram", &entry)) {
994 return;
995 }
996
997 if (kSuccess != DTGetProperty(entry, "reg", (void **)&prop, (unsigned int *)&size)) {
998 return;
999 }
1000
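/*
 * The "reg" property is expected to hold a { physical address, size }
 * pair; prop[0] and prop[1] below index those two values.
 */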
1001 /* Map the page into the kernel space */
1002 sleepTokenBuffer = ml_io_map(((vm_offset_t *)prop)[0], ((vm_size_t *)prop)[1]);
1003 }
1004 }
1005 #endif