/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm64/cpu.c
 *
 * cpu specific routines
 */

#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#if KPC
#include <kern/kpc.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

extern boolean_t idle_enable;
extern uint64_t wake_abstime;

#if WITH_CLASSIC_S2R
void sleep_token_buffer_init(void);
#endif


extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;

#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

extern void __attribute__((noreturn)) arm64_prepare_for_sleep(void);
extern void arm64_force_wfi_clock_gate(void);
#if (defined(APPLECYCLONE) || defined(APPLETYPHOON))
// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
extern void cyclone_typhoon_prepare_for_wfi(void);
extern void cyclone_typhoon_return_from_wfi(void);
#endif


vm_address_t start_cpu_paddr;

sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
	.tcr_el1 = TCR_EL1_BOOT,
};

// wfi - wfi mode
// 0 : disabled
// 1 : normal
// 2 : overhead simulation (delay & flags)
static int wfi = 1;

#if DEVELOPMENT || DEBUG

// wfi_flags
// 1 << 0 : flush L1s
// 1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */

#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif

#if WITH_CLASSIC_S2R
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
#endif

#if WITH_CLASSIC_S2R
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
#endif
static boolean_t coresight_debug_enabled = FALSE;

#if defined(CONFIG_XNUPOST)
void arm64_ipi_test_callback(void *);

void
arm64_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number;
}

uint64_t arm64_ipi_test_data[MAX_CPUS];

void
arm64_ipi_test()
{
	volatile uint64_t *ipi_test_data;
	uint32_t timeout_ms = 100;
	uint64_t then, now, delta;
	int current_cpu_number = getCpuDatap()->cpu_number;

	/*
	 * Probably the only way to have this on most systems is with the
	 * cpus=1 boot-arg, but nonetheless, if we only have 1 CPU active,
	 * IPI is not available.
	 */
	if (real_ncpus == 1) {
		return;
	}

	for (unsigned int i = 0; i < MAX_CPUS; ++i) {
		ipi_test_data = &arm64_ipi_test_data[i];
		*ipi_test_data = ~i;
		kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		then = mach_absolute_time();

		while (*ipi_test_data != i) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d tried to IPI CPU %u but didn't get the correct response within %ums, response: %llx", current_cpu_number, i, timeout_ms, *ipi_test_data);
			}
		}
	}
}
#endif /* defined(CONFIG_XNUPOST) */

static void
configure_coresight_registers(cpu_data_t *cdp)
{
	uint64_t addr;
	int i;

	assert(cdp);

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide cpu_regmap_paddr, assume that coresight registers are not
	 * supported.
	 */
	if (cdp->cpu_regmap_paddr) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			/* Skip CTI; these registers are debug-only (they are
			 * not present on production hardware), and there is
			 * at least one known Cyclone erratum involving CTI
			 * (rdar://12802966). We have no known clients that
			 * need the kernel to unlock CTI, so it is safer
			 * to avoid doing the access.
			 */
			if (i == CORESIGHT_CTI) {
				continue;
			}
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) {
				continue;
			}

			if (!cdp->coresight_base[i]) {
				addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
				cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);

				/*
				 * At this point, failing to io map the
				 * registers is considered an error.
				 */
				if (!cdp->coresight_base[i]) {
					panic("unable to ml_io_map coresight regions");
				}
			}
			/* Unlock EDLAR, CTILAR, PMLAR */
			if (i != CORESIGHT_UTT) {
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			}
		}
	}
}


/*
 * Routine: cpu_bootstrap
 * Function: Early CPU bootstrap hook; currently a no-op on arm64.
 */
void
cpu_bootstrap(void)
{
}

/*
 * Routine: cpu_sleep
 * Function: Prepare the current CPU for sleep: switch to the kernel
 *	pmap, install the reset handler, mark the CPU as sleeping, quiesce
 *	performance monitoring, clean the data cache, and hand off to the
 *	platform quiesce code.
 */
void
cpu_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;
#if KPC
	kpc_idle();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_down(cpu_data_ptr);
#endif /* MONOTONIC */

	CleanPoC_Dcache();

	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
}

/*
 * Routine: cpu_idle
 * Function: Idle loop for a CPU with no runnable threads. Notifies the
 *	idle handlers, optionally executes WFI (subject to the "wfi"
 *	boot-arg), and leaves idle via cpu_idle_exit().
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;

	if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled)) {
		Idle_load_context();
	}
	if (!SetIdlePop()) {
		Idle_load_context();
	}
	lastPop = cpu_data_ptr->rtcPop;

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug) {
		arm_debug_set(NULL);
	}
	cpu_data_ptr->cpu_user_debug = NULL;

	if (cpu_data_ptr->cpu_idle_notify) {
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop) {
			SetIdlePop();
		}
	}

#if KPC
	kpc_idle();
#endif
#if MONOTONIC
	mt_cpu_idle(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi) {
		platform_cache_idle_enter();

#if DEVELOPMENT || DEBUG
		// When simulating wfi overhead,
		// force wfi to clock gating only
		if (wfi == 2) {
			arm64_force_wfi_clock_gate();
		}
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		cyclone_typhoon_prepare_for_wfi();
#endif
		__builtin_arm_dsb(DSB_SY);
		__builtin_arm_wfi();

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		cyclone_typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
		// Handle wfi overhead simulation
		if (wfi == 2) {
			uint64_t deadline;

			// Calculate wfi delay deadline
			clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

			// Flush L1 caches
			if ((wfi_flags & 1) != 0) {
				InvalidatePoU_Icache();
				FlushPoC_Dcache();
			}

			// Flush TLBs
			if ((wfi_flags & 2) != 0) {
				flush_core_tlb();
			}

			// Wait for the balance of the wfi delay
			clock_delay_until(deadline);
		}
#endif /* DEVELOPMENT || DEBUG */

		platform_cache_idle_exit();
	}

	ClearIdlePop(TRUE);

	cpu_idle_exit(FALSE);
}

/*
 * Routine: cpu_idle_exit
 * Function: Leave the idle state, either after WFI or after a reset out
 *	of idle: unlock the debug registers if needed, resume performance
 *	monitoring, switch back to the active thread's pmap, and re-arm the
 *	idle timer before reloading the idle context.
 */
void
cpu_idle_exit(boolean_t from_reset)
{
	uint64_t new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	if (from_reset) {
		configure_coresight_registers(cpu_data_ptr);
	}

#if KPC
	kpc_idle_exit();
#endif

#if MONOTONIC
	mt_cpu_run(cpu_data_ptr);
#endif /* MONOTONIC */

	pmap_switch_user_ttb(cpu_data_ptr->cpu_active_thread->map->pmap);

	if (cpu_data_ptr->cpu_idle_notify) {
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}

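/*
 * Routine: cpu_init
 * Function: Per-CPU initialization, run at boot and again each time the
 *	CPU comes back up (e.g. on wake): sets the CPU type/subtype on the
 *	first call, resets the per-wake statistics, and marks the CPU as
 *	running.
 */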
void
cpu_init(void)
{
	cpu_data_t *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	assert(exception_stack_pointer() != 0);

	if (cdp->cpu_type != CPU_TYPE_ARM64) {
		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_cacheid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}
		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
	cdp->cpu_stat.timer_cnt_wake = 0;
	cdp->cpu_stat.pmi_cnt_wake = 0;
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if KPC
	kpc_idle_exit();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */
}

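/*
 * Routine: cpu_stack_alloc
 * Function: Allocate the per-CPU interrupt and exception stacks. Each
 *	allocation is padded with a guard page on both ends
 *	(KMA_GUARD_FIRST/KMA_GUARD_LAST), so the usable stack top sits one
 *	page above the start of the allocation plus the stack size.
 */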
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t irq_stack = 0;
	vm_offset_t exc_stack = 0;

	kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
	    INTSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu interrupt stack\n");
	}

	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

	kr = kernel_memory_allocate(kernel_map, &exc_stack,
	    EXCEPSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu exception stack\n");
	}

	cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
	cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;
}

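/*
 * Routine: cpu_data_free
 * Function: Release the processor structure, stacks, and cpu_data for a
 *	non-boot CPU; the boot CPU's data is never freed.
 */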
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if (cpu_data_ptr == &BootCpuData) {
		return;
	}

	cpu_processor_free(cpu_data_ptr->cpu_processor);
	(kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
	(kfree)((void *)(cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE), EXCEPSTACK_SIZE);
	kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
}

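/*
 * Routine: cpu_data_init
 * Function: Set a cpu_data_t to its default (quiescent, signals
 *	disabled, on the sleep path) state before the CPU is registered
 *	and started.
 */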
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->interrupts_enabled = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = (void *) 0;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;

	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = (void *) 0;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

#if DEBUG || DEVELOPMENT
	cpu_data_ptr->failed_xcall = NULL;
	cpu_data_ptr->failed_signal = 0;
	cpu_data_ptr->failed_signal_count = 0;
#endif

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

	pmap_cpu_data_t *pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;

	for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) {
		pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0;
	}
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */
}

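/*
 * Routine: cpu_data_register
 * Function: Publish the virtual and physical addresses of this CPU's
 *	cpu_data in CpuDataEntries so other parts of the system (including,
 *	presumably, the low-level reset path that runs before the MMU is
 *	configured) can find it.
 */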
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu = cpu_data_ptr->cpu_number;

#if KASAN
	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}
#endif

	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;
}

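/*
 * Routine: cpu_start
 * Function: Bring a CPU online. For the calling CPU this only finishes
 *	machine init and coresight setup; for a secondary CPU it installs
 *	the reset handler, selects the first thread to run, flushes the
 *	relevant cpu_data to memory, and asks the platform to start the
 *	core.
 */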
kern_return_t
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;

	kprintf("cpu_start() cpu: %d\n", cpu);

	if (cpu == cpu_number()) {
		cpu_machine_init();
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;

		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

		cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;

		if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL) {
			first_thread = cpu_data_ptr->cpu_processor->next_thread;
		} else {
			first_thread = cpu_data_ptr->cpu_processor->idle_thread;
		}
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;

		configure_coresight_registers(cpu_data_ptr);

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
	}

	return KERN_SUCCESS;
}

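/*
 * Routine: cpu_timebase_init
 * Function: Install the timebase/decrementer callbacks for this CPU and
 *	reset its timebase state. On the boot CPU's wake path (from_boot is
 *	FALSE), also recompute rtclock_base_abstime so mach_absolute_time()
 *	does not advance across sleep.
 */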
void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData)) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase. It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = 0x0UL;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}

int
cpu_cluster_id(void)
{
	return getCpuDatap()->cpu_cluster_id;
}

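/*
 * Routine: ml_arm_sleep
 * Function: Final CPU-side preparation for system sleep. The boot CPU
 *	waits for all other CPUs to reach the sleep path, records
 *	wake_abstime, and (where applicable) writes the suspend signature
 *	and releases the global sleep stall; secondary CPUs stall until
 *	released, then enter the low-level sleep sequence.
 */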
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		int cpu;
		int max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) {
				continue;
			}

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
				;
			}
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
	} else {
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
#if WITH_CLASSIC_S2R
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoCs
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector, which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		} else {
			panic("No sleep token buffer");
		}
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR to
		 * tell the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if MONOTONIC
		mt_sleep();
#endif /* MONOTONIC */
		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * With the exception of the CPU revisions listed above, our ARM64 CPUs have a
		 * global register to manage entering deep sleep, as opposed to a per-CPU
		 * register. We cannot update this register until all CPUs are ready to enter
		 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
		 * (by idling), it will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which CPU0 will
		 * manage.
		 */
		while (arm64_stall_sleep) {
			__builtin_arm_wfe();
		}
#endif
		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR to
		 * tell the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	}
}

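/*
 * Routine: cpu_machine_idle_init
 * Function: On first boot, parse the "jtag" and "wfi" boot-args, set up
 *	the reset handler data, and determine whether coresight debug
 *	registers should be enabled. On every call, point this CPU's reset
 *	handler at the resume-from-idle entry point.
 */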
void
cpu_machine_idle_init(boolean_t from_boot)
{
	static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		unsigned long jtag = 0;
		int wfi_tmp = 1;
		uint32_t production = 1;
		DTEntry entry;

		if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
			if (jtag != 0) {
				idle_enable = FALSE;
			} else {
				idle_enable = TRUE;
			}
		} else {
			idle_enable = TRUE;
		}

		PE_parse_boot_argn("wfi", &wfi_tmp, sizeof(wfi_tmp));

		// bits 7..0 give the wfi type
		switch (wfi_tmp & 0xff) {
		case 0:
			// disable wfi
			wfi = 0;
			break;

#if DEVELOPMENT || DEBUG
		case 2:
			// wfi overhead simulation
			// 31..16 - wfi delay in us
			// 15..8  - flags
			// 7..0   - 2
			wfi = 2;
			wfi_flags = (wfi_tmp >> 8) & 0xFF;
			nanoseconds_to_absolutetime(((wfi_tmp >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
			break;
#endif /* DEVELOPMENT || DEBUG */

		case 1:
		default:
			// do nothing
			break;
		}

		ResetHandlerData.assist_reset_handler = 0;
		ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);

#ifdef MONITOR
		monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif /* MONITOR */

		// Determine if we are on production or debug chip
		if (kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) {
			unsigned int size;
			void *prop;

			if (kSuccess == DTGetProperty(entry, "effective-production-status-ap", &prop, &size)) {
				if (size == 4) {
					bcopy(prop, &production, size);
				}
			}
		}
		if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
			// Enable coresight debug registers on debug-fused chips
			coresight_debug_enabled = TRUE;
#endif
		}

		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
		resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

#if WITH_CLASSIC_S2R
	if (cpu_data_ptr == &BootCpuData) {
		static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
		} else {
			panic("No sleep token buffer");
		}

		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
		    SleepToken_low_paddr, sizeof(SleepToken));
		flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
	}
#endif

	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}

_Atomic uint32_t cpu_idle_count = 0;

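/*
 * Routine: machine_track_platform_idle
 * Function: Maintain cpu_idle_count, the number of CPUs currently in
 *	the platform idle path: incremented on entry, decremented on exit.
 */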
void
machine_track_platform_idle(boolean_t entry)
{
	if (entry) {
		(void)__c11_atomic_fetch_add(&cpu_idle_count, 1, __ATOMIC_RELAXED);
	} else {
		(void)__c11_atomic_fetch_sub(&cpu_idle_count, 1, __ATOMIC_RELAXED);
	}
}

#if WITH_CLASSIC_S2R
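/*
 * Routine: sleep_token_buffer_init
 * Function: On the boot CPU, locate the sleep token region ("stram") in
 *	the device tree and map it so the suspend/running signatures can
 *	be written into it.
 */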
void
sleep_token_buffer_init(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	DTEntry entry;
	size_t size;
	void **prop;

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the stpage node in the device tree */
		if (kSuccess != DTLookupEntry(0, "stram", &entry)) {
			return;
		}

		if (kSuccess != DTGetProperty(entry, "reg", (void **)&prop, (unsigned int *)&size)) {
			return;
		}

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t *)prop)[0], ((vm_size_t *)prop)[1]);
	}
}
#endif