/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm64/cpu.c
 *
 * cpu specific routines
 */

#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#if KPC
#include <kern/kpc.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

extern boolean_t idle_enable;
extern uint64_t wake_abstime;

#if WITH_CLASSIC_S2R
void sleep_token_buffer_init(void);
#endif


extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;

#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

extern void __attribute__((noreturn)) arm64_prepare_for_sleep(void);
extern void arm64_force_wfi_clock_gate(void);
#if (defined(APPLECYCLONE) || defined(APPLETYPHOON))
// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
extern void cyclone_typhoon_prepare_for_wfi(void);
extern void cyclone_typhoon_return_from_wfi(void);
#endif


vm_address_t start_cpu_paddr;

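/*
 * Saved system register state; the low-level start/resume path is expected
 * to reprogram these values (currently only TCR_EL1, seeded with the
 * boot-time setting) when a CPU comes out of reset.
 */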
sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
	.tcr_el1 = TCR_EL1_BOOT,
};


// wfi - wfi mode
//	0 : disabled
//	1 : normal
//	2 : overhead simulation (delay & flags)
static int wfi = 1;

#if DEVELOPMENT || DEBUG

// wfi_flags
//	1 << 0 : flush L1s
//	1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */

#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif

#if WITH_CLASSIC_S2R
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
#endif

#if WITH_CLASSIC_S2R
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
#endif
static boolean_t coresight_debug_enabled = FALSE;

#if defined(CONFIG_XNUPOST)
void arm64_ipi_test_callback(void *);

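/*
 * XNUPOST helper: runs on the target CPU via cpu_xcall and stores that CPU's
 * number into the caller-provided test slot so the sender can verify delivery.
 */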
void arm64_ipi_test_callback(void *parm) {
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number;
}

uint64_t arm64_ipi_test_data[MAX_CPUS];

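/*
 * XNUPOST: cross-call every CPU and wait (up to 100ms each) for it to echo
 * back its CPU number; panics on send failure or timeout. Skipped when only
 * one CPU is active, since there is no other CPU to IPI.
 */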
void arm64_ipi_test() {
	volatile uint64_t *ipi_test_data;
	uint32_t timeout_ms = 100;
	uint64_t then, now, delta;
	int current_cpu_number = getCpuDatap()->cpu_number;

	/*
	 * Probably the only way to have this configuration on most systems is
	 * with the cpus=1 boot-arg; nonetheless, if only one CPU is active,
	 * IPI is not available.
	 */
	if (real_ncpus == 1) {
		return;
	}

	for (unsigned int i = 0; i < MAX_CPUS; ++i) {
		ipi_test_data = &arm64_ipi_test_data[i];
		*ipi_test_data = ~i;
		kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
		if (error != KERN_SUCCESS)
			panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);

		then = mach_absolute_time();

		while (*ipi_test_data != i) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d tried to IPI CPU %u but didn't get correct response within %ums, response: %llx", current_cpu_number, i, timeout_ms, *ipi_test_data);
			}
		}
	}

}
#endif /* defined(CONFIG_XNUPOST) */

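/*
 * Map and unlock the per-CPU CoreSight debug regions described by the device
 * tree (always skipping CTI, and skipping the debug-only ED/UTT regions on
 * production parts), then write the debug lock-access registers so the kernel
 * can touch the external debug interface.
 */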
static void
configure_coresight_registers(cpu_data_t *cdp)
{
	uint64_t addr;
	int i;

	assert(cdp);

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide cpu_regmap_paddr, assume that coresight registers are not
	 * supported.
	 */
	if (cdp->cpu_regmap_paddr) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			/* Skip CTI; these registers are debug-only (they are
			 * not present on production hardware), and there is
			 * at least one known Cyclone erratum involving CTI
			 * (rdar://12802966). We have no known clients that
			 * need the kernel to unlock CTI, so it is safer
			 * to avoid doing the access.
			 */
			if (i == CORESIGHT_CTI)
				continue;
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled)
				continue;

			if (!cdp->coresight_base[i]) {
				addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
				cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);

				/*
				 * At this point, failing to io map the
				 * registers is considered an error.
				 */
				if (!cdp->coresight_base[i]) {
					panic("unable to ml_io_map coresight regions");
				}
			}
			/* Unlock EDLAR, CTILAR, PMLAR */
			if (i != CORESIGHT_UTT)
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
		}
	}
}


/*
 *	Routine:	cpu_bootstrap
 *	Function:	Early per-CPU bootstrap hook; nothing is required on
 *			arm64, so this is an empty stub.
 */
void
cpu_bootstrap(void)
{
}

/*
 *	Routine:	cpu_sleep
 *	Function:	Prepare the current CPU for sleep: record the active
 *			thread, point the reset handler at the start_cpu wake
 *			path, mark the CPU as sleeping, clean the data cache to
 *			the point of coherency, and hand off to the platform
 *			expert to quiesce.
 */
void
cpu_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;
#if KPC
	kpc_idle();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_down(cpu_data_ptr);
#endif /* MONOTONIC */

	CleanPoC_Dcache();

	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);

}

/*
 *	Routine:	cpu_idle
 *	Function:	Low-power idle loop for the current CPU. Notifies the
 *			idle handler, optionally executes WFI (with per-SoC
 *			workarounds and optional overhead simulation on
 *			DEVELOPMENT/DEBUG kernels), and exits through
 *			cpu_idle_exit(); does not return to the caller.
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;

	if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled))
		Idle_load_context();
	if (!SetIdlePop())
		Idle_load_context();
	lastPop = cpu_data_ptr->rtcPop;

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug)
		arm_debug_set(NULL);
	cpu_data_ptr->cpu_user_debug = NULL;

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop)
			SetIdlePop();
	}

#if KPC
	kpc_idle();
#endif
#if MONOTONIC
	mt_cpu_idle(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi) {
		platform_cache_idle_enter();

#if DEVELOPMENT || DEBUG
		// When simulating wfi overhead,
		// force wfi to clock gating only
		if (wfi == 2) {
			arm64_force_wfi_clock_gate();
		}
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		cyclone_typhoon_prepare_for_wfi();
#endif
		__builtin_arm_dsb(DSB_SY);
		__builtin_arm_wfi();

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		cyclone_typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
		// Handle wfi overhead simulation
		if (wfi == 2) {
			uint64_t deadline;

			// Calculate wfi delay deadline
			clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

			// Flush L1 caches
			if ((wfi_flags & 1) != 0) {
				InvalidatePoU_Icache();
				FlushPoC_Dcache();
			}

			// Flush TLBs
			if ((wfi_flags & 2) != 0) {
				flush_core_tlb();
			}

			// Wait for the balance of the wfi delay
			clock_delay_until(deadline);
		}
#endif /* DEVELOPMENT || DEBUG */

		platform_cache_idle_exit();
	}

	ClearIdlePop(TRUE);

	cpu_idle_exit(FALSE);
}

/*
 *	Routine:	cpu_idle_exit
 *	Function:	Leave the idle state: reconfigure CoreSight after a
 *			reset-based wakeup, restore the active thread's pmap,
 *			notify the idle and idle-timer handlers, and reload the
 *			idle context.
 */
void
cpu_idle_exit(boolean_t from_reset)
{
	uint64_t new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	if (from_reset)
		configure_coresight_registers(cpu_data_ptr);

#if KPC
	kpc_idle_exit();
#endif

#if MONOTONIC
	mt_cpu_run(cpu_data_ptr);
#endif /* MONOTONIC */

	pmap_switch_user_ttb(cpu_data_ptr->cpu_active_thread->map->pmap);

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}

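/*
 * Per-CPU initialization run each time a CPU comes up (boot and wake):
 * performs one-time CPU type/subtype discovery, resets the wake statistics,
 * clears the sleep token, and marks the CPU as running.
 */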
void
cpu_init(void)
{
	cpu_data_t *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	assert(exception_stack_pointer() != 0);

	if (cdp->cpu_type != CPU_TYPE_ARM64) {

		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_cacheid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}
		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
	cdp->cpu_stat.timer_cnt_wake = 0;
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if KPC
	kpc_idle_exit();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */
}

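/*
 * Allocate the interrupt and exception stacks for a CPU. Each stack is
 * allocated with a guard page on either side, and the per-CPU pointers are
 * set to the top of the usable region.
 */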
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t irq_stack = 0;
	vm_offset_t exc_stack = 0;

	kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
				INTSTACK_SIZE + (2 * PAGE_SIZE),
				PAGE_MASK,
				KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
				VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS)
		panic("Unable to allocate cpu interrupt stack\n");

	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

	kr = kernel_memory_allocate(kernel_map, &exc_stack,
				EXCEPSTACK_SIZE + (2 * PAGE_SIZE),
				PAGE_MASK,
				KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
				VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS)
		panic("Unable to allocate cpu exception stack\n");

	cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
	cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;
}

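/*
 * Release a non-boot CPU's processor structure, stacks, and cpu_data; the
 * boot CPU's data is static and is never freed.
 */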
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if (cpu_data_ptr == &BootCpuData)
		return;

	cpu_processor_free(cpu_data_ptr->cpu_processor);
	kfree((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
	kfree((void *)(cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE), EXCEPSTACK_SIZE);
	kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
}

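/*
 * Initialize a cpu_data structure to its default (not yet running) state:
 * clear per-CPU handlers and statistics, mark signals disabled, reset the
 * CoreSight mappings, and set up the embedded pmap per-CPU data.
 */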
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->interrupts_enabled = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = (void *) 0;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;


	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = (void *) 0;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

#if DEBUG || DEVELOPMENT
	cpu_data_ptr->failed_xcall = NULL;
	cpu_data_ptr->failed_signal = 0;
	cpu_data_ptr->failed_signal_count = 0;
#endif

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

	pmap_cpu_data_t *pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;

	for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) {
		pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0;
	}
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

}

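/*
 * Register a CPU's cpu_data in the global CpuDataEntries table, recording
 * both its virtual and physical addresses; when KASAN is enabled, also
 * exposes the per-CPU pmap copy windows to KASan unpoisoned.
 */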
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu = cpu_data_ptr->cpu_number;

#if KASAN
	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}
#endif

	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;

}


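/*
 * Bring a CPU online. For the current (boot) CPU this just finishes machine
 * initialization and CoreSight setup; for a secondary CPU it installs the
 * reset handler, selects the first thread to run, flushes the relevant
 * cpu_data to memory, and asks the platform expert to start the core.
 */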
kern_return_t
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;

	kprintf("cpu_start() cpu: %d\n", cpu);

	if (cpu == cpu_number()) {
		cpu_machine_init();
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;

		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

		cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;

		if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL)
			first_thread = cpu_data_ptr->cpu_processor->next_thread;
		else
			first_thread = cpu_data_ptr->cpu_processor->idle_thread;
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;

		configure_coresight_registers(cpu_data_ptr);

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
	}

	return KERN_SUCCESS;
}


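/*
 * Set up this CPU's timebase bookkeeping. On the boot CPU's wake path
 * (from_boot == FALSE) a new rtclock base offset is computed so that
 * mach_absolute_time() does not advance across sleep, regardless of what
 * the hardware timebase did while the system was asleep.
 */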
void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData)) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase. It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = 0x0UL;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}

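/* Return the cluster ID of the CPU this is called on. */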
int
cpu_cluster_id(void)
{
	return (getCpuDatap()->cpu_cluster_id);
}

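/*
 * Final stretch of the sleep path. The boot CPU waits for every other CPU to
 * reach its sleep token, records the wake timebase, optionally writes the
 * classic S2R suspend signature, and releases the global sleep stall before
 * handing off to arm64_prepare_for_sleep(); secondary CPUs stall on that
 * global flag, relinquish their debug power-gating holds, and then enter the
 * same low-level sleep routine. Does not return.
 */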
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		int cpu;
		int max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr))
				continue;

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH);
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
	} else {
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
#if WITH_CLASSIC_S2R
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoCs
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector, which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		}
		else {
			panic("No sleep token buffer");
		}
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR
		 * to tell the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if MONOTONIC
		mt_sleep();
#endif /* MONOTONIC */
		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * With the exception of the CPU revisions listed above, our ARM64 CPUs have a
		 * global register to manage entering deep sleep, as opposed to a per-CPU
		 * register. We cannot update this register until all CPUs are ready to enter
		 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
		 * (by idling), it will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which CPU0 will
		 * manage.
		 */
		while (arm64_stall_sleep) {
			__builtin_arm_wfe();
		}
#endif
		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR
		 * to tell the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	}
}

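/*
 * One-time (from_boot) and per-wake idle/sleep setup: parse the jtag and wfi
 * boot-args, register the reset handler data with the monitor when one is
 * present, detect debug-fused chips from the device tree, and point this
 * CPU's reset handler at the resume_idle_cpu trampoline.
 */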
void
cpu_machine_idle_init(boolean_t from_boot)
{
	static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		unsigned long jtag = 0;
		int wfi_tmp = 1;
		uint32_t production = 1;
		DTEntry entry;

		if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
			if (jtag != 0)
				idle_enable = FALSE;
			else
				idle_enable = TRUE;
		} else
			idle_enable = TRUE;

		PE_parse_boot_argn("wfi", &wfi_tmp, sizeof(wfi_tmp));

		// bits 7..0 give the wfi type
		switch (wfi_tmp & 0xff) {
		case 0:
			// disable wfi
			wfi = 0;
			break;

#if DEVELOPMENT || DEBUG
		case 2:
			// wfi overhead simulation
			//	31..16 - wfi delay in us
			//	15..8  - flags
			//	7..0   - 2
			wfi = 2;
			wfi_flags = (wfi_tmp >> 8) & 0xFF;
			nanoseconds_to_absolutetime(((wfi_tmp >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
			break;
#endif /* DEVELOPMENT || DEBUG */

		case 1:
		default:
			// do nothing
			break;
		}

		ResetHandlerData.assist_reset_handler = 0;
		ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);

#ifdef MONITOR
		monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif /* MONITOR */

		// Determine if we are on production or debug chip
		if (kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) {
			unsigned int size;
			void *prop;

			if (kSuccess == DTGetProperty(entry, "effective-production-status-ap", &prop, &size))
				if (size == 4)
					bcopy(prop, &production, size);
		}
		if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
			// Enable coresight debug registers on debug-fused chips
			coresight_debug_enabled = TRUE;
#endif
		}

		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
		resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

#if WITH_CLASSIC_S2R
	if (cpu_data_ptr == &BootCpuData) {
		static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
		}
		else {
			panic("No sleep token buffer");
		}

		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
			   SleepToken_low_paddr, sizeof(SleepToken));
		flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
	}
#endif

	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}

_Atomic uint32_t cpu_idle_count = 0;

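/*
 * Maintain a count of CPUs currently sitting in the platform idle path;
 * incremented on entry and decremented on exit with relaxed atomics.
 */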
void
machine_track_platform_idle(boolean_t entry)
{
	if (entry)
		(void)__c11_atomic_fetch_add(&cpu_idle_count, 1, __ATOMIC_RELAXED);
	else
		(void)__c11_atomic_fetch_sub(&cpu_idle_count, 1, __ATOMIC_RELAXED);
}

#if WITH_CLASSIC_S2R
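/*
 * On classic suspend-to-RAM systems, locate the "stram" sleep token page in
 * the device tree and map it so the suspend/running signatures can be written
 * there; only the boot CPU performs the mapping, and only once.
 */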
void
sleep_token_buffer_init(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	DTEntry entry;
	size_t size;
	void **prop;

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the stram node in the device tree */
		if (kSuccess != DTLookupEntry(0, "stram", &entry))
			return;

		if (kSuccess != DTGetProperty(entry, "reg", (void **)&prop, (unsigned int *)&size))
			return;

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t *)prop)[0], ((vm_size_t *)prop)[1]);
	}
}
#endif