/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm64/cpu.c
 *
 * cpu specific routines
 */

#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#if KPC
#include <kern/kpc.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

extern boolean_t idle_enable;
extern uint64_t wake_abstime;

#if WITH_CLASSIC_S2R
void sleep_token_buffer_init(void);
#endif


extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;

#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

extern void __attribute__((noreturn)) arm64_prepare_for_sleep(void);
extern void arm64_force_wfi_clock_gate(void);
#if (defined(APPLECYCLONE) || defined(APPLETYPHOON))
// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
extern void cyclone_typhoon_prepare_for_wfi(void);
extern void cyclone_typhoon_return_from_wfi(void);
#endif


vm_address_t start_cpu_paddr;

sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
	.tcr_el1 = TCR_EL1_BOOT,
};


// wfi - wfi mode
//	0 : disabled
//	1 : normal
//	2 : overhead simulation (delay & flags)
static int wfi = 1;

#if DEVELOPMENT || DEBUG

// wfi_flags
//	1 << 0 : flush L1s
//	1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */

#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif

#if WITH_CLASSIC_S2R
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
#endif

#if WITH_CLASSIC_S2R
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
#endif
static boolean_t coresight_debug_enabled = FALSE;

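/*
 * Routine:	configure_coresight_registers
 * Function:	Map the per-CPU coresight regions (when the device tree has
 *		provided cpu_regmap_paddr) and unlock their debug lock
 *		registers so the kernel can program them.
 */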
static void
configure_coresight_registers(cpu_data_t *cdp)
{
	uint64_t	addr;
	int		i;

	assert(cdp);

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide cpu_regmap_paddr, assume that coresight registers are not
	 * supported.
	 */
	if (cdp->cpu_regmap_paddr) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			/* Skip CTI; these registers are debug-only (they are
			 * not present on production hardware), and there is
			 * at least one known Cyclone erratum involving CTI
			 * (rdar://12802966). We have no known clients that
			 * need the kernel to unlock CTI, so it is safer
			 * to avoid doing the access.
			 */
			if (i == CORESIGHT_CTI)
				continue;
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled)
				continue;

			if (!cdp->coresight_base[i]) {
				addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
				cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);

				/*
				 * At this point, failing to io map the
				 * registers is considered an error.
				 */
				if (!cdp->coresight_base[i]) {
					panic("unable to ml_io_map coresight regions");
				}
			}
			/* Unlock EDLAR, CTILAR, PMLAR */
			if (i != CORESIGHT_UTT)
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
		}
	}
}

/*
 * Routine:	cpu_bootstrap
 * Function:	Early per-CPU bootstrap; currently a no-op on arm64.
 */
void
cpu_bootstrap(void)
{
}

/*
 * Routine:	cpu_sleep
 * Function:	Prepare the current CPU for sleep: switch to the kernel
 *		pmap, install the reset handler, clean the data cache and
 *		quiesce the platform.
 */
void
cpu_sleep(void)
{
	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;
#if KPC
	kpc_idle();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_down(cpu_data_ptr);
#endif /* MONOTONIC */

	CleanPoC_Dcache();

	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
}

/*
 * Routine:	cpu_idle
 * Function:	Low-level idle loop: notify the idle handler, reprogram the
 *		idle timer, and execute WFI until the next wake event.
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t	*cpu_data_ptr = getCpuDatap();
	uint64_t	new_idle_timeout_ticks = 0x0ULL, lastPop;

	if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled))
		Idle_load_context();
	if (!SetIdlePop())
		Idle_load_context();
	lastPop = cpu_data_ptr->rtcPop;

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug)
		arm_debug_set(NULL);
	cpu_data_ptr->cpu_user_debug = NULL;

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop)
			SetIdlePop();
	}

#if KPC
	kpc_idle();
#endif
#if MONOTONIC
	mt_cpu_idle(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi) {
		platform_cache_idle_enter();

#if DEVELOPMENT || DEBUG
		// When simulating wfi overhead,
		// force wfi to clock gating only
		if (wfi == 2) {
			arm64_force_wfi_clock_gate();
		}
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		cyclone_typhoon_prepare_for_wfi();
#endif
		__builtin_arm_dsb(DSB_SY);
		__builtin_arm_wfi();

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		cyclone_typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
		// Handle wfi overhead simulation
		if (wfi == 2) {
			uint64_t deadline;

			// Calculate wfi delay deadline
			clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

			// Flush L1 caches
			if ((wfi_flags & 1) != 0) {
				InvalidatePoU_Icache();
				FlushPoC_Dcache();
			}

			// Flush TLBs
			if ((wfi_flags & 2) != 0) {
				flush_core_tlb();
			}

			// Wait for the balance of the wfi delay
			clock_delay_until(deadline);
		}
#endif /* DEVELOPMENT || DEBUG */

		platform_cache_idle_exit();
	}

	ClearIdlePop(TRUE);

	cpu_idle_exit();
}

/*
 * Routine:	cpu_idle_exit
 * Function:	Resume from idle: restore coresight and pmap state, notify
 *		the idle handler, and reprogram the idle timer before
 *		reloading the idle context.
 */
void
cpu_idle_exit(void)
{
	uint64_t	new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	configure_coresight_registers(cpu_data_ptr);

#if KPC
	kpc_idle_exit();
#endif

#if MONOTONIC
	mt_cpu_run(cpu_data_ptr);
#endif /* MONOTONIC */

	pmap_switch_user_ttb(cpu_data_ptr->cpu_active_thread->map->pmap);

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}

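/*
 * Routine:	cpu_init
 * Function:	Per-CPU initialization, run at boot and again on each wake:
 *		set the CPU type/subtype, initialize the rtclock timer
 *		queue, and reset the per-CPU wake statistics.
 */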
void
cpu_init(void)
{
	cpu_data_t	*cdp = getCpuDatap();
	arm_cpu_info_t	*cpu_info_p;

	assert(exception_stack_pointer() != 0);

	if (cdp->cpu_type != CPU_TYPE_ARM64) {

		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_cacheid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}
		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
	cdp->cpu_stat.timer_cnt_wake = 0;
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if KPC
	kpc_idle_exit();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */
}

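/*
 * Routine:	cpu_data_alloc
 * Function:	Allocate the per-CPU data structure and its interrupt,
 *		exception and FIQ stacks for a secondary CPU; the boot CPU
 *		uses the statically allocated BootCpuData.
 */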
cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	cpu_data_t	*cpu_data_ptr = NULL;

	if (is_boot_cpu)
		cpu_data_ptr = &BootCpuData;
	else {
		void	*irq_stack = NULL;
		void	*exc_stack = NULL;
		void	*fiq_stack = NULL;

		if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS)
			goto cpu_data_alloc_error;

		bzero((void *)cpu_data_ptr, sizeof(cpu_data_t));

		if ((irq_stack = kalloc(INTSTACK_SIZE)) == 0)
			goto cpu_data_alloc_error;
		cpu_data_ptr->intstack_top = (vm_offset_t)irq_stack + INTSTACK_SIZE;
		cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

		if ((exc_stack = kalloc(PAGE_SIZE)) == 0)
			goto cpu_data_alloc_error;
		cpu_data_ptr->excepstack_top = (vm_offset_t)exc_stack + PAGE_SIZE;
		cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;

		if ((fiq_stack = kalloc(PAGE_SIZE)) == 0)
			goto cpu_data_alloc_error;
		cpu_data_ptr->fiqstack_top = (vm_offset_t)fiq_stack + PAGE_SIZE;
		cpu_data_ptr->fiqstackptr = cpu_data_ptr->fiqstack_top;
	}

	cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu);
	if (cpu_data_ptr->cpu_processor == (struct processor *)NULL)
		goto cpu_data_alloc_error;

	return cpu_data_ptr;

cpu_data_alloc_error:
	panic("cpu_data_alloc() failed\n");
	return (cpu_data_t *)NULL;
}

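/*
 * Routine:	cpu_data_free
 * Function:	Release a secondary CPU's data structure, its processor
 *		structure and its interrupt/FIQ stacks; the boot CPU's data
 *		is never freed.
 */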
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if (cpu_data_ptr == &BootCpuData)
		return;

	cpu_processor_free(cpu_data_ptr->cpu_processor);
	kfree((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
	kfree((void *)(cpu_data_ptr->fiqstack_top - PAGE_SIZE), PAGE_SIZE);
	kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
}

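/*
 * Routine:	cpu_data_init
 * Function:	Set a freshly allocated per-CPU data structure to its
 *		default (pre-start) state.
 */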
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->interrupts_enabled = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = (void *) 0;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;

	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = (void *) 0;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

#if DEBUG || DEVELOPMENT
	cpu_data_ptr->failed_xcall = NULL;
	cpu_data_ptr->failed_signal = 0;
	cpu_data_ptr->failed_signal_count = 0;
#endif

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

	pmap_cpu_data_t *pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_user_pmap = (struct pmap *) NULL;
	pmap_cpu_data_ptr->cpu_user_pmap_stamp = 0;
	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;

	for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) {
		pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0;
	}
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */
}

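/*
 * Routine:	cpu_data_register
 * Function:	Publish a CPU's data structure (virtual and physical
 *		addresses) in the global CpuDataEntries table.
 */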
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu = cpu_data_ptr->cpu_number;

#if KASAN
	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}
#endif

	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;
}

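/*
 * Routine:	cpu_start
 * Function:	Start the given CPU. For the current CPU this just runs
 *		cpu_machine_init(); for a secondary CPU it installs the
 *		reset handler, selects the first thread to run, flushes the
 *		relevant cpu data to memory and asks the platform expert to
 *		bring the core out of reset.
 */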
kern_return_t
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;

	kprintf("cpu_start() cpu: %d\n", cpu);

	if (cpu == cpu_number()) {
		cpu_machine_init();
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;

		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

		cpu_data_ptr->cpu_pmap_cpu_data.cpu_user_pmap = NULL;

		if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL)
			first_thread = cpu_data_ptr->cpu_processor->next_thread;
		else
			first_thread = cpu_data_ptr->cpu_processor->idle_thread;
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;

		configure_coresight_registers(cpu_data_ptr);

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
	}

	return KERN_SUCCESS;
}

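/*
 * Routine:	cpu_timebase_init
 * Function:	Initialize the per-CPU timebase hooks and decrementer; on
 *		wake (not from boot), recompute the timebase offset on the
 *		boot CPU so mach_absolute_time() resumes from wake_abstime.
 */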
void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData)) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase. It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = 0x0UL;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}

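/*
 * Routine:	cpu_cluster_id
 * Function:	Return the cluster ID of the current CPU.
 */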
int
cpu_cluster_id(void)
{
	return (getCpuDatap()->cpu_cluster_id);
}

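/*
 * Routine:	ml_arm_sleep
 * Function:	Final CPU-side sleep entry. The boot CPU waits for all other
 *		cores to reach the sleep path, records wake_abstime and (with
 *		classic S2R) writes the suspend signature to the sleep token
 *		buffer; where the global sleep bit is used, secondary cores
 *		stall until the boot CPU releases it. All cores then unlock
 *		the debug registers and call arm64_prepare_for_sleep(), which
 *		does not return.
 */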
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t	*target_cdp;
		int		cpu;
		int		max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr))
				continue;

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH);
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
	} else {
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
#if WITH_CLASSIC_S2R
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoCs
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector, which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		}
		else {
			panic("No sleep token buffer");
		}
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * telling the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if MONOTONIC
		mt_sleep();
#endif /* MONOTONIC */
		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * With the exception of the CPU revisions listed above, our ARM64 CPUs have a
		 * global register to manage entering deep sleep, as opposed to a per-CPU
		 * register. We cannot update this register until all CPUs are ready to enter
		 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
		 * (by idling), it will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which CPU0 will
		 * manage.
		 */
		while (arm64_stall_sleep) {
			__builtin_arm_wfe();
		}
#endif
		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * telling the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	}
}

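/*
 * Routine:	cpu_machine_idle_init
 * Function:	On first boot, parse the "jtag" and "wfi" boot-args, set up
 *		the reset handler data and determine whether coresight debug
 *		registers should be enabled; on every call, install the
 *		resume_idle_cpu reset handler for this CPU.
 */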
void
cpu_machine_idle_init(boolean_t from_boot)
{
	static vm_address_t	resume_idle_cpu_paddr = (vm_address_t)NULL;
	cpu_data_t		*cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		unsigned long jtag = 0;
		int wfi_tmp = 1;
		uint32_t production = 1;
		DTEntry entry;

		if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
			if (jtag != 0)
				idle_enable = FALSE;
			else
				idle_enable = TRUE;
		} else
			idle_enable = TRUE;

		PE_parse_boot_argn("wfi", &wfi_tmp, sizeof(wfi_tmp));

		// bits 7..0 give the wfi type
		switch (wfi_tmp & 0xff) {
		case 0:
			// disable wfi
			wfi = 0;
			break;

#if DEVELOPMENT || DEBUG
		case 2:
			// wfi overhead simulation
			//	31..16 - wfi delay in ms
			//	15..8  - flags
			//	7..0   - 2
			wfi = 2;
			wfi_flags = (wfi_tmp >> 8) & 0xFF;
			nanoseconds_to_absolutetime(((wfi_tmp >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
			break;
#endif /* DEVELOPMENT || DEBUG */

		case 1:
		default:
			// do nothing
			break;
		}

		ResetHandlerData.assist_reset_handler = 0;
		ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);

#ifdef MONITOR
		monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif /* MONITOR */

		// Determine if we are on a production or debug chip
		if (kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) {
			unsigned int size;
			void *prop;

			if (kSuccess == DTGetProperty(entry, "effective-production-status-ap", &prop, &size))
				if (size == 4)
					bcopy(prop, &production, size);
		}
		if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
			// Enable coresight debug registers on debug-fused chips
			coresight_debug_enabled = TRUE;
#endif
		}

		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
		resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

#if WITH_CLASSIC_S2R
	if (cpu_data_ptr == &BootCpuData) {
		static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
		}
		else {
			panic("No sleep token buffer");
		}

		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
		           SleepToken_low_paddr, sizeof(SleepToken));
		flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
	}
#endif

	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}

_Atomic uint32_t cpu_idle_count = 0;

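/*
 * Routine:	machine_track_platform_idle
 * Function:	Track the number of CPUs currently idling by atomically
 *		incrementing/decrementing cpu_idle_count on idle entry and
 *		exit.
 */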
void
machine_track_platform_idle(boolean_t entry)
{
	if (entry)
		(void)__c11_atomic_fetch_add(&cpu_idle_count, 1, __ATOMIC_RELAXED);
	else
		(void)__c11_atomic_fetch_sub(&cpu_idle_count, 1, __ATOMIC_RELAXED);
}

#if WITH_CLASSIC_S2R
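/*
 * Routine:	sleep_token_buffer_init
 * Function:	On the boot CPU, look up the "stram" node in the device tree
 *		and map the sleep token page into the kernel address space.
 */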
void
sleep_token_buffer_init(void)
{
	cpu_data_t	*cpu_data_ptr = getCpuDatap();
	DTEntry		entry;
	size_t		size;
	void		**prop;

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the stram node in the device tree */
		if (kSuccess != DTLookupEntry(0, "stram", &entry))
			return;

		if (kSuccess != DTGetProperty(entry, "reg", (void **)&prop, (unsigned int *)&size))
			return;

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t *)prop)[0], ((vm_size_t *)prop)[1]);
	}
}
#endif