/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm64/cpu.c
 *
 * cpu specific routines
 */

#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#if KPC
#include <kern/kpc.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

extern boolean_t idle_enable;
extern uint64_t wake_abstime;

#if WITH_CLASSIC_S2R
void sleep_token_buffer_init(void);
#endif


extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;

extern void __attribute__((noreturn)) arm64_prepare_for_sleep(void);
extern void arm64_force_wfi_clock_gate(void);
#if (defined(APPLECYCLONE) || defined(APPLETYPHOON))
// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
extern void cyclone_typhoon_prepare_for_wfi(void);
extern void cyclone_typhoon_return_from_wfi(void);
#endif


vm_address_t start_cpu_paddr;

sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
	.tcr_el1 = TCR_EL1_BOOT,
};


// wfi - wfi mode
//	0 : disabled
//	1 : normal
//	2 : overhead simulation (delay & flags)
static int wfi = 1;

#if DEVELOPMENT || DEBUG

// wfi_flags
//	1 << 0 : flush L1s
//	1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */

#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif

#if WITH_CLASSIC_S2R
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
#endif

#if WITH_CLASSIC_S2R
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
#endif
static boolean_t coresight_debug_enabled = FALSE;

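/*
 * Routine: configure_coresight_registers
 * Function:
 *	Map and unlock the per-CPU coresight debug regions described by
 *	cpu_regmap_paddr, skipping CTI and (on production hardware) the
 *	debug-only ED and UTT regions.
 */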
static void
configure_coresight_registers(cpu_data_t *cdp)
{
	uint64_t addr;
	int i;

	assert(cdp);

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide cpu_regmap_paddr, assume that coresight registers are not
	 * supported.
	 */
	if (cdp->cpu_regmap_paddr) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			/* Skip CTI; these registers are debug-only (they are
			 * not present on production hardware), and there is
			 * at least one known Cyclone errata involving CTI
			 * (rdar://12802966). We have no known clients that
			 * need the kernel to unlock CTI, so it is safer
			 * to avoid doing the access.
			 */
			if (i == CORESIGHT_CTI)
				continue;
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled)
				continue;

			if (!cdp->coresight_base[i]) {
				addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
				cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);

				/*
				 * At this point, failing to io map the
				 * registers is considered as an error.
				 */
				if (!cdp->coresight_base[i]) {
					panic("unable to ml_io_map coresight regions");
				}
			}
			/* Unlock EDLAR, CTILAR, PMLAR */
			if (i != CORESIGHT_UTT)
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
		}
	}
}


/*
 * Routine: cpu_bootstrap
 * Function:
 */
void
cpu_bootstrap(void)
{
}

/*
 * Routine: cpu_sleep
 * Function:
 */
void
cpu_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;
#if KPC
	kpc_idle();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_down(cpu_data_ptr);
#endif /* MONOTONIC */

	CleanPoC_Dcache();

	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);

}

/*
 * Routine: cpu_idle
 * Function:
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;

	if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled))
		Idle_load_context();
	if (!SetIdlePop())
		Idle_load_context();
	lastPop = cpu_data_ptr->rtcPop;

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug)
		arm_debug_set(NULL);
	cpu_data_ptr->cpu_user_debug = NULL;

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop)
			SetIdlePop();
	}

#if KPC
	kpc_idle();
#endif
#if MONOTONIC
	mt_cpu_idle(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi) {
		platform_cache_idle_enter();

#if DEVELOPMENT || DEBUG
		// When simulating wfi overhead,
		// force wfi to clock gating only
		if (wfi == 2) {
			arm64_force_wfi_clock_gate();
		}
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		cyclone_typhoon_prepare_for_wfi();
#endif
		__builtin_arm_dsb(DSB_SY);
		__builtin_arm_wfi();

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		cyclone_typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
		// Handle wfi overhead simulation
		if (wfi == 2) {
			uint64_t deadline;

			// Calculate wfi delay deadline
			clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

			// Flush L1 caches
			if ((wfi_flags & 1) != 0) {
				InvalidatePoU_Icache();
				FlushPoC_Dcache();
			}

			// Flush TLBs
			if ((wfi_flags & 2) != 0) {
				flush_core_tlb();
			}

			// Wait for the balance of the wfi delay
			clock_delay_until(deadline);
		}
#endif /* DEVELOPMENT || DEBUG */

		platform_cache_idle_exit();
	}

	ClearIdlePop(TRUE);

	cpu_idle_exit();
}

/*
 * Routine: cpu_idle_exit
 * Function:
 */
void
cpu_idle_exit(void)
{
	uint64_t new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	configure_coresight_registers(cpu_data_ptr);

#if KPC
	kpc_idle_exit();
#endif

#if MONOTONIC
	mt_cpu_run(cpu_data_ptr);
#endif /* MONOTONIC */

	pmap_switch_user_ttb(cpu_data_ptr->cpu_active_thread->map->pmap);

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}

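/*
 * Routine: cpu_init
 * Function:
 *	Per-CPU initialization, run at boot and again on each wake: set the
 *	cpu type/subtype, initialize the rtclock timer queue, clear the wake
 *	statistics and sleep token, and mark the CPU as running.
 */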
void
cpu_init(void)
{
	cpu_data_t *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	assert(exception_stack_pointer() != 0);

	if (cdp->cpu_type != CPU_TYPE_ARM64) {

		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_cacheid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}
		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
	cdp->cpu_stat.timer_cnt_wake = 0;
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if KPC
	kpc_idle_exit();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */
}

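/*
 * Routine: cpu_data_alloc
 * Function:
 *	Allocate the cpu_data_t and the interrupt, exception and FIQ stacks
 *	for a CPU (the boot CPU uses the statically allocated BootCpuData),
 *	then allocate its processor structure.
 */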
cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	cpu_data_t *cpu_data_ptr = NULL;

	if (is_boot_cpu)
		cpu_data_ptr = &BootCpuData;
	else {
		void *irq_stack = NULL;
		void *exc_stack = NULL;
		void *fiq_stack = NULL;

		if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS)
			goto cpu_data_alloc_error;

		bzero((void *)cpu_data_ptr, sizeof(cpu_data_t));

		if ((irq_stack = kalloc(INTSTACK_SIZE)) == 0)
			goto cpu_data_alloc_error;
		cpu_data_ptr->intstack_top = (vm_offset_t)irq_stack + INTSTACK_SIZE;
		cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

		if ((exc_stack = kalloc(PAGE_SIZE)) == 0)
			goto cpu_data_alloc_error;
		cpu_data_ptr->excepstack_top = (vm_offset_t)exc_stack + PAGE_SIZE;
		cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;

		if ((fiq_stack = kalloc(PAGE_SIZE)) == 0)
			goto cpu_data_alloc_error;
		cpu_data_ptr->fiqstack_top = (vm_offset_t)fiq_stack + PAGE_SIZE;
		cpu_data_ptr->fiqstackptr = cpu_data_ptr->fiqstack_top;
	}

	cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu);
	if (cpu_data_ptr->cpu_processor == (struct processor *)NULL)
		goto cpu_data_alloc_error;

	return cpu_data_ptr;

cpu_data_alloc_error:
	panic("cpu_data_alloc() failed\n");
	return (cpu_data_t *)NULL;
}


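/*
 * Routine: cpu_data_free
 * Function:
 *	Free a secondary CPU's processor structure, interrupt and FIQ stacks
 *	and cpu_data_t; the boot CPU's data is never freed.
 */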
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if (cpu_data_ptr == &BootCpuData)
		return;

	cpu_processor_free(cpu_data_ptr->cpu_processor);
	kfree((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
	kfree((void *)(cpu_data_ptr->fiqstack_top - PAGE_SIZE), PAGE_SIZE);
	kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
}

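/*
 * Routine: cpu_data_init
 * Function:
 *	Set a cpu_data_t to its default state: signals disabled, no pending
 *	ASTs, coresight regions unmapped, and the per-CPU pmap data marked
 *	invalid.
 */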
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->interrupts_enabled = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = (void *) 0;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;


	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = (void *) 0;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

#if DEBUG || DEVELOPMENT
	cpu_data_ptr->failed_xcall = NULL;
	cpu_data_ptr->failed_signal = 0;
	cpu_data_ptr->failed_signal_count = 0;
#endif

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

	pmap_cpu_data_t *pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_user_pmap = (struct pmap *) NULL;
	pmap_cpu_data_ptr->cpu_user_pmap_stamp = 0;
	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;

	for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) {
		pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0;
	}
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
}

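/*
 * Routine: cpu_data_register
 * Function:
 *	Record the virtual and physical addresses of a CPU's cpu_data_t in
 *	CpuDataEntries.
 */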
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu = cpu_data_ptr->cpu_number;

#if KASAN
	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}
#endif

	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;

}

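/*
 * Routine: cpu_start
 * Function:
 *	Start the given CPU. For the current CPU this only finishes machine
 *	initialization; for any other CPU it installs the reset handler and
 *	first thread, then asks the platform expert to power the core on.
 */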
kern_return_t
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;

	kprintf("cpu_start() cpu: %d\n", cpu);

	if (cpu == cpu_number()) {
		cpu_machine_init();
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;

		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

		cpu_data_ptr->cpu_pmap_cpu_data.cpu_user_pmap = NULL;

		if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL)
			first_thread = cpu_data_ptr->cpu_processor->next_thread;
		else
			first_thread = cpu_data_ptr->cpu_processor->idle_thread;
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;

		configure_coresight_registers(cpu_data_ptr);

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
	}

	return KERN_SUCCESS;
}


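/*
 * Routine: cpu_timebase_init
 * Function:
 *	Install the rtclock timebase callbacks for this CPU and, when the
 *	boot CPU is coming back from sleep, recompute the timebase offset so
 *	that mach_absolute_time() does not advance across sleep.
 */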
void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData)) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase. It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = 0x0UL;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}

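/*
 * Routine: cpu_cluster_id
 * Function:
 *	Return the cluster id of the current CPU.
 */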
int
cpu_cluster_id(void)
{
	return (getCpuDatap()->cpu_cluster_id);
}

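/*
 * Routine: ml_arm_sleep
 * Function:
 *	Final per-CPU entry into system sleep. The boot CPU waits for the
 *	other CPUs to reach the sleep path and records wake_abstime; every
 *	CPU then marks itself as being on the sleep path, quiesces its debug
 *	registers and calls arm64_prepare_for_sleep(), which does not return.
 */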
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		int cpu;
		int max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr))
				continue;

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH);
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
	} else {
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
#if WITH_CLASSIC_S2R
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoCs
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		}
		else {
			panic("No sleep token buffer");
		}
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * tell debugger to not prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if MONOTONIC
		mt_sleep();
#endif /* MONOTONIC */
		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * With the exception of the CPU revisions listed above, our ARM64 CPUs have a
		 * global register to manage entering deep sleep, as opposed to a per-CPU
		 * register. We cannot update this register until all CPUs are ready to enter
		 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
		 * (by idling), it will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which CPU0 will
		 * manage.
		 */
		while (arm64_stall_sleep) {
			__builtin_arm_wfe();
		}
#endif
		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * tell debugger to not prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	}
}

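/*
 * Routine: cpu_machine_idle_init
 * Function:
 *	On the boot path, parse the wfi/jtag boot-args, set up the reset
 *	handler data and decide whether coresight debug registers should be
 *	enabled; on every call, point this CPU's reset handler at
 *	resume_idle_cpu.
 */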
void
cpu_machine_idle_init(boolean_t from_boot)
{
	static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		unsigned long jtag = 0;
		int wfi_tmp = 1;
		uint32_t production = 1;
		DTEntry entry;

		if (PE_parse_boot_argn("jtag", &jtag, sizeof (jtag))) {
			if (jtag != 0)
				idle_enable = FALSE;
			else
				idle_enable = TRUE;
		} else
			idle_enable = TRUE;

		PE_parse_boot_argn("wfi", &wfi_tmp, sizeof (wfi_tmp));

		// bits 7..0 give the wfi type
		switch (wfi_tmp & 0xff) {
		case 0 :
			// disable wfi
			wfi = 0;
			break;

#if DEVELOPMENT || DEBUG
		case 2 :
			// wfi overhead simulation
			//   31..16 - wfi delay in us
			//   15..8  - flags
			//   7..0   - 2
			wfi = 2;
			wfi_flags = (wfi_tmp >> 8) & 0xFF;
			nanoseconds_to_absolutetime(((wfi_tmp >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
			break;
#endif /* DEVELOPMENT || DEBUG */

		case 1 :
		default :
			// do nothing
			break;
		}

		ResetHandlerData.assist_reset_handler = 0;
		ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);

#ifdef MONITOR
		monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif /* MONITOR */

		// Determine if we are on production or debug chip
		if (kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) {
			unsigned int size;
			void *prop;

			if (kSuccess == DTGetProperty(entry, "effective-production-status-ap", &prop, &size))
				if (size == 4)
					bcopy(prop, &production, size);
		}
		if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
			// Enable coresight debug registers on debug-fused chips
			coresight_debug_enabled = TRUE;
#endif
		}

		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
		resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

#if WITH_CLASSIC_S2R
	if (cpu_data_ptr == &BootCpuData) {
		static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
		}
		else {
			panic("No sleep token buffer");
		}

		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
			   SleepToken_low_paddr, sizeof(SleepToken));
		flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
	};
#endif

	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}

_Atomic uint32_t cpu_idle_count = 0;

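/*
 * Routine: machine_track_platform_idle
 * Function:
 *	Track the number of CPUs currently in the platform idle loop.
 */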
void
machine_track_platform_idle(boolean_t entry)
{
	if (entry)
		(void)__c11_atomic_fetch_add(&cpu_idle_count, 1, __ATOMIC_RELAXED);
	else
		(void)__c11_atomic_fetch_sub(&cpu_idle_count, 1, __ATOMIC_RELAXED);
}

#if WITH_CLASSIC_S2R
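/*
 * Routine: sleep_token_buffer_init
 * Function:
 *	Map the sleep token region (the "stram" device tree node) into the
 *	kernel so the suspend/running signatures can be written to it.
 */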
void
sleep_token_buffer_init(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	DTEntry entry;
	size_t size;
	void **prop;

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the stpage node in the device tree */
		if (kSuccess != DTLookupEntry(0, "stram", &entry))
			return;

		if (kSuccess != DTGetProperty(entry, "reg", (void **)&prop, (unsigned int *)&size))
			return;

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t *)prop)[0], ((vm_size_t *)prop)[1]);
	}
}
#endif