/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm64/cpu.c
 *
 * cpu specific routines
 */

#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/percpu.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#if KPC
#include <kern/kpc.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#if HIBERNATION
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOHibernatePrivate.h>
#endif /* HIBERNATION */


#include <libkern/section_keywords.h>

extern boolean_t idle_enable;
extern uint64_t wake_abstime;

#if WITH_CLASSIC_S2R
void sleep_token_buffer_init(void);
#endif


extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;

#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

extern void __attribute__((noreturn)) arm64_prepare_for_sleep(boolean_t deep_sleep);
extern void arm64_force_wfi_clock_gate(void);
#if defined(APPLETYPHOON)
// <rdar://problem/15827409>
extern void typhoon_prepare_for_wfi(void);
extern void typhoon_return_from_wfi(void);
#endif

#if HAS_RETENTION_STATE
extern void arm64_retention_wfi(void);
#endif

vm_address_t start_cpu_paddr;

sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
	.tcr_el1 = TCR_EL1_BOOT,
};


// wfi - wfi mode
//	0 : disabled
//	1 : normal
//	2 : overhead simulation (delay & flags)
static int wfi = 1;

#if DEVELOPMENT || DEBUG

// wfi_flags
//	1 << 0 : flush L1s
//	1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */
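
/*
 * Note: the "wfi" boot-arg, decoded in cpu_machine_idle_init() below, selects
 * the wfi mode in bits 7..0; on DEVELOPMENT/DEBUG kernels mode 2 additionally
 * takes wfi_flags from bits 15..8 and the wfi_delay interval from bits 31..16.
 */
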
#if DEVELOPMENT || DEBUG
static bool idle_proximate_timer_wfe = true;
static bool idle_proximate_io_wfe = true;
#define CPUPM_IDLE_WFE 0x5310300
#else
static const bool idle_proximate_timer_wfe = true;
static const bool idle_proximate_io_wfe = true;
#endif
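
/*
 * On DEVELOPMENT/DEBUG kernels these two policies can also be overridden at
 * boot via the "wfe_mode" boot-arg (bit 0: timer-bounded WFE, bit 1:
 * I/O-recommendation WFE); see cpu_machine_idle_init() below.
 */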

#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif

#if WITH_CLASSIC_S2R
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
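/*
 * Read back to front these spell "SUSPMOSX" and "RUNNMOSX"; iBoot looks for
 * the suspend signature in the sleep token buffer to pick the warm-boot
 * (wake) path, as described in ml_arm_sleep() below.
 */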
#endif

#if WITH_CLASSIC_S2R
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
#endif
static boolean_t coresight_debug_enabled = FALSE;

#if defined(CONFIG_XNUPOST)
void arm64_ipi_test_callback(void *);
void arm64_immediate_ipi_test_callback(void *);

void
arm64_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number;
}

void
arm64_immediate_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number + MAX_CPUS;
}

uint64_t arm64_ipi_test_data[MAX_CPUS * 2];

void
arm64_ipi_test()
{
	volatile uint64_t *ipi_test_data, *immediate_ipi_test_data;
	uint32_t timeout_ms = 100;
	uint64_t then, now, delta;
	int current_cpu_number = getCpuDatap()->cpu_number;

	/*
	 * probably the only way to have this on most systems is with the
	 * cpus=1 boot-arg, but nonetheless, if we only have 1 CPU active,
	 * IPI is not available
	 */
	if (real_ncpus == 1) {
		return;
	}

	const unsigned int max_cpu_id = ml_get_max_cpu_number();
	for (unsigned int i = 0; i <= max_cpu_id; ++i) {
		ipi_test_data = &arm64_ipi_test_data[i];
		immediate_ipi_test_data = &arm64_ipi_test_data[i + MAX_CPUS];
		*ipi_test_data = ~i;
		kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		/* Baseline the timeout before the retry loop below consults it. */
		then = mach_absolute_time();

		while ((error = cpu_immediate_xcall((int)i, (void *)arm64_immediate_ipi_test_callback,
		    (void *)(uintptr_t)immediate_ipi_test_data)) == KERN_ALREADY_WAITING) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d was unable to immediate-IPI CPU %u within %dms", current_cpu_number, i, timeout_ms);
			}
		}

		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to immediate-IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		then = mach_absolute_time();

		while ((*ipi_test_data != i) || (*immediate_ipi_test_data != (i + MAX_CPUS))) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d tried to IPI CPU %d but didn't get correct responses within %dms, responses: %llx, %llx",
				    current_cpu_number, i, timeout_ms, *ipi_test_data, *immediate_ipi_test_data);
			}
		}
	}
}
#endif /* defined(CONFIG_XNUPOST) */

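/*
 * Map (if needed) and unlock the optional ARMv8 coresight/debug register
 * regions for this CPU. Besides cpu_start(), this is also called from
 * cpu_idle_exit() with from_reset == TRUE, so the debug lock registers are
 * re-unlocked whenever a CPU comes back from reset.
 */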
static void
configure_coresight_registers(cpu_data_t *cdp)
{
	int i;

	assert(cdp);
	vm_offset_t coresight_regs = ml_get_topology_info()->cpus[cdp->cpu_number].coresight_regs;

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide either cpu_regmap_paddr (from the legacy "reg-private" EDT property)
	 * or coresight_regs (from the new "coresight-reg" property), assume that
	 * coresight registers are not supported.
	 */
	if (cdp->cpu_regmap_paddr || coresight_regs) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			if (i == CORESIGHT_CTI) {
				continue;
			}
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) {
				continue;
			}

			if (!cdp->coresight_base[i]) {
				if (coresight_regs) {
					cdp->coresight_base[i] = coresight_regs + CORESIGHT_OFFSET(i);
				} else {
					uint64_t addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
					cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);
				}

				/*
				 * At this point, failing to io map the
				 * registers is considered as an error.
				 */
				if (!cdp->coresight_base[i]) {
					panic("unable to ml_io_map coresight regions");
				}
			}
			/* Unlock EDLAR, CTILAR, PMLAR */
			if (i != CORESIGHT_UTT) {
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			}
		}
	}
}


/*
 * Routine: cpu_bootstrap
 * Function: Early CPU bootstrap hook; currently a no-op on arm64.
 */
void
cpu_bootstrap(void)
{
}

/*
 * Routine: cpu_sleep
 * Function: Prepare the current CPU for sleep and enter the platform
 *           quiesce/sleep path; does not return.
 */
void
cpu_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;
#if KPC
	kpc_idle();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_down(cpu_data_ptr);
#endif /* MONOTONIC */

	CleanPoC_Dcache();

#if USE_APPLEARMSMP
	if (ml_is_quiescing()) {
		PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
	} else {
		bool deep_sleep = PE_cpu_down(cpu_data_ptr->cpu_id);
		cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
		// hang CPU on spurious wakeup
		cpu_data_ptr->cpu_reset_handler = (uintptr_t)0;
		__builtin_arm_dsb(DSB_ISH);
		CleanPoU_Dcache();
		arm64_prepare_for_sleep(deep_sleep);
	}
#else
	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
#endif
	/*NOTREACHED*/
}

/*
 * Routine: cpu_interrupt_is_pending
 * Function: Returns the value of ISR. Due to how this register is
 *           implemented, this returns 0 if there are no
 *           interrupts pending, so it can be used as a boolean test.
 */
int
cpu_interrupt_is_pending(void)
{
	uint64_t isr_value;
	isr_value = __builtin_arm_rsr64("ISR_EL1");
	return (int)isr_value;
}

static bool
cpu_proximate_timer(void)
{
	return !SetIdlePop();
}

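/*
 * Poll in WFE until either an interrupt is pended on this CPU or the supplied
 * deadline passes (a deadline of ~0ULL means "wait for an interrupt only").
 * Returns true if an interrupt was pending when the loop exited.
 */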
static bool
wfe_to_deadline_or_interrupt(uint32_t cid, uint64_t wfe_deadline, __unused cpu_data_t *cdp)
{
	bool ipending = false;
	while ((ipending = (cpu_interrupt_is_pending() != 0)) == false) {
		/* Assumes event stream enablement
		 * TODO: evaluate temporarily stretching the per-CPU event
		 * interval to a larger value for possible efficiency
		 * improvements.
		 */
		__builtin_arm_wfe();
#if DEVELOPMENT || DEBUG
		cdp->wfe_count++;
#endif
		if (wfe_deadline != ~0ULL) {
#if DEVELOPMENT || DEBUG
			cdp->wfe_deadline_checks++;
#endif
			/* Check if the WFE recommendation has expired.
			 * We do not recompute the deadline here.
			 */
			if ((ml_cluster_wfe_timeout(cid) == 0) ||
			    mach_absolute_time() >= wfe_deadline) {
#if DEVELOPMENT || DEBUG
				cdp->wfe_terminations++;
#endif
				break;
			}
		}
	}
	/* TODO: worth refreshing pending interrupt status? */
	return ipending;
}

/*
 * Routine: cpu_idle
 * Function: Idle the current CPU, optionally polling in WFE before entering
 *           WFI; exits by reloading the idle thread context rather than
 *           returning to the caller.
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;
	bool idle_disallowed = false;

	if (__improbable((!idle_enable))) {
		idle_disallowed = true;
	} else if (__improbable(cpu_data_ptr->cpu_signal & SIGPdisabled)) {
		idle_disallowed = true;
	}

	if (__improbable(idle_disallowed)) {
		Idle_load_context();
	}

	bool ipending = false;
	uint32_t cid = ~0U;

	if (__probable(idle_proximate_io_wfe == true)) {
		uint64_t wfe_deadline = 0;
		/* Check for an active perf. controller generated
		 * WFE recommendation for this cluster.
		 */
		cid = cpu_data_ptr->cpu_cluster_id;
		uint64_t wfe_ttd = 0;
		if ((wfe_ttd = ml_cluster_wfe_timeout(cid)) != 0) {
			wfe_deadline = mach_absolute_time() + wfe_ttd;
		}

		if (wfe_deadline != 0) {
			/* Poll issuing event-bounded WFEs until an interrupt
			 * arrives or the WFE recommendation expires
			 */
			ipending = wfe_to_deadline_or_interrupt(cid, wfe_deadline, cpu_data_ptr);
#if DEVELOPMENT || DEBUG
			KDBG(CPUPM_IDLE_WFE, ipending, cpu_data_ptr->wfe_count, wfe_deadline, 0);
#endif
			if (ipending == true) {
				/* Back to machine_idle() */
				Idle_load_context();
			}
		}
	}

	if (__improbable(cpu_proximate_timer())) {
		if (idle_proximate_timer_wfe == true) {
			/* Poll issuing WFEs until the expected
			 * timer FIQ arrives.
			 */
			ipending = wfe_to_deadline_or_interrupt(cid, ~0ULL, cpu_data_ptr);
			assert(ipending == true);
		}
		Idle_load_context();
	}

	lastPop = cpu_data_ptr->rtcPop;

	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug) {
		arm_debug_set(NULL);
	}
	cpu_data_ptr->cpu_user_debug = NULL;

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop) {
			SetIdlePop();
		}
	}

#if KPC
	kpc_idle();
#endif
#if MONOTONIC
	mt_cpu_idle(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi) {
#if !defined(APPLE_ARM64_ARCH_FAMILY)
		platform_cache_idle_enter();
#endif

#if DEVELOPMENT || DEBUG
		// When simulating wfi overhead,
		// force wfi to clock gating only
		if (wfi == 2) {
			arm64_force_wfi_clock_gate();
		}
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_prepare_for_wfi();
#endif
		__builtin_arm_dsb(DSB_SY);
#if HAS_RETENTION_STATE
		arm64_retention_wfi();
#else
		__builtin_arm_wfi();
#endif

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
		// Handle wfi overhead simulation
		if (wfi == 2) {
			uint64_t deadline;

			// Calculate wfi delay deadline
			clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

			// Flush L1 caches
			if ((wfi_flags & 1) != 0) {
				InvalidatePoU_Icache();
				FlushPoC_Dcache();
			}

			// Flush TLBs
			if ((wfi_flags & 2) != 0) {
				flush_core_tlb();
			}

			// Wait for the balance of the wfi delay
			clock_delay_until(deadline);
		}
#endif /* DEVELOPMENT || DEBUG */
#if !defined(APPLE_ARM64_ARCH_FAMILY)
		platform_cache_idle_exit();
#endif
	}

	ClearIdlePop(TRUE);

	cpu_idle_exit(FALSE);
}

/*
 * Routine: cpu_idle_exit
 * Function: Resume from idle: re-unlock debug registers when coming back
 *           from reset, re-arm the idle timer, and reload the idle thread
 *           context; does not return to the caller.
 */
void
cpu_idle_exit(boolean_t from_reset)
{
	uint64_t new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	if (from_reset) {
		configure_coresight_registers(cpu_data_ptr);
	}

#if KPC
	kpc_idle_exit();
#endif

#if MONOTONIC
	mt_cpu_run(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) {
		cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != NULL) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}

void
cpu_init(void)
{
	cpu_data_t *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	assert(exception_stack_pointer() != 0);

	if (cdp->cpu_type != CPU_TYPE_ARM64) {
		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_cacheid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}
		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		case CPU_ARCH_ARMv8E:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64E;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
#if MONOTONIC
	cdp->cpu_stat.pmi_cnt_wake = 0;
#endif /* MONOTONIC */
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if KPC
	kpc_idle_exit();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */
}

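/*
 * Each stack below is allocated with one guard page on either side (hence the
 * extra 2 * PAGE_SIZE and the KMA_GUARD_FIRST / KMA_GUARD_LAST flags); the
 * usable top of each stack therefore sits one page above the allocation base.
 */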
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t irq_stack = 0;
	vm_offset_t exc_stack = 0;

	kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
	    INTSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu interrupt stack\n");
	}

	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

	kr = kernel_memory_allocate(kernel_map, &exc_stack,
	    EXCEPSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu exception stack\n");
	}

	cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
	cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;
}

void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if ((cpu_data_ptr == NULL) || (cpu_data_ptr == &BootCpuData)) {
		return;
	}

	int cpu_number = cpu_data_ptr->cpu_number;

	if (CpuDataEntries[cpu_number].cpu_data_vaddr == cpu_data_ptr) {
		CpuDataEntries[cpu_number].cpu_data_vaddr = NULL;
		CpuDataEntries[cpu_number].cpu_data_paddr = 0;
		__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible
	}
	(kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
	(kfree)((void *)(cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE), EXCEPSTACK_SIZE);
}

void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = NULL;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;


	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = NULL;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

#if !XNU_MONITOR
	pmap_cpu_data_t * pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;
	pmap_cpu_data_ptr->pv_free.list = NULL;
	pmap_cpu_data_ptr->pv_free.count = 0;
	pmap_cpu_data_ptr->pv_free_tail = NULL;

	bzero(&(pmap_cpu_data_ptr->cpu_sw_asids[0]), sizeof(pmap_cpu_data_ptr->cpu_sw_asids));
#endif
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(HAS_APPLE_PAC)
	cpu_data_ptr->rop_key = 0;
	cpu_data_ptr->jop_key = ml_default_jop_pid();
#endif

}

kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu = cpu_data_ptr->cpu_number;

#if KASAN
	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}
#endif

	__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;
}

#if defined(KERNEL_INTEGRITY_CTRR)
/* Hibernation needs to reset this state, so data and text are in the hib segment;
 * this allows them to be accessed and executed early.
 */
LCK_GRP_DECLARE(ctrr_cpu_start_lock_grp, "ctrr_cpu_start_lock");
LCK_SPIN_DECLARE(ctrr_cpu_start_lck, &ctrr_cpu_start_lock_grp);
enum ctrr_cluster_states ctrr_cluster_locked[MAX_CPU_CLUSTERS] MARK_AS_HIBERNATE_DATA;

MARK_AS_HIBERNATE_TEXT
void
init_ctrr_cluster_states(void)
{
	for (int i = 0; i < MAX_CPU_CLUSTERS; i++) {
		ctrr_cluster_locked[i] = CTRR_UNLOCKED;
	}
}
#endif
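
/*
 * Per-cluster CTRR state advances CTRR_UNLOCKED -> CTRR_LOCKING -> CTRR_LOCKED;
 * the transition is driven by the first CPU brought up in each cluster on the
 * cpu_start() path below.
 */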

kern_return_t
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;

	kprintf("cpu_start() cpu: %d\n", cpu);

	if (cpu == cpu_number()) {
		cpu_machine_init();
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;
		processor_t processor;

		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

#if !XNU_MONITOR
		cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;
#endif

		processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr);
		if (processor->startup_thread != THREAD_NULL) {
			first_thread = processor->startup_thread;
		} else {
			first_thread = processor->idle_thread;
		}
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;
		first_thread->machine.pcpu_data_base =
		    (vm_address_t)cpu_data_ptr - __PERCPU_ADDR(cpu_data);

		configure_coresight_registers(cpu_data_ptr);

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
#if defined(KERNEL_INTEGRITY_CTRR)

		/* The first CPU started within a cluster locks CTRR for that cluster;
		 * other CPUs block until the cluster is locked. */
		lck_spin_lock(&ctrr_cpu_start_lck);
		switch (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]) {
		case CTRR_UNLOCKED:
			ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKING;
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		case CTRR_LOCKING:
			assert_wait(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id], THREAD_UNINT);
			lck_spin_unlock(&ctrr_cpu_start_lck);
			thread_block(THREAD_CONTINUE_NULL);
			assert(ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKING);
			break;
		default: // CTRR_LOCKED
			lck_spin_unlock(&ctrr_cpu_start_lck);
			break;
		}
#endif
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
	}

	return KERN_SUCCESS;
}


void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData)) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase. It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
	} else if (from_boot) {
		/* On initial boot, initialize time_since_reset to CNTPCT_EL0. */
		ml_set_reset_time(ml_get_hwclock());
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = 0x0UL;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}

int
cpu_cluster_id(void)
{
	return getCpuDatap()->cpu_cluster_id;
}

__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		int cpu;
		int max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) {
				continue;
			}

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
				;
			}
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
		ml_set_reset_time(UINT64_MAX);
	} else {
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
#if WITH_CLASSIC_S2R
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoCs
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		} else {
			panic("No sleep token buffer");
		}
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * telling the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if HIBERNATION
		uint32_t mode = hibernate_write_image();
		if (mode == kIOHibernatePostWriteHalt) {
			HIBLOG("powering off after writing hibernation image\n");
			int halt_result = -1;
			if (PE_halt_restart) {
				halt_result = (*PE_halt_restart)(kPEHaltCPU);
			}
			panic("can't shutdown: PE_halt_restart returned %d", halt_result);
		}
#endif /* HIBERNATION */

#if MONOTONIC
		mt_sleep();
#endif /* MONOTONIC */
		/* ARM64-specific preparation */
		arm64_prepare_for_sleep(true);
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * With the exception of the CPU revisions listed above, our ARM64 CPUs have a
		 * global register to manage entering deep sleep, as opposed to a per-CPU
		 * register. We cannot update this register until all CPUs are ready to enter
		 * deep sleep, because if a CPU executes WFI outside of the deep sleep context
		 * (by idling), it will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which CPU0 will
		 * manage.
		 */
		while (arm64_stall_sleep) {
			__builtin_arm_wfe();
		}
#endif
		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * telling the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
		arm64_prepare_for_sleep(true);
	}
}

void
cpu_machine_idle_init(boolean_t from_boot)
{
	static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		int wfi_tmp = 1;
		uint32_t production = 1;
		DTEntry entry;

		unsigned long jtag = 0;

		if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
			if (jtag != 0) {
				idle_enable = FALSE;
			} else {
				idle_enable = TRUE;
			}
		} else {
			idle_enable = TRUE;
		}

#if DEVELOPMENT || DEBUG
		uint32_t wfe_mode = 0;
		if (PE_parse_boot_argn("wfe_mode", &wfe_mode, sizeof(wfe_mode))) {
			idle_proximate_timer_wfe = ((wfe_mode & 1) == 1);
			idle_proximate_io_wfe = ((wfe_mode & 2) == 2);
		}
#endif
		PE_parse_boot_argn("wfi", &wfi_tmp, sizeof(wfi_tmp));

		// bits 7..0 give the wfi type
		switch (wfi_tmp & 0xff) {
		case 0:
			// disable wfi
			wfi = 0;
			break;

#if DEVELOPMENT || DEBUG
		case 2:
			// wfi overhead simulation
			//   31..16 - wfi delay in us
			//   15..8  - flags
			//   7..0   - 2
			wfi = 2;
			wfi_flags = (wfi_tmp >> 8) & 0xFF;
			nanoseconds_to_absolutetime(((wfi_tmp >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
			break;
#endif /* DEVELOPMENT || DEBUG */

		case 1:
		default:
			// do nothing
			break;
		}

		ResetHandlerData.assist_reset_handler = 0;
		ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);

#ifdef MONITOR
		monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif /* MONITOR */

		// Determine if we are on production or debug chip
		if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) {
			unsigned int size;
			void const *prop;

			if (kSuccess == SecureDTGetProperty(entry, "effective-production-status-ap", &prop, &size)) {
				if (size == 4) {
					bcopy(prop, &production, size);
				}
			}
		}
		if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
			// Enable coresight debug registers on debug-fused chips
			coresight_debug_enabled = TRUE;
#endif
		}

		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
		resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

#if WITH_CLASSIC_S2R
	if (cpu_data_ptr == &BootCpuData) {
		static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
		} else {
			panic("No sleep token buffer");
		}

		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
		    SleepToken_low_paddr, sizeof(SleepToken));
		flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
	}
	;
#endif

	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}

_Atomic uint32_t cpu_idle_count = 0;

void
machine_track_platform_idle(boolean_t entry)
{
	if (entry) {
		os_atomic_inc(&cpu_idle_count, relaxed);
	} else {
		os_atomic_dec(&cpu_idle_count, relaxed);
	}
}

#if WITH_CLASSIC_S2R
void
sleep_token_buffer_init(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	DTEntry entry;
	size_t size;
	void const * const *prop;

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the stpage node in the device tree */
		if (kSuccess != SecureDTLookupEntry(0, "stram", &entry)) {
			return;
		}

		if (kSuccess != SecureDTGetProperty(entry, "reg", (const void **)&prop, (unsigned int *)&size)) {
			return;
		}

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t const *)prop)[0], ((vm_size_t const *)prop)[1]);
	}
}
#endif