/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm64/cpu.c
 *
 * cpu specific routines
 */

#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>

#include <machine/atomic.h>

#include <san/kasan.h>

#if KPC
#include <kern/kpc.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

extern boolean_t idle_enable;
extern uint64_t wake_abstime;

#if WITH_CLASSIC_S2R
void sleep_token_buffer_init(void);
#endif


extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;

#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

extern void __attribute__((noreturn)) arm64_prepare_for_sleep(void);
extern void arm64_force_wfi_clock_gate(void);
#if defined(APPLETYPHOON)
// <rdar://problem/15827409>
extern void typhoon_prepare_for_wfi(void);
extern void typhoon_return_from_wfi(void);
#endif

#if HAS_RETENTION_STATE
extern void arm64_retention_wfi(void);
#endif

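/*
 * Physical address of the start_cpu entry point; installed as the CPU
 * reset handler so that CPUs resume into the kernel's start path.
 */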
vm_address_t start_cpu_paddr;

sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
	.tcr_el1 = TCR_EL1_BOOT,
};


// wfi - wfi mode
// 0 : disabled
// 1 : normal
// 2 : overhead simulation (delay & flags)
static int wfi = 1;

#if DEVELOPMENT || DEBUG

// wfi_flags
// 1 << 0 : flush L1s
// 1 << 1 : flush TLBs
static int wfi_flags = 0;

// wfi_delay - delay ticks after wfi exit
static uint64_t wfi_delay = 0;

#endif /* DEVELOPMENT || DEBUG */

static bool idle_wfe_to_deadline = false;

#if __ARM_GLOBAL_SLEEP_BIT__
volatile boolean_t arm64_stall_sleep = TRUE;
#endif

#if WITH_CLASSIC_S2R
/*
 * These must be aligned to avoid issues with calling bcopy_phys on them before
 * we are done with pmap initialization.
 */
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
#endif

#if WITH_CLASSIC_S2R
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
#endif
static boolean_t coresight_debug_enabled = FALSE;

#if defined(CONFIG_XNUPOST)
void arm64_ipi_test_callback(void *);
void arm64_immediate_ipi_test_callback(void *);

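/*
 * Target-side callbacks for the IPI self-test. Each writes a value derived
 * from the receiving CPU's number into the slot provided by the sender, so
 * the sender can verify that the cross-call ran on the intended CPU.
 */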
void
arm64_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number;
}

void
arm64_immediate_ipi_test_callback(void *parm)
{
	volatile uint64_t *ipi_test_data = parm;
	cpu_data_t *cpu_data;

	cpu_data = getCpuDatap();

	*ipi_test_data = cpu_data->cpu_number + MAX_CPUS;
}

uint64_t arm64_ipi_test_data[MAX_CPUS * 2];

void
arm64_ipi_test()
{
	volatile uint64_t *ipi_test_data, *immediate_ipi_test_data;
	uint32_t timeout_ms = 100;
	uint64_t then, now, delta;
	int current_cpu_number = getCpuDatap()->cpu_number;

	/*
	 * probably the only way to have this on most systems is with the
	 * cpus=1 boot-arg, but nonetheless, if we only have 1 CPU active,
	 * IPI is not available
	 */
	if (real_ncpus == 1) {
		return;
	}

	for (unsigned int i = 0; i < MAX_CPUS; ++i) {
		ipi_test_data = &arm64_ipi_test_data[i];
		immediate_ipi_test_data = &arm64_ipi_test_data[i + MAX_CPUS];
		*ipi_test_data = ~i;
		kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		/* Initialize the timeout reference before the retry loop below reads it. */
		then = mach_absolute_time();

		while ((error = cpu_immediate_xcall((int)i, (void *)arm64_immediate_ipi_test_callback,
		    (void *)(uintptr_t)immediate_ipi_test_data)) == KERN_ALREADY_WAITING) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d was unable to immediate-IPI CPU %u within %dms", current_cpu_number, i, timeout_ms);
			}
		}

		if (error != KERN_SUCCESS) {
			panic("CPU %d was unable to immediate-IPI CPU %u: error %d", current_cpu_number, i, error);
		}

		then = mach_absolute_time();

		while ((*ipi_test_data != i) || (*immediate_ipi_test_data != (i + MAX_CPUS))) {
			now = mach_absolute_time();
			absolutetime_to_nanoseconds(now - then, &delta);
			if ((delta / NSEC_PER_MSEC) > timeout_ms) {
				panic("CPU %d tried to IPI CPU %d but didn't get correct responses within %dms, responses: %llx, %llx",
				    current_cpu_number, i, timeout_ms, *ipi_test_data, *immediate_ipi_test_data);
			}
		}
	}
}
#endif /* defined(CONFIG_XNUPOST) */

static void
configure_coresight_registers(cpu_data_t *cdp)
{
	uint64_t addr;
	int i;

	assert(cdp);

	/*
	 * ARMv8 coresight registers are optional. If the device tree did not
	 * provide cpu_regmap_paddr, assume that coresight registers are not
	 * supported.
	 */
	if (cdp->cpu_regmap_paddr) {
		for (i = 0; i < CORESIGHT_REGIONS; ++i) {
			/* Skip CTI; these registers are debug-only (they are
			 * not present on production hardware), and there is
			 * at least one known Cyclone errata involving CTI
			 * (rdar://12802966). We have no known clients that
			 * need the kernel to unlock CTI, so it is safer
			 * to avoid doing the access.
			 */
			if (i == CORESIGHT_CTI) {
				continue;
			}
			/* Skip debug-only registers on production chips */
			if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) {
				continue;
			}

			if (!cdp->coresight_base[i]) {
				addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
				cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);

				/*
				 * At this point, failing to io map the
				 * registers is considered an error.
				 */
				if (!cdp->coresight_base[i]) {
					panic("unable to ml_io_map coresight regions");
				}
			}
			/* Unlock EDLAR, CTILAR, PMLAR */
			if (i != CORESIGHT_UTT) {
				*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			}
		}
	}
}


/*
 * Routine: cpu_bootstrap
 * Function:
 */
void
cpu_bootstrap(void)
{
}

/*
 * Routine: cpu_sleep
 * Function:
 */
void
cpu_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;
#if KPC
	kpc_idle();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_down(cpu_data_ptr);
#endif /* MONOTONIC */

	CleanPoC_Dcache();

	/* This calls:
	 *
	 * IOCPURunPlatformQuiesceActions when sleeping the boot cpu
	 * ml_arm_sleep() on all CPUs
	 *
	 * It does not return.
	 */
	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
	/*NOTREACHED*/
}

/*
 * Routine: cpu_interrupt_is_pending
 * Function: Returns the value of ISR. Due to how this register is
 *           implemented, this returns 0 if there are no interrupts
 *           pending, so it can be used as a boolean test.
 */
static int
cpu_interrupt_is_pending(void)
{
	uint64_t isr_value;
	isr_value = __builtin_arm_rsr64("ISR_EL1");
	return (int)isr_value;
}

/*
 * Routine: cpu_idle
 * Function:
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;

	if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled)) {
		Idle_load_context();
	}

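	/*
	 * SetIdlePop() arms the timer hardware for the next deadline; if it
	 * declines (e.g. the next deadline is too close to make a full idle
	 * entry worthwhile), optionally WFE until an interrupt pends, then
	 * reenter the idle context without going through WFI.
	 */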
	if (!SetIdlePop()) {
		/* If a deadline is pending, wait for it to elapse. */
		if (idle_wfe_to_deadline) {
			if (arm64_wfe_allowed()) {
				while (!cpu_interrupt_is_pending()) {
					__builtin_arm_wfe();
				}
			}
		}

		Idle_load_context();
	}

	lastPop = cpu_data_ptr->rtcPop;

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug) {
		arm_debug_set(NULL);
	}
	cpu_data_ptr->cpu_user_debug = NULL;

	if (cpu_data_ptr->cpu_idle_notify) {
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop) {
			SetIdlePop();
		}
	}

#if KPC
	kpc_idle();
#endif
#if MONOTONIC
	mt_cpu_idle(cpu_data_ptr);
#endif /* MONOTONIC */

	if (wfi) {
		platform_cache_idle_enter();

#if DEVELOPMENT || DEBUG
		// When simulating wfi overhead,
		// force wfi to clock gating only
		if (wfi == 2) {
			arm64_force_wfi_clock_gate();
		}
#endif /* DEVELOPMENT || DEBUG */

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_prepare_for_wfi();
#endif
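		/*
		 * Complete all outstanding memory accesses with a full-system
		 * DSB before entering the low-power state.
		 */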
		__builtin_arm_dsb(DSB_SY);
#if HAS_RETENTION_STATE
		arm64_retention_wfi();
#else
		__builtin_arm_wfi();
#endif

#if defined(APPLETYPHOON)
		// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
		typhoon_return_from_wfi();
#endif

#if DEVELOPMENT || DEBUG
		// Handle wfi overhead simulation
		if (wfi == 2) {
			uint64_t deadline;

			// Calculate wfi delay deadline
			clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);

			// Flush L1 caches
			if ((wfi_flags & 1) != 0) {
				InvalidatePoU_Icache();
				FlushPoC_Dcache();
			}

			// Flush TLBs
			if ((wfi_flags & 2) != 0) {
				flush_core_tlb();
			}

			// Wait for the balance of the wfi delay
			clock_delay_until(deadline);
		}
#endif /* DEVELOPMENT || DEBUG */

		platform_cache_idle_exit();
	}

	ClearIdlePop(TRUE);

	cpu_idle_exit(FALSE);
}

/*
 * Routine: cpu_idle_exit
 * Function:
 */
void
cpu_idle_exit(boolean_t from_reset)
{
	uint64_t new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	assert(exception_stack_pointer() != 0);

	/* Back from WFI, unlock OSLAR and EDLAR. */
	if (from_reset) {
		configure_coresight_registers(cpu_data_ptr);
	}

#if KPC
	kpc_idle_exit();
#endif

#if MONOTONIC
	mt_cpu_run(cpu_data_ptr);
#endif /* MONOTONIC */

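	/* Switch back to the address space of the thread we idled on behalf of. */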
	pmap_switch_user_ttb(cpu_data_ptr->cpu_active_thread->map->pmap);

	if (cpu_data_ptr->cpu_idle_notify) {
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
	}

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}

void
cpu_init(void)
{
	cpu_data_t *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	assert(exception_stack_pointer() != 0);

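	/*
	 * cpu_type is zero until the first cpu_init() on this CPU, so the
	 * block below performs one-time setup; later calls (e.g. on wake
	 * from sleep) skip it.
	 */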
	if (cdp->cpu_type != CPU_TYPE_ARM64) {
		cdp->cpu_type = CPU_TYPE_ARM64;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_cacheid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}
		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv8:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
			break;
		default:
			//cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
			/* this panic doesn't work this early in startup */
			panic("Unknown CPU subtype...");
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
	cdp->cpu_stat.timer_cnt_wake = 0;
#if MONOTONIC
	cdp->cpu_stat.pmi_cnt_wake = 0;
#endif /* MONOTONIC */
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
#if KPC
	kpc_idle_exit();
#endif /* KPC */
#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */
}

void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
	vm_offset_t irq_stack = 0;
	vm_offset_t exc_stack = 0;

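	/*
	 * Each stack is allocated with a guard page on either side
	 * (KMA_GUARD_FIRST | KMA_GUARD_LAST), hence the two extra pages and
	 * the PAGE_SIZE offset when computing the usable stack top.
	 */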
	kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
	    INTSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu interrupt stack\n");
	}

	cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
	cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

	kr = kernel_memory_allocate(kernel_map, &exc_stack,
	    EXCEPSTACK_SIZE + (2 * PAGE_SIZE),
	    PAGE_MASK,
	    KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("Unable to allocate cpu exception stack\n");
	}

	cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
	cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;
}

void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if ((cpu_data_ptr == NULL) || (cpu_data_ptr == &BootCpuData)) {
		return;
	}

	cpu_processor_free(cpu_data_ptr->cpu_processor);
	if (CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_vaddr == cpu_data_ptr) {
		CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_vaddr = NULL;
		CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_paddr = 0;
		__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible
	}
	(kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
	(kfree)((void *)(cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE), EXCEPSTACK_SIZE);
	kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
}

void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->interrupts_enabled = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = (void *) 0;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;


	cpu_data_ptr->cpu_base_timebase = 0;
	cpu_data_ptr->cpu_idle_notify = (void *) 0;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
	cpu_data_ptr->cpu_imm_xcall_p1 = NULL;

	for (i = 0; i < CORESIGHT_REGIONS; ++i) {
		cpu_data_ptr->coresight_base[i] = 0;
	}

#if !XNU_MONITOR
	pmap_cpu_data_t *pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;

	for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) {
		pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0;
	}
#endif
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(HAS_APPLE_PAC)
	cpu_data_ptr->rop_key = 0;
#endif
}

kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu = cpu_data_ptr->cpu_number;

#if KASAN
	for (int i = 0; i < CPUWINDOWS_MAX; i++) {
		kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
	}
#endif

	__builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;
}

#if defined(KERNEL_INTEGRITY_CTRR)

lck_spin_t ctrr_cpu_start_lck;
bool ctrr_cluster_locked[__ARM_CLUSTER_COUNT__];

void
init_ctrr_cpu_start_lock(void)
{
	lck_grp_t *ctrr_cpu_start_lock_grp = lck_grp_alloc_init("ctrr_cpu_start_lock", 0);
	assert(ctrr_cpu_start_lock_grp);
	lck_spin_init(&ctrr_cpu_start_lck, ctrr_cpu_start_lock_grp, NULL);
}

#endif

kern_return_t
cpu_start(int cpu)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;

	kprintf("cpu_start() cpu: %d\n", cpu);

	if (cpu == cpu_number()) {
		cpu_machine_init();
		configure_coresight_registers(cpu_data_ptr);
	} else {
		thread_t first_thread;

		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

#if !XNU_MONITOR
		cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;
#endif

		if (cpu_data_ptr->cpu_processor->startup_thread != THREAD_NULL) {
			first_thread = cpu_data_ptr->cpu_processor->startup_thread;
		} else {
			first_thread = cpu_data_ptr->cpu_processor->idle_thread;
		}
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;

		configure_coresight_registers(cpu_data_ptr);

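		/*
		 * Flush the updated cpu data out to memory so the starting
		 * CPU, which may not yet be coherent with our caches, sees
		 * current values when it comes out of reset.
		 */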
		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
#if defined(KERNEL_INTEGRITY_CTRR)
		/* The first time a CPU starts, if it is not the cluster master
		 * and its cluster is not already locked, block until the
		 * cluster becomes locked. */
		if (cpu_data_ptr->cpu_processor->active_thread == THREAD_NULL
		    && !cpu_data_ptr->cluster_master) {
			lck_spin_lock(&ctrr_cpu_start_lck);
			if (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] == 0) {
				assert_wait(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id], THREAD_UNINT);
				lck_spin_unlock(&ctrr_cpu_start_lck);
				thread_block(THREAD_CONTINUE_NULL);
				assert(ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] == 1);
			} else {
				lck_spin_unlock(&ctrr_cpu_start_lck);
			}
		}
#endif
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
	}

	return KERN_SUCCESS;
}


void
cpu_timebase_init(boolean_t from_boot)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}

	if (!from_boot && (cdp == &BootCpuData)) {
		/*
		 * When we wake from sleep, we have no guarantee about the state
		 * of the hardware timebase. It may have kept ticking across sleep, or
		 * it may have reset.
		 *
		 * To deal with this, we calculate an offset to the clock that will
		 * produce a timebase value wake_abstime at the point the boot
		 * CPU calls cpu_timebase_init on wake.
		 *
		 * This ensures that mach_absolute_time() stops ticking across sleep.
		 */
		rtclock_base_abstime = wake_abstime - ml_get_hwclock();
	} else if (from_boot) {
		/* On initial boot, initialize time_since_reset to CNTPCT_EL0. */
		ml_set_reset_time(ml_get_hwclock());
	}

	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase = 0x0UL;
	cdp->cpu_base_timebase = rtclock_base_abstime;
}

int
cpu_cluster_id(void)
{
	return getCpuDatap()->cpu_cluster_id;
}

__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		int cpu;
		int max_cpu;

		max_cpu = ml_get_max_cpu_number();
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) {
				continue;
			}

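			/* Spin until this secondary CPU marks itself as on the sleep path. */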
			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
				;
			}
		}

		/*
		 * Now that the other cores have entered the sleep path, set
		 * the abstime value we'll use when we resume.
		 */
		wake_abstime = ml_get_timebase();
		ml_set_reset_time(UINT64_MAX);
	} else {
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	if (cpu_data_ptr == &BootCpuData) {
#if WITH_CLASSIC_S2R
		// Classic suspend to RAM writes the suspend signature into the
		// sleep token buffer so that iBoot knows that it's on the warm
		// boot (wake) path (as opposed to the cold boot path). Newer SoCs
		// do not go through SecureROM/iBoot on the warm boot path. The
		// reconfig engine script brings the CPU out of reset at the kernel's
		// reset vector, which points to the warm boot initialization code.
		if (sleepTokenBuffer != (vm_offset_t) NULL) {
			platform_cache_shutdown();
			bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
		} else {
			panic("No sleep token buffer");
		}
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
		/* Allow other CPUs to go to sleep. */
		arm64_stall_sleep = FALSE;
		__builtin_arm_dmb(DMB_ISH);
#endif

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * telling the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

#if MONOTONIC
		mt_sleep();
#endif /* MONOTONIC */
		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	} else {
#if __ARM_GLOBAL_SLEEP_BIT__
		/*
		 * With the exception of certain early CPU revisions, our ARM64
		 * CPUs have a global register to manage entering deep sleep, as
		 * opposed to a per-CPU register. We cannot update this register
		 * until all CPUs are ready to enter deep sleep, because if a CPU
		 * executes WFI outside of the deep sleep context (by idling), it
		 * will hang (due to the side effects of enabling deep sleep),
		 * which can hang the sleep process or cause memory corruption on
		 * wake.
		 *
		 * To avoid these issues, we'll stall on this global value, which
		 * CPU0 will manage.
		 */
		while (arm64_stall_sleep) {
			__builtin_arm_wfe();
		}
#endif
		CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

		/* Architectural debug state: <rdar://problem/12390433>:
		 * Grab debug lock EDLAR and clear bit 0 in EDPRCR,
		 * telling the debugger not to prevent power gating.
		 */
		if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
			*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
		}

		/* ARM64-specific preparation */
		arm64_prepare_for_sleep();
	}
}

void
cpu_machine_idle_init(boolean_t from_boot)
{
	static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		unsigned long jtag = 0;
		int wfi_tmp = 1;
		uint32_t production = 1;
		DTEntry entry;

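		/*
		 * The "jtag" boot-arg disables low-power idle, presumably so
		 * an attached hardware debugger does not lose the cores when
		 * they power gate.
		 */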
921 | if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) { | |
922 | if (jtag != 0) { | |
923 | idle_enable = FALSE; | |
924 | } else { | |
925 | idle_enable = TRUE; | |
926 | } | |
927 | } else { | |
928 | idle_enable = TRUE; | |
929 | } | |
930 | ||
931 | PE_parse_boot_argn("wfi", &wfi_tmp, sizeof(wfi_tmp)); | |
932 | ||
933 | // bits 7..0 give the wfi type | |
934 | switch (wfi_tmp & 0xff) { | |
935 | case 0: | |
936 | // disable wfi | |
937 | wfi = 0; | |
938 | break; | |
939 | ||
940 | #if DEVELOPMENT || DEBUG | |
941 | case 2: | |
942 | // wfi overhead simulation | |
943 | // 31..16 - wfi delay is us | |
944 | // 15..8 - flags | |
945 | // 7..0 - 2 | |
946 | wfi = 2; | |
947 | wfi_flags = (wfi_tmp >> 8) & 0xFF; | |
948 | nanoseconds_to_absolutetime(((wfi_tmp >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay); | |
949 | break; | |
950 | #endif /* DEVELOPMENT || DEBUG */ | |
951 | ||
952 | case 1: | |
953 | default: | |
954 | // do nothing | |
955 | break; | |
956 | } | |
957 | ||
958 | PE_parse_boot_argn("idle_wfe_to_deadline", &idle_wfe_to_deadline, sizeof(idle_wfe_to_deadline)); | |
959 | ||
960 | ResetHandlerData.assist_reset_handler = 0; | |
961 | ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries); | |
962 | ||
963 | #ifdef MONITOR | |
964 | monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0); | |
965 | #elif !defined(NO_MONITOR) | |
966 | #error MONITOR undefined, WFI power gating may not operate correctly | |
967 | #endif /* MONITOR */ | |
968 | ||
969 | // Determine if we are on production or debug chip | |
970 | if (kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) { | |
971 | unsigned int size; | |
972 | void *prop; | |
973 | ||
974 | if (kSuccess == DTGetProperty(entry, "effective-production-status-ap", &prop, &size)) { | |
975 | if (size == 4) { | |
976 | bcopy(prop, &production, size); | |
977 | } | |
978 | } | |
979 | } | |
980 | if (!production) { | |
981 | #if defined(APPLE_ARM64_ARCH_FAMILY) | |
982 | // Enable coresight debug registers on debug-fused chips | |
983 | coresight_debug_enabled = TRUE; | |
984 | #endif | |
985 | } | |
986 | ||
987 | start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu); | |
988 | resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu); | |
989 | } | |
990 | ||
991 | #if WITH_CLASSIC_S2R | |
992 | if (cpu_data_ptr == &BootCpuData) { | |
993 | static addr64_t SleepToken_low_paddr = (addr64_t)NULL; | |
994 | if (sleepTokenBuffer != (vm_offset_t) NULL) { | |
995 | SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer); | |
996 | } else { | |
997 | panic("No sleep token buffer"); | |
998 | } | |
999 | ||
1000 | bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature), | |
1001 | SleepToken_low_paddr, sizeof(SleepToken)); | |
1002 | flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE); | |
1003 | } | |
1004 | ; | |
1005 | #endif | |
1006 | ||
1007 | cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr; | |
1008 | clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE); | |
1009 | } | |
1010 | ||
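/* Count of CPUs currently in the platform idle loop. */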
_Atomic uint32_t cpu_idle_count = 0;

void
machine_track_platform_idle(boolean_t entry)
{
	if (entry) {
		os_atomic_inc(&cpu_idle_count, relaxed);
	} else {
		os_atomic_dec(&cpu_idle_count, relaxed);
	}
}

#if WITH_CLASSIC_S2R
void
sleep_token_buffer_init(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	DTEntry entry;
	size_t size;
	void **prop;

	if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
		/* Find the "stram" node in the device tree */
		if (kSuccess != DTLookupEntry(0, "stram", &entry)) {
			return;
		}

		if (kSuccess != DTGetProperty(entry, "reg", (void **)&prop, (unsigned int *)&size)) {
			return;
		}

		/* Map the page into the kernel space */
		sleepTokenBuffer = ml_io_map(((vm_offset_t *)prop)[0], ((vm_size_t *)prop)[1]);
	}
}
#endif