1 | /* |
2 | * Copyright (c) 2007-2017 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | #include <arm64/proc_reg.h> | |
30 | #include <arm/machine_cpu.h> | |
31 | #include <arm/cpu_internal.h> | |
32 | #include <arm/cpuid.h> | |
33 | #include <arm/io_map_entries.h> | |
34 | #include <arm/cpu_data.h> | |
35 | #include <arm/cpu_data_internal.h> | |
36 | #include <arm/caches_internal.h> | |
37 | #include <arm/misc_protos.h> | |
38 | #include <arm/machdep_call.h> | |
39 | #include <arm/rtclock.h> | |
40 | #include <console/serial_protos.h> | |
41 | #include <kern/machine.h> | |
42 | #include <prng/random.h> | |
43 | #include <kern/startup.h> | |
44 | #include <kern/thread.h> | |
45 | #include <mach/machine.h> | |
46 | #include <machine/atomic.h> | |
47 | #include <vm/pmap.h> | |
48 | #include <vm/vm_page.h> | |
49 | #include <sys/kdebug.h> | |
50 | #include <kern/coalition.h> | |
51 | #include <pexpert/device_tree.h> | |
52 | ||
53 | #include <IOKit/IOPlatformExpert.h> | |
54 | |
55 | #if defined(KERNEL_INTEGRITY_KTRR) | |
56 | #include <libkern/kernel_mach_header.h> | |
57 | #endif | |
58 | ||
59 | #if KPC | |
60 | #include <kern/kpc.h> | |
61 | #endif | |
62 | ||
63 | ||
64 | static int max_cpus_initialized = 0; | |
65 | #define MAX_CPUS_SET 0x1 | |
66 | #define MAX_CPUS_WAIT 0x2 | |
67 | ||
68 | uint32_t LockTimeOut; | |
69 | uint32_t LockTimeOutUsec; | |
70 | uint64_t MutexSpin; | |
71 | boolean_t is_clock_configured = FALSE; | |
72 | ||
73 | extern int mach_assert; | |
74 | extern volatile uint32_t debug_enabled; | |
75 | |
76 | ||
77 | void machine_conf(void); | |
78 | ||
79 | thread_t Idle_context(void); | |
80 | ||
81 | static uint32_t cpu_phys_ids[MAX_CPUS] = {[0 ... MAX_CPUS - 1] = (uint32_t)-1}; | |
82 | static unsigned int avail_cpus = 0; | |
83 | static int boot_cpu = -1; | |
84 | static int max_cpu_number = 0; | |
85 | cluster_type_t boot_cluster = CLUSTER_TYPE_SMP; | |
86 | ||
87 | lockdown_handler_t lockdown_handler; | |
88 | void *lockdown_this; | |
89 | lck_mtx_t lockdown_handler_lck; | |
90 | lck_grp_t *lockdown_handler_grp; | |
91 | int lockdown_done; | |
92 | ||
93 | void ml_lockdown_init(void); | |
94 | void ml_lockdown_run_handler(void); | |
95 | uint32_t get_arm_cpu_version(void); | |
96 | ||
97 | ||
98 | void ml_cpu_signal(unsigned int cpu_id __unused) | |
99 | { | |
100 | panic("Platform does not support ACC Fast IPI"); | |
101 | } | |
102 | ||
103 | void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs) { | |
104 | (void)nanosecs; | |
105 | panic("Platform does not support ACC Fast IPI"); | |
106 | } | |
107 | ||
108 | uint64_t ml_cpu_signal_deferred_get_timer() { | |
109 | return 0; | |
110 | } | |
111 | ||
112 | void ml_cpu_signal_deferred(unsigned int cpu_id __unused) | |
113 | { | |
114 | panic("Platform does not support ACC Fast IPI deferral"); | |
115 | } | |
116 | ||
117 | void ml_cpu_signal_retract(unsigned int cpu_id __unused) | |
118 | { | |
119 | panic("Platform does not support ACC Fast IPI retraction"); | |
120 | } | |
121 | ||
122 | void machine_idle(void) | |
123 | { | |
124 | __asm__ volatile ("msr DAIFSet, %[mask]" ::[mask] "i" (DAIFSC_IRQF | DAIFSC_FIQF)); | |
125 | Idle_context(); | |
126 | __asm__ volatile ("msr DAIFClr, %[mask]" ::[mask] "i" (DAIFSC_IRQF | DAIFSC_FIQF)); | |
127 | } | |
128 | ||
129 | void init_vfp(void) | |
130 | { | |
131 | return; | |
132 | } | |
133 | ||
134 | boolean_t get_vfp_enabled(void) | |
135 | { | |
136 | return TRUE; | |
137 | } | |
138 | ||
139 | void OSSynchronizeIO(void) | |
140 | { | |
141 | __builtin_arm_dsb(DSB_SY); | |
142 | } | |
143 | ||
144 | uint64_t get_aux_control(void) | |
145 | { | |
146 | uint64_t value; | |
147 | ||
148 | MRS(value, "ACTLR_EL1"); | |
149 | return value; | |
150 | } | |
151 | ||
152 | uint64_t get_mmu_control(void) | |
153 | { | |
154 | uint64_t value; | |
155 | ||
156 | MRS(value, "SCTLR_EL1"); | |
157 | return value; | |
158 | } | |
159 | ||
160 | uint64_t get_tcr(void) | |
161 | { | |
162 | uint64_t value; | |
163 | ||
164 | MRS(value, "TCR_EL1"); | |
165 | return value; | |
166 | } | |
167 | ||
168 | boolean_t ml_get_interrupts_enabled(void) | |
169 | { | |
170 | uint64_t value; | |
171 | ||
172 | MRS(value, "DAIF"); | |
173 | if (value & DAIF_IRQF) | |
174 | return FALSE; | |
175 | return TRUE; | |
176 | } | |
177 | ||
178 | pmap_paddr_t get_mmu_ttb(void) | |
179 | { | |
180 | pmap_paddr_t value; | |
181 | ||
182 | MRS(value, "TTBR0_EL1"); | |
183 | return value; | |
184 | } | |
185 | ||
186 | MARK_AS_PMAP_TEXT | |
187 | void set_mmu_ttb(pmap_paddr_t value) | |
188 | { | |
189 | #if __ARM_KERNEL_PROTECT__ |
190 | /* All EL1-mode ASIDs are odd. */ | |
191 | value |= (1ULL << TTBR_ASID_SHIFT); | |
192 | #endif /* __ARM_KERNEL_PROTECT__ */ | |
193 | ||
194 | __builtin_arm_dsb(DSB_ISH); |
195 | MSR("TTBR0_EL1", value); | |
196 | __builtin_arm_isb(ISB_SY); | |
197 | } | |
198 | ||
199 | static uint32_t get_midr_el1(void) | |
200 | { | |
201 | uint64_t value; | |
202 | ||
203 | MRS(value, "MIDR_EL1"); | |
204 | ||
205 | /* This is a 32-bit register. */ | |
206 | return (uint32_t) value; | |
207 | } | |
208 | ||
209 | uint32_t get_arm_cpu_version(void) | |
210 | { | |
211 | uint32_t value = get_midr_el1(); | |
212 | ||
213 | /* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */ | |
214 | return ((value & MIDR_EL1_REV_MASK) >> MIDR_EL1_REV_SHIFT) | ((value & MIDR_EL1_VAR_MASK) >> (MIDR_EL1_VAR_SHIFT - 4)); | |
215 | } | |
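For reference, a standalone sketch of the composition above (not part of this file; the MIDR_EL1 mask and shift constants below are assumptions matching the usual ARMv8 encoding of revision in bits [3:0] and variant in bits [23:20]):

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed MIDR_EL1 field layout (ARMv8): revision[3:0], variant[23:20]. */
#define MIDR_REV_MASK   0x0000000fu
#define MIDR_REV_SHIFT  0
#define MIDR_VAR_MASK   0x00f00000u
#define MIDR_VAR_SHIFT  20

/* Mirrors get_arm_cpu_version(): variant in bits [7:4], revision in bits [3:0]. */
static uint32_t compose_cpu_version(uint32_t midr)
{
	return ((midr & MIDR_REV_MASK) >> MIDR_REV_SHIFT) |
	       ((midr & MIDR_VAR_MASK) >> (MIDR_VAR_SHIFT - 4));
}

int main(void)
{
	uint32_t midr = 0x00200001u;	/* variant 2, revision 1, other fields zero */
	printf("cpu version = 0x%02x\n", compose_cpu_version(midr));	/* prints 0x21 */
	return 0;
}
```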
216 | ||
217 | /* | |
218 | * user_cont_hwclock_allowed() | |
219 | * | |
220 | * Indicates whether we allow EL0 to read the physical timebase (CNTPCT_EL0) | |
221 | * as a continuous time source (e.g. from mach_continuous_time) | |
222 | */ | |
223 | boolean_t user_cont_hwclock_allowed(void) | |
224 | { | |
225 | return FALSE; | |
226 | } | |
227 | ||
228 | /* | |
229 | * user_timebase_allowed() | |
230 | * | |
231 | * Indicates whether we allow EL0 to read the physical timebase (CNTPCT_EL0). | |
232 | */ | |
233 | boolean_t user_timebase_allowed(void) | |
234 | { | |
235 | return TRUE; | |
236 | } | |
237 | ||
238 | boolean_t arm64_wfe_allowed(void) | |
239 | { | |
240 | return TRUE; | |
241 | } | |
242 | ||
243 | #if defined(KERNEL_INTEGRITY_KTRR) | |
244 | ||
245 | uint64_t rorgn_begin __attribute__((section("__DATA, __const"))) = 0; | |
246 | uint64_t rorgn_end __attribute__((section("__DATA, __const"))) = 0; | |
247 | vm_offset_t amcc_base; | |
248 | ||
249 | static void assert_unlocked(void); | |
250 | static void assert_amcc_cache_disabled(void); | |
251 | static void lock_amcc(void); | |
252 | static void lock_mmu(uint64_t begin, uint64_t end); | |
253 | ||
254 | void rorgn_stash_range(void) | |
255 | { | |
256 | ||
257 | #if DEVELOPMENT || DEBUG | |
258 | boolean_t rorgn_disable = FALSE; | |
259 | ||
260 | PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable)); | |
261 | ||
262 | if (rorgn_disable) { | |
263 | /* take the early out if the boot arg is present; don't query any machine registers, | |
264 | * to avoid a dependency on the amcc DT entry | |
265 | */ | |
266 | return; | |
267 | } | |
268 | #endif | |
269 | ||
270 | /* Get the AMC values, and stash them into rorgn_begin, rorgn_end. */ | |
271 | ||
272 | #if defined(KERNEL_INTEGRITY_KTRR) | |
273 | uint64_t soc_base = 0; | |
274 | DTEntry entryP = NULL; | |
275 | uintptr_t *reg_prop = NULL; | |
276 | uint32_t prop_size = 0; | |
277 | int rc; | |
278 | ||
279 | soc_base = pe_arm_get_soc_base_phys(); | |
280 | rc = DTFindEntry("name", "mcc", &entryP); | |
281 | assert(rc == kSuccess); | |
282 | rc = DTGetProperty(entryP, "reg", (void **)®_prop, &prop_size); | |
283 | assert(rc == kSuccess); | |
284 | amcc_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); | |
285 | #else | |
286 | #error "KERNEL_INTEGRITY config error" | |
287 | #endif | |
288 | ||
289 | #if defined(KERNEL_INTEGRITY_KTRR) | |
290 | assert(rRORGNENDADDR > rRORGNBASEADDR); | |
291 | rorgn_begin = (rRORGNBASEADDR << ARM_PGSHIFT) + gPhysBase; | |
292 | rorgn_end = (rRORGNENDADDR << ARM_PGSHIFT) + gPhysBase; | |
293 | #else | |
294 | #error KERNEL_INTEGRITY config error | |
295 | #endif /* defined (KERNEL_INTEGRITY_KTRR) */ | |
296 | } | |
297 | ||
298 | static void assert_unlocked() { | |
299 | uint64_t ktrr_lock = 0; | |
300 | uint32_t rorgn_lock = 0; | |
301 | ||
302 | assert(amcc_base); | |
303 | #if defined(KERNEL_INTEGRITY_KTRR) | |
304 | rorgn_lock = rRORGNLOCK; | |
305 | ktrr_lock = __builtin_arm_rsr64(ARM64_REG_KTRR_LOCK_EL1); | |
306 | #else | |
307 | #error KERNEL_INTEGRITY config error | |
308 | #endif /* defined(KERNEL_INTEGRITY_KTRR) */ | |
309 | ||
310 | assert(!ktrr_lock); | |
311 | assert(!rorgn_lock); | |
312 | } | |
313 | ||
314 | static void lock_amcc() { | |
315 | #if defined(KERNEL_INTEGRITY_KTRR) | |
316 | rRORGNLOCK = 1; | |
317 | __builtin_arm_isb(ISB_SY); | |
318 | #else | |
319 | #error KERNEL_INTEGRITY config error | |
320 | #endif | |
321 | } | |
322 | ||
323 | static void lock_mmu(uint64_t begin, uint64_t end) { | |
324 | ||
325 | #if defined(KERNEL_INTEGRITY_KTRR) | |
326 | ||
327 | __builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin); | |
328 | __builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end); | |
329 | __builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL); | |
330 | ||
331 | /* flush TLB */ | |
332 | ||
333 | __builtin_arm_isb(ISB_SY); | |
334 | flush_mmu_tlb(); | |
335 | ||
336 | #else | |
337 | #error KERNEL_INTEGRITY config error | |
338 | #endif | |
339 | ||
340 | } | |
341 | ||
342 | static void assert_amcc_cache_disabled() { | |
343 | #if defined(KERNEL_INTEGRITY_KTRR) | |
344 | assert((rMCCGEN & 1) == 0); /* assert M$ disabled or LLC clean will be unreliable */ | |
345 | #else | |
346 | #error KERNEL_INTEGRITY config error | |
347 | #endif | |
348 | } | |
349 | ||
350 | /* | |
351 | * void rorgn_lockdown(void) | |
352 | * | |
353 | * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked | |
354 | * | |
355 | * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in | |
356 | * start.s:start_cpu() for subsequent wake/resume of all cores | |
357 | */ | |
358 | void rorgn_lockdown(void) | |
359 | { | |
360 | vm_offset_t ktrr_begin, ktrr_end; | |
361 | unsigned long plt_segsz, last_segsz; | |
362 | ||
363 | #if DEVELOPMENT || DEBUG | |
364 | boolean_t ktrr_disable = FALSE; | |
365 | ||
366 | PE_parse_boot_argn("-unsafe_kernel_text", &ktrr_disable, sizeof(ktrr_disable)); | |
367 | ||
368 | if (ktrr_disable) { | |
369 | /* | |
370 | * take the early out if the boot arg is present; since the amcc DT entry may not be | |
371 | * present, we can't assert that iboot hasn't programmed the RO region lockdown registers | |
372 | */ | |
373 | goto out; | |
374 | } | |
375 | #endif /* DEVELOPMENT || DEBUG */ | |
376 | ||
377 | assert_unlocked(); | |
378 | ||
379 | /* [x] - Use final method of determining all kernel text range or expect crashes */ | |
380 | ||
381 | ktrr_begin = (uint64_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &plt_segsz); | |
382 | assert(ktrr_begin && gVirtBase && gPhysBase); | |
383 | ||
384 | ktrr_begin = kvtophys(ktrr_begin); | |
385 | ||
386 | /* __LAST is not part of the MMU KTRR region (it is however part of the AMCC KTRR region) */ | |
387 | ktrr_end = (uint64_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &last_segsz); | |
388 | ktrr_end = (kvtophys(ktrr_end) - 1) & ~PAGE_MASK; | |
389 | ||
390 | /* ensure that iboot and xnu agree on the ktrr range */ | |
391 | assert(rorgn_begin == ktrr_begin && rorgn_end == (ktrr_end + last_segsz)); | |
392 | /* assert that __LAST segment containing privileged insns is only a single page */ | |
393 | assert(last_segsz == PAGE_SIZE); | |
394 | ||
395 | #if DEBUG | |
396 | printf("KTRR Begin: %p End: %p, setting lockdown\n", (void *)ktrr_begin, (void *)ktrr_end); | |
397 | #endif | |
398 | ||
399 | /* [x] - ensure all in flight writes are flushed to AMCC before enabling RO Region Lock */ | |
400 | ||
401 | assert_amcc_cache_disabled(); | |
402 | ||
403 | CleanPoC_DcacheRegion_Force(phystokv(ktrr_begin), | |
404 | (unsigned)((ktrr_end + last_segsz) - ktrr_begin + PAGE_MASK)); | |
405 | ||
406 | lock_amcc(); | |
407 | ||
408 | lock_mmu(ktrr_begin, ktrr_end); | |
409 | ||
410 | #if DEVELOPMENT || DEBUG | |
411 | out: | |
412 | #endif | |
413 | ||
414 | /* now we can run lockdown handler */ | |
415 | ml_lockdown_run_handler(); | |
416 | } | |
417 | ||
418 | #endif /* defined(KERNEL_INTEGRITY_KTRR)*/ | |
419 | ||
420 | void | |
421 | machine_startup(__unused boot_args * args) | |
422 | { | |
423 | int boot_arg; | |
424 | ||
425 | ||
426 | PE_parse_boot_argn("assert", &mach_assert, sizeof (mach_assert)); |
427 | ||
428 | if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) { | |
429 | default_preemption_rate = boot_arg; | |
430 | } | |
431 | if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof (boot_arg))) { | |
432 | default_bg_preemption_rate = boot_arg; | |
433 | } | |
434 | ||
435 | machine_conf(); | |
436 | ||
437 | /* | |
438 | * Kick off the kernel bootstrap. | |
439 | */ | |
440 | kernel_bootstrap(); | |
441 | /* NOTREACHED */ | |
442 | } | |
443 | ||
444 | void machine_lockdown_preflight(void) | |
445 | { | |
446 | #if CONFIG_KERNEL_INTEGRITY | |
447 | ||
448 | #if defined(KERNEL_INTEGRITY_KTRR) | |
449 | rorgn_stash_range(); | |
450 | #endif | |
451 | ||
452 | #endif | |
453 | } | |
454 | ||
455 | void machine_lockdown(void) | |
456 | { | |
457 | #if CONFIG_KERNEL_INTEGRITY | |
458 | #if KERNEL_INTEGRITY_WT | |
459 | /* Watchtower | |
460 | * | |
461 | * Notify the monitor about the completion of early kernel bootstrap. | |
462 | * From this point forward it will enforce the integrity of kernel text, | |
463 | * rodata and page tables. | |
464 | */ | |
465 | ||
466 | #ifdef MONITOR | |
467 | monitor_call(MONITOR_LOCKDOWN, 0, 0, 0); | |
468 | #endif | |
469 | #endif /* KERNEL_INTEGRITY_WT */ | |
470 | ||
471 | ||
472 | #if defined(KERNEL_INTEGRITY_KTRR) | |
473 | /* KTRR | |
474 | * | |
475 | * Lock physical KTRR region. KTRR region is read-only. Memory outside | |
476 | * the region is not executable at EL1. | |
477 | */ | |
478 | ||
479 | rorgn_lockdown(); | |
480 | #endif /* defined(KERNEL_INTEGRITY_KTRR)*/ | |
481 | ||
482 | ||
483 | #endif /* CONFIG_KERNEL_INTEGRITY */ | |
484 | } | |
485 | ||
486 | char * | |
487 | machine_boot_info( | |
488 | __unused char *buf, | |
489 | __unused vm_size_t size) | |
490 | { | |
491 | return (PE_boot_args()); | |
492 | } | |
493 | ||
494 | void | |
495 | machine_conf(void) | |
496 | { | |
497 | /* | |
498 | * This is known to be inaccurate. mem_size should always be capped at 2 GB | |
499 | */ | |
500 | machine_info.memory_size = (uint32_t)mem_size; | |
501 | } | |
502 | ||
503 | void | |
504 | machine_init(void) | |
505 | { | |
506 | debug_log_init(); | |
507 | clock_config(); | |
508 | is_clock_configured = TRUE; | |
509 | if (debug_enabled) | |
510 | pmap_map_globals(); | |
511 | } | |
512 | ||
513 | void | |
514 | slave_machine_init(__unused void *param) | |
515 | { | |
516 | cpu_machine_init(); /* Initialize the processor */ | |
517 | clock_init(); /* Init the clock */ | |
518 | } | |
519 | ||
520 | /* | |
521 | * Routine: machine_processor_shutdown | |
522 | * Function: | |
523 | */ | |
524 | thread_t | |
525 | machine_processor_shutdown( | |
526 | __unused thread_t thread, | |
527 | void (*doshutdown) (processor_t), | |
528 | processor_t processor) | |
529 | { | |
530 | return (Shutdown_context(doshutdown, processor)); | |
531 | } | |
532 | ||
533 | /* | |
534 | * Routine: ml_init_max_cpus | |
535 | * Function: | |
536 | */ | |
537 | void | |
538 | ml_init_max_cpus(unsigned int max_cpus) | |
539 | { | |
540 | boolean_t current_state; | |
541 | ||
542 | current_state = ml_set_interrupts_enabled(FALSE); | |
543 | if (max_cpus_initialized != MAX_CPUS_SET) { | |
544 | machine_info.max_cpus = max_cpus; | |
545 | machine_info.physical_cpu_max = max_cpus; | |
546 | machine_info.logical_cpu_max = max_cpus; | |
547 | if (max_cpus_initialized == MAX_CPUS_WAIT) | |
548 | thread_wakeup((event_t) & max_cpus_initialized); | |
549 | max_cpus_initialized = MAX_CPUS_SET; | |
550 | } | |
551 | (void) ml_set_interrupts_enabled(current_state); | |
552 | } | |
553 | ||
554 | /* | |
555 | * Routine: ml_get_max_cpus | |
556 | * Function: | |
557 | */ | |
558 | unsigned int | |
559 | ml_get_max_cpus(void) | |
560 | { | |
561 | boolean_t current_state; | |
562 | ||
563 | current_state = ml_set_interrupts_enabled(FALSE); | |
564 | if (max_cpus_initialized != MAX_CPUS_SET) { | |
565 | max_cpus_initialized = MAX_CPUS_WAIT; | |
566 | assert_wait((event_t) & max_cpus_initialized, THREAD_UNINT); | |
567 | (void) thread_block(THREAD_CONTINUE_NULL); | |
568 | } | |
569 | (void) ml_set_interrupts_enabled(current_state); | |
570 | return (machine_info.max_cpus); | |
571 | } | |
572 | ||
573 | /* | |
574 | * Routine: ml_init_lock_timeout | |
575 | * Function: | |
576 | */ | |
577 | void | |
578 | ml_init_lock_timeout(void) | |
579 | { | |
580 | uint64_t abstime; | |
581 | uint64_t mtxspin; | |
582 | uint64_t default_timeout_ns = NSEC_PER_SEC>>2; | |
583 | uint32_t slto; | |
584 | ||
585 | if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto))) | |
586 | default_timeout_ns = slto * NSEC_PER_USEC; | |
587 | ||
588 | nanoseconds_to_absolutetime(default_timeout_ns, &abstime); | |
589 | LockTimeOutUsec = (uint32_t)(abstime / NSEC_PER_USEC); | |
590 | LockTimeOut = (uint32_t)abstime; | |
591 | ||
592 | if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) { | |
593 | if (mtxspin > USEC_PER_SEC>>4) | |
594 | mtxspin = USEC_PER_SEC>>4; | |
595 | nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime); | |
596 | } else { | |
597 | nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime); | |
598 | } | |
599 | MutexSpin = abstime; | |
600 | } | |
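To make the timeout arithmetic concrete, here is a hedged, user-space sketch of the conversion this routine relies on; the 24 MHz timebase is assumed purely for illustration (the kernel's nanoseconds_to_absolutetime() uses the platform-reported frequency):

```c
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC   1000000000ULL
#define NSEC_PER_USEC  1000ULL

/* Hypothetical fixed timebase; the kernel derives this from the platform clock. */
#define TIMEBASE_HZ    24000000ULL

/* Rough stand-in for nanoseconds_to_absolutetime() at a 24 MHz timebase. */
static uint64_t ns_to_abs(uint64_t ns)
{
	return (ns * TIMEBASE_HZ) / NSEC_PER_SEC;
}

int main(void)
{
	uint64_t default_timeout_ns = NSEC_PER_SEC >> 2;	/* 250 ms default */
	uint64_t slto_us = 500000;				/* e.g. boot-arg slto_us=500000 */

	printf("default timeout: %llu ticks\n",
	    (unsigned long long)ns_to_abs(default_timeout_ns));	/* 6,000,000 */
	printf("slto_us timeout: %llu ticks\n",
	    (unsigned long long)ns_to_abs(slto_us * NSEC_PER_USEC));	/* 12,000,000 */
	return 0;
}
```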
601 | ||
602 | /* | |
603 | * This is called from the machine-independent routine cpu_up() | |
604 | * to perform machine-dependent info updates. | |
605 | */ | |
606 | void | |
607 | ml_cpu_up(void) | |
608 | { | |
609 | hw_atomic_add(&machine_info.physical_cpu, 1); | |
610 | hw_atomic_add(&machine_info.logical_cpu, 1); | |
611 | } | |
612 | ||
613 | /* | |
614 | * This is called from the machine-independent routine cpu_down() | |
615 | * to perform machine-dependent info updates. | |
616 | */ | |
617 | void | |
618 | ml_cpu_down(void) | |
619 | { | |
620 | cpu_data_t *cpu_data_ptr; | |
621 | ||
622 | hw_atomic_sub(&machine_info.physical_cpu, 1); | |
623 | hw_atomic_sub(&machine_info.logical_cpu, 1); | |
624 | ||
625 | /* | |
626 | * If we want to deal with outstanding IPIs, we need to | |
627 | * do so relatively early in the processor_doshutdown path, | |
628 | * as we pend decrementer interrupts using the IPI | |
629 | * mechanism if we cannot immediately service them (if | |
630 | * IRQ is masked). Do so now. | |
631 | * | |
632 | * We aren't on the interrupt stack here; would it make | |
633 | * more sense to disable signaling and then enable | |
634 | * interrupts? It might be a bit cleaner. | |
635 | */ | |
636 | cpu_data_ptr = getCpuDatap(); | |
637 | cpu_data_ptr->cpu_running = FALSE; | |
638 | cpu_signal_handler_internal(TRUE); | |
639 | } | |
640 | ||
641 | /* | |
642 | * Routine: ml_cpu_get_info | |
643 | * Function: | |
644 | */ | |
645 | void | |
646 | ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info) | |
647 | { | |
648 | cache_info_t *cpuid_cache_info; | |
649 | ||
650 | cpuid_cache_info = cache_info(); | |
651 | ml_cpu_info->vector_unit = 0; | |
652 | ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz; | |
653 | ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize; | |
654 | ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize; | |
655 | ||
656 | #if (__ARM_ARCH__ >= 7) | |
657 | ml_cpu_info->l2_settings = 1; | |
658 | ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size; | |
659 | #else | |
660 | ml_cpu_info->l2_settings = 0; | |
661 | ml_cpu_info->l2_cache_size = 0xFFFFFFFF; | |
662 | #endif | |
663 | ml_cpu_info->l3_settings = 0; | |
664 | ml_cpu_info->l3_cache_size = 0xFFFFFFFF; | |
665 | } | |
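A hypothetical caller might use this as sketched below; the kprintf formatting and the assumption that the ml_cpu_info_t fields widen cleanly to unsigned long are illustrative only:

```c
/* Hypothetical kernel-context caller; ml_cpu_info_t comes from machine_routines.h. */
static void
log_cache_geometry(void)
{
	ml_cpu_info_t info;

	ml_cpu_get_info(&info);
	kprintf("cacheline %lu bytes, L1 I$ %lu, L1 D$ %lu, L2 %lu\n",
	    (unsigned long)info.cache_line_size,
	    (unsigned long)info.l1_icache_size,
	    (unsigned long)info.l1_dcache_size,
	    info.l2_settings ? (unsigned long)info.l2_cache_size : 0UL);
}
```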
666 | ||
667 | unsigned int | |
668 | ml_get_machine_mem(void) | |
669 | { | |
670 | return (machine_info.memory_size); | |
671 | } | |
672 | ||
673 | __attribute__((noreturn)) | |
674 | void | |
675 | halt_all_cpus(boolean_t reboot) | |
676 | { | |
677 | if (reboot) { | |
678 | printf("MACH Reboot\n"); | |
679 | PEHaltRestart(kPERestartCPU); | |
680 | } else { | |
681 | printf("CPU halted\n"); | |
682 | PEHaltRestart(kPEHaltCPU); | |
683 | } | |
684 | while (1); | |
685 | } | |
686 | ||
687 | __attribute__((noreturn)) | |
688 | void | |
689 | halt_cpu(void) | |
690 | { | |
691 | halt_all_cpus(FALSE); | |
692 | } | |
693 | ||
694 | /* | |
695 | * Routine: machine_signal_idle | |
696 | * Function: | |
697 | */ | |
698 | void | |
699 | machine_signal_idle( | |
700 | processor_t processor) | |
701 | { | |
702 | cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL); | |
703 | KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0); | |
704 | } | |
705 | ||
706 | void | |
707 | machine_signal_idle_deferred( | |
708 | processor_t processor) | |
709 | { | |
710 | cpu_signal_deferred(processor_to_cpu_datap(processor)); | |
711 | KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0); | |
712 | } | |
713 | ||
714 | void | |
715 | machine_signal_idle_cancel( | |
716 | processor_t processor) | |
717 | { | |
718 | cpu_signal_cancel(processor_to_cpu_datap(processor)); | |
719 | KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0); | |
720 | } | |
721 | ||
722 | /* | |
723 | * Routine: ml_install_interrupt_handler | |
724 | * Function: Initialize Interrupt Handler | |
725 | */ | |
726 | void | |
727 | ml_install_interrupt_handler( | |
728 | void *nub, | |
729 | int source, | |
730 | void *target, | |
731 | IOInterruptHandler handler, | |
732 | void *refCon) | |
733 | { | |
734 | cpu_data_t *cpu_data_ptr; | |
735 | boolean_t current_state; | |
736 | ||
737 | current_state = ml_set_interrupts_enabled(FALSE); | |
738 | cpu_data_ptr = getCpuDatap(); | |
739 | ||
740 | cpu_data_ptr->interrupt_nub = nub; | |
741 | cpu_data_ptr->interrupt_source = source; | |
742 | cpu_data_ptr->interrupt_target = target; | |
743 | cpu_data_ptr->interrupt_handler = handler; | |
744 | cpu_data_ptr->interrupt_refCon = refCon; | |
745 | ||
746 | cpu_data_ptr->interrupts_enabled = TRUE; | |
747 | (void) ml_set_interrupts_enabled(current_state); | |
748 | ||
749 | initialize_screen(NULL, kPEAcquireScreen); | |
750 | } | |
751 | ||
752 | /* | |
753 | * Routine: ml_init_interrupt | |
754 | * Function: Initialize Interrupts | |
755 | */ | |
756 | void | |
757 | ml_init_interrupt(void) | |
758 | { | |
759 | } | |
760 | ||
761 | /* | |
762 | * Routine: ml_init_timebase | |
763 | * Function: register and set up Timebase, Decrementer services | |
764 | */ | |
765 | void ml_init_timebase( | |
766 | void *args, | |
767 | tbd_ops_t tbd_funcs, | |
768 | vm_offset_t int_address, | |
769 | vm_offset_t int_value __unused) | |
770 | { | |
771 | cpu_data_t *cpu_data_ptr; | |
772 | ||
773 | cpu_data_ptr = (cpu_data_t *)args; | |
774 | ||
775 | if ((cpu_data_ptr == &BootCpuData) | |
776 | && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) { | |
777 | rtclock_timebase_func = *tbd_funcs; | |
778 | rtclock_timebase_addr = int_address; | |
779 | } | |
780 | } | |
781 | ||
782 | void | |
783 | ml_parse_cpu_topology(void) | |
784 | { | |
785 | DTEntry entry, child __unused; | |
786 | OpaqueDTEntryIterator iter; | |
787 | uint32_t cpu_boot_arg; | |
788 | int err; | |
789 | ||
790 | cpu_boot_arg = MAX_CPUS; | |
791 | ||
792 | PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)); | |
793 | ||
794 | err = DTLookupEntry(NULL, "/cpus", &entry); | |
795 | assert(err == kSuccess); | |
796 | ||
797 | err = DTInitEntryIterator(entry, &iter); | |
798 | assert(err == kSuccess); | |
799 | ||
800 | while (kSuccess == DTIterateEntries(&iter, &child)) { | |
801 | unsigned int propSize; | |
802 | void *prop = NULL; | |
803 | int cpu_id = avail_cpus++; | |
804 | ||
805 | if (kSuccess == DTGetProperty(child, "cpu-id", &prop, &propSize)) | |
806 | cpu_id = *((int32_t*)prop); | |
807 | ||
808 | assert(cpu_id < MAX_CPUS); | |
809 | assert(cpu_phys_ids[cpu_id] == (uint32_t)-1); | |
810 | ||
811 | if (boot_cpu == -1) { | |
812 | if (kSuccess != DTGetProperty(child, "state", &prop, &propSize)) | |
813 | panic("unable to retrieve state for cpu %d", cpu_id); | |
814 | ||
815 | if (strncmp((char*)prop, "running", propSize) == 0) { | |
816 | boot_cpu = cpu_id; | |
817 | } | |
818 | } | |
819 | if (kSuccess != DTGetProperty(child, "reg", &prop, &propSize)) | |
820 | panic("unable to retrieve physical ID for cpu %d", cpu_id); | |
821 | ||
822 | cpu_phys_ids[cpu_id] = *((uint32_t*)prop); | |
823 | ||
824 | if ((cpu_id > max_cpu_number) && ((cpu_id == boot_cpu) || (avail_cpus <= cpu_boot_arg))) | |
825 | max_cpu_number = cpu_id; | |
826 | } | |
827 | ||
828 | if (avail_cpus > cpu_boot_arg) | |
829 | avail_cpus = cpu_boot_arg; | |
830 | ||
831 | if (avail_cpus == 0) | |
832 | panic("No cpus found!"); | |
833 | ||
834 | if (boot_cpu == -1) | |
835 | panic("unable to determine boot cpu!"); | |
836 | } | |
837 | ||
838 | unsigned int | |
839 | ml_get_cpu_count(void) | |
840 | { | |
841 | return avail_cpus; | |
842 | } | |
843 | ||
844 | int | |
845 | ml_get_boot_cpu_number(void) | |
846 | { | |
847 | return boot_cpu; | |
848 | } | |
849 | ||
850 | cluster_type_t | |
851 | ml_get_boot_cluster(void) | |
852 | { | |
853 | return boot_cluster; | |
854 | } | |
855 | ||
856 | int | |
857 | ml_get_cpu_number(uint32_t phys_id) | |
858 | { | |
859 | for (int log_id = 0; log_id <= ml_get_max_cpu_number(); ++log_id) { | |
860 | if (cpu_phys_ids[log_id] == phys_id) | |
861 | return log_id; | |
862 | } | |
863 | return -1; | |
864 | } | |
865 | ||
866 | int | |
867 | ml_get_max_cpu_number(void) | |
868 | { | |
869 | return max_cpu_number; | |
870 | } | |
871 | ||
872 | ||
873 | void ml_lockdown_init() { | |
874 | lockdown_handler_grp = lck_grp_alloc_init("lockdown_handler", NULL); | |
875 | assert(lockdown_handler_grp != NULL); | |
876 | ||
877 | lck_mtx_init(&lockdown_handler_lck, lockdown_handler_grp, NULL); | |
878 | } | |
879 | ||
880 | kern_return_t | |
881 | ml_lockdown_handler_register(lockdown_handler_t f, void *this) | |
882 | { | |
883 | if (lockdown_handler || !f) { | |
884 | return KERN_FAILURE; | |
885 | } | |
886 | ||
887 | lck_mtx_lock(&lockdown_handler_lck); | |
888 | lockdown_handler = f; | |
889 | lockdown_this = this; | |
890 | ||
891 | #if !(defined(KERNEL_INTEGRITY_KTRR)) | |
892 | lockdown_done=1; | |
893 | lockdown_handler(this); | |
894 | #else | |
895 | if (lockdown_done) { | |
896 | lockdown_handler(this); | |
897 | } | |
898 | #endif | |
899 | lck_mtx_unlock(&lockdown_handler_lck); | |
900 | ||
901 | return KERN_SUCCESS; | |
902 | } | |
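A hedged sketch of how a client could register a lockdown callback with the routine above; the subsystem type, callback, and init function names are hypothetical:

```c
/* Hypothetical subsystem state and callback; only the registration call is real. */
typedef struct my_subsystem {
	boolean_t locked_down;
} my_subsystem_t;

static my_subsystem_t my_subsystem;

static void
my_lockdown_cb(void *arg)
{
	my_subsystem_t *s = (my_subsystem_t *)arg;

	/* Runs once KTRR lockdown has completed (or immediately on non-KTRR configs). */
	s->locked_down = TRUE;
}

static void
my_subsystem_init(void)
{
	if (ml_lockdown_handler_register(my_lockdown_cb, &my_subsystem) != KERN_SUCCESS) {
		panic("my_subsystem: lockdown handler already registered");
	}
}
```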
903 | ||
904 | void ml_lockdown_run_handler() { | |
905 | lck_mtx_lock(&lockdown_handler_lck); | |
906 | assert(!lockdown_done); | |
907 | ||
908 | lockdown_done = 1; | |
909 | if (lockdown_handler) { | |
910 | lockdown_handler(lockdown_this); | |
911 | } | |
912 | lck_mtx_unlock(&lockdown_handler_lck); | |
913 | } | |
914 | ||
915 | kern_return_t | |
916 | ml_processor_register( | |
917 | ml_processor_info_t * in_processor_info, | |
918 | processor_t * processor_out, | |
919 | ipi_handler_t * ipi_handler) | |
920 | { | |
921 | cpu_data_t *this_cpu_datap; | |
922 | processor_set_t pset; | |
923 | boolean_t is_boot_cpu; | |
924 | static unsigned int reg_cpu_count = 0; | |
925 | ||
926 | if (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number()) | |
927 | return KERN_FAILURE; | |
928 | ||
929 | if ((unsigned int)OSIncrementAtomic((SInt32*)®_cpu_count) >= avail_cpus) | |
930 | return KERN_FAILURE; | |
931 | ||
932 | if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) { | |
933 | is_boot_cpu = FALSE; | |
934 | this_cpu_datap = cpu_data_alloc(FALSE); | |
935 | cpu_data_init(this_cpu_datap); | |
936 | } else { | |
937 | this_cpu_datap = &BootCpuData; | |
938 | is_boot_cpu = TRUE; | |
939 | } | |
940 | ||
941 | assert(in_processor_info->log_id < MAX_CPUS); | |
942 | ||
943 | this_cpu_datap->cpu_id = in_processor_info->cpu_id; | |
944 | ||
945 | this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu); |
946 | if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) | |
947 | goto processor_register_error; | |
948 | ||
949 | if (!is_boot_cpu) { | |
950 | this_cpu_datap->cpu_number = in_processor_info->log_id; | |
951 | ||
952 | if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) | |
953 | goto processor_register_error; | |
954 | } | |
955 | ||
956 | this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle; | |
957 | this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch; | |
958 | nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency); | |
959 | this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr); | |
960 | ||
961 | this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer; | |
962 | this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon; | |
963 | ||
964 | this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler; | |
965 | this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr; | |
966 | this_cpu_datap->cpu_phys_id = in_processor_info->phys_id; | |
967 | this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty; | |
968 | ||
969 | this_cpu_datap->cpu_cluster_type = in_processor_info->cluster_type; | |
970 | this_cpu_datap->cpu_cluster_id = in_processor_info->cluster_id; | |
971 | this_cpu_datap->cpu_l2_id = in_processor_info->l2_cache_id; | |
972 | this_cpu_datap->cpu_l2_size = in_processor_info->l2_cache_size; | |
973 | this_cpu_datap->cpu_l3_id = in_processor_info->l3_cache_id; | |
974 | this_cpu_datap->cpu_l3_size = in_processor_info->l3_cache_size; | |
975 | ||
976 | this_cpu_datap->cluster_master = is_boot_cpu; | |
977 | ||
978 | pset = pset_find(in_processor_info->cluster_id, processor_pset(master_processor)); | |
979 | assert(pset != NULL); | |
980 | kprintf("%s>cpu_id %p cluster_id %d cpu_number %d is type %d\n", __FUNCTION__, in_processor_info->cpu_id, in_processor_info->cluster_id, this_cpu_datap->cpu_number, in_processor_info->cluster_type); | |
981 | ||
982 | if (!is_boot_cpu) { | |
983 | processor_init((struct processor *)this_cpu_datap->cpu_processor, | |
984 | this_cpu_datap->cpu_number, pset); | |
985 | ||
986 | if (this_cpu_datap->cpu_l2_access_penalty) { | |
987 | /* | |
988 | * Cores that have a non-zero L2 access penalty compared | |
989 | * to the boot processor should be de-prioritized by the | |
990 | * scheduler, so that threads use the cores with better L2 | |
991 | * preferentially. | |
992 | */ | |
993 | processor_set_primary(this_cpu_datap->cpu_processor, | |
994 | master_processor); | |
995 | } | |
996 | } | |
997 | ||
998 | *processor_out = this_cpu_datap->cpu_processor; | |
999 | *ipi_handler = cpu_signal_handler; | |
1000 | if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) | |
1001 | *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle; | |
1002 | ||
1003 | #if KPC | |
1004 | if (kpc_register_cpu(this_cpu_datap) != TRUE) | |
1005 | goto processor_register_error; | |
1006 | #endif | |
1007 | ||
1008 | if (!is_boot_cpu) { | |
1009 | prng_cpu_init(this_cpu_datap->cpu_number); | |
1010 | // now let next CPU register itself | |
1011 | OSIncrementAtomic((SInt32*)&real_ncpus); | |
1012 | } | |
1013 | ||
1014 | return KERN_SUCCESS; | |
1015 | ||
1016 | processor_register_error: | |
1017 | #if KPC | |
1018 | kpc_unregister_cpu(this_cpu_datap); | |
1019 | #endif | |
1020 | if (!is_boot_cpu) |
1021 | cpu_data_free(this_cpu_datap); | |
1022 | ||
1023 | return KERN_FAILURE; | |
1024 | } | |
1025 | ||
1026 | void | |
1027 | ml_init_arm_debug_interface( | |
1028 | void * in_cpu_datap, | |
1029 | vm_offset_t virt_address) | |
1030 | { | |
1031 | ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address; | |
1032 | do_debugid(); | |
1033 | } | |
1034 | ||
1035 | /* | |
1036 | * Routine: init_ast_check | |
1037 | * Function: | |
1038 | */ | |
1039 | void | |
1040 | init_ast_check( | |
1041 | __unused processor_t processor) | |
1042 | { | |
1043 | } | |
1044 | ||
1045 | /* | |
1046 | * Routine: cause_ast_check | |
1047 | * Function: | |
1048 | */ | |
1049 | void | |
1050 | cause_ast_check( | |
1051 | processor_t processor) | |
1052 | { | |
1053 | if (current_processor() != processor) { | |
1054 | cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL); | |
1055 | KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0); | |
1056 | } | |
1057 | } | |
1058 | ||
1059 | ||
1060 | /* | |
1061 | * Routine: ml_at_interrupt_context | |
1062 | * Function: Check if running at interrupt context | |
1063 | */ | |
1064 | boolean_t | |
1065 | ml_at_interrupt_context(void) | |
1066 | { | |
1067 | unsigned int local; | |
1068 | vm_offset_t intstack_top_ptr; | |
1069 | ||
1070 | intstack_top_ptr = getCpuDatap()->intstack_top; | |
1071 | return (((vm_offset_t)(&local) < intstack_top_ptr) && ((vm_offset_t)(&local) > (intstack_top_ptr - INTSTACK_SIZE))); | |
1072 | } | |
1073 | extern uint32_t cpu_idle_count; | |
1074 | ||
1075 | void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) { | |
1076 | *icp = ml_at_interrupt_context(); | |
1077 | *pidlep = (cpu_idle_count == real_ncpus); | |
1078 | } | |
1079 | ||
1080 | /* | |
1081 | * Routine: ml_cause_interrupt | |
1082 | * Function: Generate a fake interrupt | |
1083 | */ | |
1084 | void | |
1085 | ml_cause_interrupt(void) | |
1086 | { | |
1087 | return; /* BS_XXX */ | |
1088 | } | |
1089 | ||
1090 | /* Map memory-mapped IO space */ | |
1091 | vm_offset_t | |
1092 | ml_io_map( | |
1093 | vm_offset_t phys_addr, | |
1094 | vm_size_t size) | |
1095 | { | |
1096 | return (io_map(phys_addr, size, VM_WIMG_IO)); | |
1097 | } | |
1098 | ||
1099 | vm_offset_t | |
1100 | ml_io_map_wcomb( | |
1101 | vm_offset_t phys_addr, | |
1102 | vm_size_t size) | |
1103 | { | |
1104 | return (io_map(phys_addr, size, VM_WIMG_WCOMB)); | |
1105 | } | |
1106 | ||
1107 | /* boot memory allocation */ | |
1108 | vm_offset_t | |
1109 | ml_static_malloc( | |
1110 | __unused vm_size_t size) | |
1111 | { | |
1112 | return ((vm_offset_t) NULL); | |
1113 | } | |
1114 | ||
1115 | vm_map_address_t | |
1116 | ml_map_high_window( | |
1117 | vm_offset_t phys_addr, | |
1118 | vm_size_t len) | |
1119 | { | |
1120 | return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE); | |
1121 | } | |
1122 | ||
1123 | vm_offset_t | |
1124 | ml_static_ptovirt( | |
1125 | vm_offset_t paddr) | |
1126 | { | |
1127 | return phystokv(paddr); | |
1128 | } | |
1129 | ||
1130 | vm_offset_t | |
1131 | ml_static_vtop( | |
1132 | vm_offset_t vaddr) | |
1133 | { | |
1134 | if (((vm_address_t)(vaddr) - gVirtBase) >= gPhysSize) | |
1135 | panic("ml_static_ptovirt(): illegal vaddr: %p\n", (void*)vaddr); | |
1136 | return ((vm_address_t)(vaddr) - gVirtBase + gPhysBase); | |
1137 | } | |
1138 | ||
1139 | kern_return_t | |
1140 | ml_static_protect( | |
1141 | vm_offset_t vaddr, /* kernel virtual address */ | |
1142 | vm_size_t size, | |
1143 | vm_prot_t new_prot) | |
1144 | { | |
1145 | pt_entry_t arm_prot = 0; | |
1146 | pt_entry_t arm_block_prot = 0; | |
1147 | vm_offset_t vaddr_cur; | |
1148 | ppnum_t ppn; | |
1149 | kern_return_t result = KERN_SUCCESS; | |
1150 | ||
1151 | if (vaddr < VM_MIN_KERNEL_ADDRESS) { | |
1152 | panic("ml_static_protect(): %p < %p", (void *) vaddr, (void *) VM_MIN_KERNEL_ADDRESS); | |
1153 | return KERN_FAILURE; | |
1154 | } | |
1155 | ||
1156 | assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */ | |
1157 | ||
1158 | if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) { | |
1159 | panic("ml_static_protect(): WX request on %p", (void *) vaddr); | |
1160 | } | |
1161 | ||
1162 | /* Set up the protection bits, and block bits so we can validate block mappings. */ | |
1163 | if (new_prot & VM_PROT_WRITE) { | |
1164 | arm_prot |= ARM_PTE_AP(AP_RWNA); | |
1165 | arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA); | |
1166 | } else { | |
1167 | arm_prot |= ARM_PTE_AP(AP_RONA); | |
1168 | arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA); | |
1169 | } | |
1170 | ||
1171 | arm_prot |= ARM_PTE_NX; | |
1172 | arm_block_prot |= ARM_TTE_BLOCK_NX; | |
1173 | ||
1174 | if (!(new_prot & VM_PROT_EXECUTE)) { | |
1175 | arm_prot |= ARM_PTE_PNX; | |
1176 | arm_block_prot |= ARM_TTE_BLOCK_PNX; | |
1177 | } | |
1178 | ||
1179 | for (vaddr_cur = vaddr; | |
1180 | vaddr_cur < trunc_page_64(vaddr + size); | |
1181 | vaddr_cur += PAGE_SIZE) { | |
1182 | ppn = pmap_find_phys(kernel_pmap, vaddr_cur); | |
1183 | if (ppn != (vm_offset_t) NULL) { | |
1184 | #if __ARM64_TWO_LEVEL_PMAP__ | |
1185 | tt_entry_t *tte2; | |
1186 | #else | |
1187 | tt_entry_t *tte1, *tte2; | |
1188 | #endif | |
1189 | pt_entry_t *pte_p; | |
1190 | pt_entry_t ptmp; | |
1191 | ||
1192 | ||
1193 | #if __ARM64_TWO_LEVEL_PMAP__ | |
1194 | tte2 = &kernel_pmap->tte[(((vaddr_cur) & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT)]; | |
1195 | #else | |
1196 | tte1 = &kernel_pmap->tte[(((vaddr_cur) & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT)]; | |
1197 | tte2 = &((tt_entry_t*) phystokv((*tte1) & ARM_TTE_TABLE_MASK))[(((vaddr_cur) & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT)]; | |
1198 | #endif | |
1199 | ||
1200 | if (((*tte2) & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) { | |
1201 | if ((((*tte2) & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) && | |
1202 | ((*tte2 & (ARM_TTE_BLOCK_NXMASK | ARM_TTE_BLOCK_PNXMASK | ARM_TTE_BLOCK_APMASK)) == arm_block_prot)) { | |
1203 | /* | |
1204 | * We can support ml_static_protect on a block mapping if the mapping already has | |
1205 | * the desired protections. We still want to run checks on a per-page basis. | |
1206 | */ | |
1207 | continue; | |
1208 | } | |
1209 | ||
1210 | result = KERN_FAILURE; | |
1211 | break; | |
1212 | } | |
1213 | ||
1214 | pte_p = (pt_entry_t *)&((tt_entry_t*)(phystokv((*tte2) & ARM_TTE_TABLE_MASK)))[(((vaddr_cur) & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT)]; | |
1215 | ptmp = *pte_p; | |
1216 | ||
1217 | if ((ptmp & ARM_PTE_HINT_MASK) && ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot)) { | |
1218 | /* | |
1219 | * The contiguous hint is similar to a block mapping for ml_static_protect; if the existing | |
1220 | * protections do not match the desired protections, then we will fail (as we cannot update | |
1221 | * this mapping without updating other mappings as well). | |
1222 | */ | |
1223 | result = KERN_FAILURE; | |
1224 | break; | |
1225 | } | |
1226 | ||
1227 | __unreachable_ok_push | |
1228 | if (TEST_PAGE_RATIO_4) { | |
1229 | { | |
1230 | unsigned int i; | |
1231 | pt_entry_t *ptep_iter; | |
1232 | ||
1233 | ptep_iter = pte_p; | |
1234 | for (i=0; i<4; i++, ptep_iter++) { | |
1235 | /* Note that there is a hole in the HINT sanity checking here. */ | |
1236 | ptmp = *ptep_iter; | |
1237 | ||
1238 | /* We only need to update the page tables if the protections do not match. */ | |
1239 | if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) { | |
1240 | ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot; | |
1241 | *ptep_iter = ptmp; | |
1242 | } | |
1243 | } | |
1244 | } | |
1245 | #ifndef __ARM_L1_PTW__ | |
1246 | FlushPoC_DcacheRegion( trunc_page_32(pte_p), 4*sizeof(*pte_p)); | |
1247 | #endif | |
1248 | } else { | |
1249 | ptmp = *pte_p; | |
1250 | ||
1251 | /* We only need to update the page tables if the protections do not match. */ | |
1252 | if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) { | |
1253 | ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot; | |
1254 | *pte_p = ptmp; | |
1255 | } | |
1256 | ||
1257 | #ifndef __ARM_L1_PTW__ | |
1258 | FlushPoC_DcacheRegion( trunc_page_32(pte_p), sizeof(*pte_p)); | |
1259 | #endif | |
1260 | } | |
1261 | __unreachable_ok_pop | |
1262 | } | |
1263 | } | |
1264 | ||
1265 | if (vaddr_cur > vaddr) { | |
1266 | assert(((vaddr_cur - vaddr) & 0xFFFFFFFF00000000ULL) == 0); | |
1267 | flush_mmu_tlb_region(vaddr, (uint32_t)(vaddr_cur - vaddr)); | |
1268 | } | |
1269 | ||
1270 | ||
1271 | return result; | |
1272 | } | |
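As a usage sketch (assumptions: my_table is a hypothetical page-aligned, statically mapped buffer and my_table_size a multiple of PAGE_SIZE), a caller could seal a region read-only like this:

```c
/* Hypothetical: write-protect a statically mapped kernel buffer once it has
 * been fully initialized. my_table and my_table_size are placeholders. */
extern char my_table[];          /* assumed page-aligned, statically mapped */
extern vm_size_t my_table_size;  /* assumed a multiple of PAGE_SIZE */

static void
my_table_seal(void)
{
	kern_return_t kr;

	kr = ml_static_protect((vm_offset_t)my_table, my_table_size, VM_PROT_READ);
	if (kr != KERN_SUCCESS) {
		panic("my_table_seal: ml_static_protect failed (%d)", kr);
	}
}
```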
1273 | ||
1274 | /* | |
1275 | * Routine: ml_static_mfree | |
1276 | * Function: | |
1277 | */ | |
1278 | void | |
1279 | ml_static_mfree( | |
1280 | vm_offset_t vaddr, | |
1281 | vm_size_t size) | |
1282 | { | |
1283 | vm_offset_t vaddr_cur; | |
1284 | ppnum_t ppn; | |
1285 | uint32_t freed_pages = 0; | |
1286 | ||
1287 | /* It is acceptable (if bad) to fail to free. */ | |
1288 | if (vaddr < VM_MIN_KERNEL_ADDRESS) | |
1289 | return; | |
1290 | ||
1291 | assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */ | |
1292 | ||
1293 | for (vaddr_cur = vaddr; | |
1294 | vaddr_cur < trunc_page_64(vaddr + size); | |
1295 | vaddr_cur += PAGE_SIZE) { | |
1296 | ||
1297 | ppn = pmap_find_phys(kernel_pmap, vaddr_cur); | |
1298 | if (ppn != (vm_offset_t) NULL) { | |
1299 | /* | |
1300 | * It is not acceptable to fail to update the protections on a page | |
1301 | * we will release to the VM. We need to either panic or continue. | |
1302 | * For now, we'll panic (to help flag if there is memory we can | |
1303 | * reclaim). | |
1304 | */ | |
1305 | if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) { | |
1306 | panic("Failed ml_static_mfree on %p", (void *) vaddr_cur); | |
1307 | } | |
1308 | ||
1309 | #if 0 | |
1310 | /* | |
1311 | * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme | |
1312 | * relies on the persistence of these mappings for all time. | |
1313 | */ | |
1314 | // pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE)); | |
1315 | #endif | |
1316 | ||
1317 | vm_page_create(ppn, (ppn + 1)); | |
1318 | freed_pages++; | |
1319 | } | |
1320 | } | |
1321 | vm_page_lockspin_queues(); | |
1322 | vm_page_wire_count -= freed_pages; | |
1323 | vm_page_wire_count_initial -= freed_pages; | |
1324 | vm_page_unlock_queues(); | |
1325 | #if DEBUG | |
1326 | kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn); | |
1327 | #endif | |
1328 | } | |
1329 | ||
1330 | ||
1331 | /* virtual to physical on wired pages */ | |
1332 | vm_offset_t | |
1333 | ml_vtophys(vm_offset_t vaddr) | |
1334 | { | |
1335 | return kvtophys(vaddr); | |
1336 | } | |
1337 | ||
1338 | /* | |
1339 | * Routine: ml_nofault_copy | |
1340 | * Function: Perform a physical mode copy if the source and destination have | |
1341 | * valid translations in the kernel pmap. If translations are present, they are | |
1342 | * assumed to be wired; i.e., no attempt is made to guarantee that the | |
1343 | * translations obtained remain valid for the duration of the copy process. | |
1344 | */ | |
1345 | vm_size_t | |
1346 | ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size) | |
1347 | { | |
1348 | addr64_t cur_phys_dst, cur_phys_src; | |
1349 | vm_size_t count, nbytes = 0; | |
1350 | ||
1351 | while (size > 0) { | |
1352 | if (!(cur_phys_src = kvtophys(virtsrc))) | |
1353 | break; | |
1354 | if (!(cur_phys_dst = kvtophys(virtdst))) | |
1355 | break; | |
1356 | if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) || | |
1357 | !pmap_valid_address(trunc_page_64(cur_phys_src))) | |
1358 | break; | |
1359 | count = PAGE_SIZE - (cur_phys_src & PAGE_MASK); | |
1360 | if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) | |
1361 | count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK); | |
1362 | if (count > size) | |
1363 | count = size; | |
1364 | ||
1365 | bcopy_phys(cur_phys_src, cur_phys_dst, count); | |
1366 | ||
1367 | nbytes += count; | |
1368 | virtsrc += count; | |
1369 | virtdst += count; | |
1370 | size -= count; | |
1371 | } | |
1372 | ||
1373 | return nbytes; | |
1374 | } | |
1375 | ||
1376 | /* | |
1377 | * Routine: ml_validate_nofault | |
1378 | * Function: Validate that this address range has valid translations | |
1379 | * in the kernel pmap. If translations are present, they are | |
1380 | * assumed to be wired; i.e. no attempt is made to guarantee | |
1381 | * that the translations persist after the check. | |
1382 | * Returns: TRUE if the range is mapped and will not cause a fault, | |
1383 | * FALSE otherwise. | |
1384 | */ | |
1385 | ||
1386 | boolean_t ml_validate_nofault( | |
1387 | vm_offset_t virtsrc, vm_size_t size) | |
1388 | { | |
1389 | addr64_t cur_phys_src; | |
1390 | uint32_t count; | |
1391 | ||
1392 | while (size > 0) { | |
1393 | if (!(cur_phys_src = kvtophys(virtsrc))) | |
1394 | return FALSE; | |
1395 | if (!pmap_valid_address(trunc_page_64(cur_phys_src))) | |
1396 | return FALSE; | |
1397 | count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK)); | |
1398 | if (count > size) | |
1399 | count = (uint32_t)size; | |
1400 | ||
1401 | virtsrc += count; | |
1402 | size -= count; | |
1403 | } | |
1404 | ||
1405 | return TRUE; | |
1406 | } | |
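Combining the two routines above, a hypothetical helper could refuse to copy unless every page in both ranges is already mapped, for example:

```c
/* Hypothetical debugger-style helper: copy only if the whole source and
 * destination ranges are already mapped and wired, so the copy cannot fault. */
static vm_size_t
safe_kernel_read(vm_offset_t dst, vm_offset_t src, vm_size_t len)
{
	if (!ml_validate_nofault(src, len) || !ml_validate_nofault(dst, len)) {
		return 0;	/* unmapped somewhere in a range; refuse to copy */
	}

	/* ml_nofault_copy() takes (source, destination, size). */
	return ml_nofault_copy(src, dst, len);
}
```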
1407 | ||
1408 | void | |
1409 | ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size) | |
1410 | { | |
1411 | *phys_addr = 0; | |
1412 | *size = 0; | |
1413 | } | |
1414 | ||
1415 | void | |
1416 | active_rt_threads(__unused boolean_t active) | |
1417 | { | |
1418 | } | |
1419 | ||
1420 | static void cpu_qos_cb_default(__unused int urgency, __unused uint64_t qos_param1, __unused uint64_t qos_param2) { | |
1421 | return; | |
1422 | } | |
1423 | ||
1424 | cpu_qos_update_t cpu_qos_update = cpu_qos_cb_default; | |
1425 | ||
1426 | void cpu_qos_update_register(cpu_qos_update_t cpu_qos_cb) { | |
1427 | if (cpu_qos_cb != NULL) { | |
1428 | cpu_qos_update = cpu_qos_cb; | |
1429 | } else { | |
1430 | cpu_qos_update = cpu_qos_cb_default; | |
1431 | } | |
1432 | } | |
1433 | ||
1434 | void | |
1435 | thread_tell_urgency(int urgency, uint64_t rt_period, uint64_t rt_deadline, uint64_t sched_latency __unused, __unused thread_t nthread) | |
1436 | { | |
1437 | SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, rt_deadline, sched_latency, 0); | |
1438 | ||
1439 | cpu_qos_update(urgency, rt_period, rt_deadline); | |
1440 | ||
1441 | SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, rt_deadline, 0, 0); | |
1442 | } | |
1443 | ||
1444 | void | |
1445 | machine_run_count(__unused uint32_t count) | |
1446 | { | |
1447 | } | |
1448 | ||
1449 | processor_t | |
1450 | machine_choose_processor(__unused processor_set_t pset, processor_t processor) | |
1451 | { | |
1452 | return (processor); | |
1453 | } | |
1454 | ||
1455 | vm_offset_t | |
1456 | ml_stack_remaining(void) | |
1457 | { | |
1458 | uintptr_t local = (uintptr_t) &local; | |
1459 | ||
1460 | if (ml_at_interrupt_context()) { | |
1461 | return (local - (getCpuDatap()->intstack_top - INTSTACK_SIZE)); | |
1462 | } else { | |
1463 | return (local - current_thread()->kernel_stack); | |
1464 | } | |
1465 | } | |
1466 | ||
1467 | #if KASAN | |
1468 | vm_offset_t ml_stack_base(void); | |
1469 | vm_size_t ml_stack_size(void); | |
1470 | ||
1471 | vm_offset_t | |
1472 | ml_stack_base(void) | |
1473 | { | |
1474 | if (ml_at_interrupt_context()) { | |
1475 | return getCpuDatap()->intstack_top - INTSTACK_SIZE; | |
1476 | } else { | |
1477 | return current_thread()->kernel_stack; | |
1478 | } | |
1479 | } | |
1480 | vm_size_t | |
1481 | ml_stack_size(void) | |
1482 | { | |
1483 | if (ml_at_interrupt_context()) { | |
1484 | return INTSTACK_SIZE; | |
1485 | } else { | |
1486 | return kernel_stack_size; | |
1487 | } | |
1488 | } | |
1489 | #endif | |
1490 | ||
1491 | boolean_t machine_timeout_suspended(void) { | |
1492 | return FALSE; | |
1493 | } | |
1494 | ||
1495 | kern_return_t | |
1496 | ml_interrupt_prewarm(__unused uint64_t deadline) | |
1497 | { | |
1498 | return KERN_FAILURE; | |
1499 | } | |
1500 | ||
1501 | /* | |
1502 | * Assumes fiq, irq disabled. | |
1503 | */ | |
1504 | void | |
1505 | ml_set_decrementer(uint32_t dec_value) | |
1506 | { | |
1507 | cpu_data_t *cdp = getCpuDatap(); | |
1508 | ||
1509 | assert(ml_get_interrupts_enabled() == FALSE); | |
1510 | cdp->cpu_decrementer = dec_value; | |
1511 | ||
1512 | if (cdp->cpu_set_decrementer_func) { | |
1513 | ((void (*)(uint32_t))cdp->cpu_set_decrementer_func)(dec_value); | |
1514 | } else { | |
1515 | __asm__ volatile("msr CNTP_TVAL_EL0, %0" : : "r"((uint64_t)dec_value)); | |
1516 | } | |
1517 | } | |
1518 | ||
1519 | uint64_t ml_get_hwclock() | |
1520 | { | |
1521 | uint64_t timebase; | |
1522 | ||
1523 | // ISB required by ARMV7C.b section B8.1.2 & ARMv8 section D6.1.2 | |
1524 | // "Reads of CNTPCT[_EL0] can occur speculatively and out of order relative | |
1525 | // to other instructions executed on the same processor." | |
1526 | __asm__ volatile("isb\n" | |
1527 | "mrs %0, CNTPCT_EL0" | |
1528 | : "=r"(timebase)); | |
1529 | ||
1530 | return timebase; | |
1531 | } | |
1532 | ||
1533 | uint64_t | |
1534 | ml_get_timebase() | |
1535 | { | |
1536 | return (ml_get_hwclock() + getCpuDatap()->cpu_base_timebase); | |
1537 | } | |
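A minimal timing sketch built on ml_get_timebase(); the 24 MHz conversion factor is assumed purely for illustration, since the real tick-to-nanosecond ratio comes from the platform clock frequency:

```c
/* Hypothetical kernel-context sketch: measure an interval in timebase ticks
 * and convert assuming a 24 MHz timebase (illustration only). */
#define ASSUMED_TIMEBASE_HZ 24000000ULL

static uint64_t
measure_ns(void (*fn)(void))
{
	uint64_t start, end;

	start = ml_get_timebase();
	fn();
	end = ml_get_timebase();

	return ((end - start) * 1000000000ULL) / ASSUMED_TIMEBASE_HZ;
}
```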
1538 | ||
1539 | uint32_t | |
1540 | ml_get_decrementer() | |
1541 | { | |
1542 | cpu_data_t *cdp = getCpuDatap(); | |
1543 | uint32_t dec; | |
1544 | ||
1545 | assert(ml_get_interrupts_enabled() == FALSE); | |
1546 | ||
1547 | if (cdp->cpu_get_decrementer_func) { | |
1548 | dec = ((uint32_t (*)(void))cdp->cpu_get_decrementer_func)(); | |
1549 | } else { | |
1550 | uint64_t wide_val; | |
1551 | ||
1552 | __asm__ volatile("mrs %0, CNTP_TVAL_EL0" : "=r"(wide_val)); | |
1553 | dec = (uint32_t)wide_val; | |
1554 | assert(wide_val == (uint64_t)dec); | |
1555 | } | |
1556 | ||
1557 | return dec; | |
1558 | } | |
1559 | ||
1560 | boolean_t | |
1561 | ml_get_timer_pending() | |
1562 | { | |
1563 | uint64_t cntp_ctl; | |
1564 | ||
1565 | __asm__ volatile("mrs %0, CNTP_CTL_EL0" : "=r"(cntp_ctl)); | |
1566 | return ((cntp_ctl & CNTP_CTL_EL0_ISTATUS) != 0) ? TRUE : FALSE; | |
1567 | } | |
1568 | ||
1569 | boolean_t | |
1570 | ml_wants_panic_trap_to_debugger(void) | |
1571 | { | |
1572 | boolean_t result = FALSE; | |
1573 | return result; | |
1574 | } | |
1575 | ||
1576 | static void | |
1577 | cache_trap_error(thread_t thread, vm_map_address_t fault_addr) | |
1578 | { | |
1579 | mach_exception_data_type_t exc_data[2]; | |
1580 | arm_saved_state_t *regs = get_user_regs(thread); | |
1581 | ||
1582 | set_saved_state_far(regs, fault_addr); | |
1583 | ||
1584 | exc_data[0] = KERN_INVALID_ADDRESS; | |
1585 | exc_data[1] = fault_addr; | |
1586 | ||
1587 | exception_triage(EXC_BAD_ACCESS, exc_data, 2); | |
1588 | } | |
1589 | ||
1590 | static void | |
1591 | cache_trap_recover() | |
1592 | { | |
1593 | vm_map_address_t fault_addr; | |
1594 | ||
1595 | __asm__ volatile("mrs %0, FAR_EL1" : "=r"(fault_addr)); | |
1596 | ||
1597 | cache_trap_error(current_thread(), fault_addr); | |
1598 | } | |
1599 | ||
1600 | static void | |
1601 | dcache_flush_trap(vm_map_address_t start, vm_map_size_t size) | |
1602 | { | |
1603 | vm_map_address_t end = start + size; | |
1604 | thread_t thread = current_thread(); | |
1605 | vm_offset_t old_recover = thread->recover; | |
1606 | ||
1607 | /* Check bounds */ | |
1608 | if (task_has_64BitAddr(current_task())) { | |
1609 | if (end > MACH_VM_MAX_ADDRESS) { | |
1610 | cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1)); | |
1611 | } | |
1612 | } else { | |
1613 | if (end > VM_MAX_ADDRESS) { | |
1614 | cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1)); | |
1615 | } | |
1616 | } | |
1617 | ||
1618 | if (start > end) { | |
1619 | cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1)); | |
1620 | } | |
1621 | ||
1622 | /* Set recovery function */ | |
1623 | thread->recover = (vm_address_t)cache_trap_recover; | |
1624 | ||
1625 | #if defined(APPLE_ARM64_ARCH_FAMILY) | |
1626 | /* | |
1627 | * We're coherent on Apple ARM64 CPUs, so this could be a nop. However, | |
1628 | * if the region given to us is bad, it would be good to catch it and | |
1629 | * crash, ergo we still do the flush. | |
1630 | */ | |
1631 | assert((size & 0xFFFFFFFF00000000ULL) == 0); | |
1632 | FlushPoC_DcacheRegion(start, (uint32_t)size); | |
1633 | #else | |
1634 | #error "Make sure you don't need to xcall." | |
1635 | #endif | |
1636 | ||
1637 | /* Restore recovery function */ | |
1638 | thread->recover = old_recover; | |
1639 | ||
1640 | /* Return (caller does exception return) */ | |
1641 | } | |
1642 | ||
1643 | static void | |
1644 | icache_invalidate_trap(vm_map_address_t start, vm_map_size_t size) | |
1645 | { | |
1646 | vm_map_address_t end = start + size; | |
1647 | thread_t thread = current_thread(); | |
1648 | vm_offset_t old_recover = thread->recover; | |
1649 | ||
1650 | /* Check bounds */ | |
1651 | if (task_has_64BitAddr(current_task())) { | |
1652 | if (end > MACH_VM_MAX_ADDRESS) { | |
1653 | cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1)); | |
1654 | } | |
1655 | } else { | |
1656 | if (end > VM_MAX_ADDRESS) { | |
1657 | cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1)); | |
1658 | } | |
1659 | } | |
1660 | ||
1661 | if (start > end) { | |
1662 | cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1)); | |
1663 | } | |
1664 | ||
1665 | /* Set recovery function */ | |
1666 | thread->recover = (vm_address_t)cache_trap_recover; | |
1667 | ||
1668 | #if defined(APPLE_ARM64_ARCH_FAMILY) | |
1669 | /* Clean dcache to unification, except we're coherent on Apple ARM64 CPUs */ | |
1670 | #else | |
1671 | #error Make sure that skipping the dcache clean is right for this platform! | |
1672 | #endif | |
1673 | ||
1674 | /* Invalidate iCache to point of unification */ | |
1675 | assert((size & 0xFFFFFFFF00000000ULL) == 0); | |
1676 | InvalidatePoU_IcacheRegion(start, (uint32_t)size); | |
1677 | ||
1678 | /* Restore recovery function */ | |
1679 | thread->recover = old_recover; | |
1680 | ||
1681 | /* Return (caller does exception return) */ | |
1682 | } | |
1683 | ||
1684 | __attribute__((noreturn)) | |
1685 | void | |
1686 | platform_syscall(arm_saved_state_t *state) | |
1687 | { | |
1688 | uint32_t code; | |
1689 | ||
1690 | #define platform_syscall_kprintf(x...) /* kprintf("platform_syscall: " x) */ | |
1691 | ||
1692 | code = (uint32_t)get_saved_state_reg(state, 3); | |
1693 | switch (code) { | |
1694 | case 0: | |
1695 | /* I-Cache flush */ | |
1696 | platform_syscall_kprintf("icache flush requested.\n"); | |
1697 | icache_invalidate_trap(get_saved_state_reg(state, 0), get_saved_state_reg(state, 1)); | |
1698 | break; | |
1699 | case 1: | |
1700 | /* D-Cache flush */ | |
1701 | platform_syscall_kprintf("dcache flush requested.\n"); | |
1702 | dcache_flush_trap(get_saved_state_reg(state, 0), get_saved_state_reg(state, 1)); | |
1703 | break; | |
1704 | case 2: | |
1705 | /* set cthread */ | |
1706 | platform_syscall_kprintf("set cthread self.\n"); | |
1707 | thread_set_cthread_self(get_saved_state_reg(state, 0)); | |
1708 | break; | |
1709 | case 3: | |
1710 | /* get cthread */ | |
1711 | platform_syscall_kprintf("get cthread self.\n"); | |
1712 | set_saved_state_reg(state, 0, thread_get_cthread_self()); | |
1713 | break; | |
1714 | default: | |
1715 | platform_syscall_kprintf("unknown: %d\n", code); | |
1716 | break; | |
1717 | } | |
1718 | ||
1719 | thread_exception_return(); | |
1720 | } | |
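/*
 * Illustrative sketch (not part of this file): user space reaches the
 * selectors above through the platform-syscall trap, with the selector in
 * x3 and the arguments in x0/x1. On Darwin, the public cache-maintenance
 * helpers in <libkern/OSCacheControl.h> are the usual entry points; the
 * mapping to selectors 0 and 1 shown below is an assumption for
 * illustration, not something this file defines.
 */
#if 0	/* example only -- user-space code, not kernel code */
#include <libkern/OSCacheControl.h>

static void
example_make_code_executable(void *addr, size_t len)
{
	sys_dcache_flush(addr, len);        /* assumed to reach the dcache-flush selector (1) */
	sys_icache_invalidate(addr, len);   /* assumed to reach the icache-invalidate selector (0) */
}
#endif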
1721 | ||
1722 | static void | |
1723 | _enable_timebase_event_stream(uint32_t bit_index) | |
1724 | { | |
1725 | uint64_t cntkctl; /* One wants to use 32 bits, but "mrs" prefers it this way */ | |
1726 | ||
1727 | if (bit_index >= 64) { | |
1728 | panic("%s: invalid bit index (%u)", __FUNCTION__, bit_index); | |
1729 | } | |
1730 | ||
1731 | __asm__ volatile ("mrs %0, CNTKCTL_EL1" : "=r"(cntkctl)); | |
1732 | ||
1733 | cntkctl |= (bit_index << CNTKCTL_EL1_EVENTI_SHIFT); | |
1734 | cntkctl |= CNTKCTL_EL1_EVNTEN; | |
1735 | cntkctl |= CNTKCTL_EL1_EVENTDIR; /* trigger on the 1->0 transition; either edge would do */ | |
1736 | ||
1737 | /* | |
1738 | * If the SOC supports it (and it isn't broken), enable | |
1739 | * EL0 access to the physical timebase register. | |
1740 | */ | |
1741 | if (user_timebase_allowed()) { | |
1742 | cntkctl |= CNTKCTL_EL1_PL0PCTEN; | |
1743 | } | |
1744 | ||
1745 | __asm__ volatile ("msr CNTKCTL_EL1, %0" : : "r"(cntkctl)); | |
1746 | } | |
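/*
 * Sketch of the arithmetic behind the event stream (assumptions noted):
 * bit N of the counter toggles every 2^N ticks, so an event that fires on
 * only one transition direction (as configured above via
 * CNTKCTL_EL1_EVENTDIR) occurs every 2^(N+1) ticks. The helper below is
 * illustrative only and is not used by this file.
 */
#if 0	/* example only */
static uint64_t
example_event_stream_period_ticks(uint32_t bit_index)
{
	/* One event per full toggle cycle of the selected counter bit. */
	return 1ULL << (bit_index + 1);
}
#endif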
1747 | ||
1748 | /* | |
1749 | * Turn timer on, unmask that interrupt. | |
1750 | */ | |
1751 | static void | |
1752 | _enable_virtual_timer(void) | |
1753 | { | |
1754 | uint64_t cntvctl = CNTP_CTL_EL0_ENABLE; /* One wants to use 32 bits, but "msr" prefers a 64-bit register */ | |
1755 | ||
1756 | __asm__ volatile ("msr CNTP_CTL_EL0, %0" : : "r"(cntvctl)); | |
1757 | } | |
1758 | ||
1759 | void | |
1760 | fiq_context_init(boolean_t enable_fiq __unused) | |
1761 | { | |
1762 | #if defined(APPLE_ARM64_ARCH_FAMILY) | |
1763 | /* Could fill in our own ops here, if we needed them */ | |
1764 | uint64_t ticks_per_sec, ticks_per_event, events_per_sec; | |
1765 | uint32_t bit_index; | |
1766 | ||
1767 | ticks_per_sec = gPEClockFrequencyInfo.timebase_frequency_hz; | |
1768 | #if defined(ARM_BOARD_WFE_TIMEOUT_NS) | |
1769 | events_per_sec = 1000000000 / ARM_BOARD_WFE_TIMEOUT_NS; | |
1770 | #else | |
1771 | /* Default to 1usec (or as close as we can get) */ | |
1772 | events_per_sec = 1000000; | |
1773 | #endif | |
1774 | ticks_per_event = ticks_per_sec / events_per_sec; | |
1775 | bit_index = flsll(ticks_per_event) - 1; /* Highest bit set */ | |
1776 | ||
1777 | /* Round up to power of two */ | |
1778 | if ((ticks_per_event & ((1 << bit_index) - 1)) != 0) { | |
1779 | bit_index++; | |
1780 | } | |
1781 | ||
1782 | /* | |
1783 | * The timer can only trigger on rising or falling edge, | |
1784 | * not both; we don't care which we trigger on, but we | |
1785 | * do need to adjust which bit we are interested in to | |
1786 | * account for this. | |
1787 | */ | |
1788 | if (bit_index != 0) { | |
1789 | bit_index--; | |
1790 | } | |
1791 | _enable_timebase_event_stream(bit_index); | |
1792 | #else | |
1793 | #error Need a board configuration. | |
1794 | #endif | |
1795 | ||
1796 | /* Interrupts still disabled. */ | |
1797 | assert(ml_get_interrupts_enabled() == FALSE); | |
1798 | _enable_virtual_timer(); | |
1799 | } | |
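/*
 * Worked example of the bit_index computation above, assuming a 24 MHz
 * timebase (a typical value; the real rate comes from
 * gPEClockFrequencyInfo) and the default 1,000,000 events/sec:
 *
 *   ticks_per_event = 24000000 / 1000000 = 24
 *   flsll(24) - 1   = 4          (highest set bit of 0b11000)
 *   24 & 0xF != 0   -> round up  -> bit_index = 5
 *   edge adjustment -> bit_index = 4
 *
 * Events then fire on one transition direction of counter bit 4, i.e.
 * every 2^5 = 32 ticks, or roughly 1.33 us at 24 MHz -- the smallest
 * power-of-two period that is not shorter than the requested 1 us.
 */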
1800 | ||
1801 | /* | |
1802 | * ARM64_TODO: remove me (just a convenience while we don't have crashreporter) | |
1803 | */ | |
1804 | extern int copyinframe(vm_address_t, char *, boolean_t); | |
1805 | size_t _OSUserBacktrace(char *buffer, size_t bufsize); | |
1806 | ||
1807 | size_t _OSUserBacktrace(char *buffer, size_t bufsize) | |
1808 | { | |
1809 | thread_t thread = current_thread(); | |
1810 | boolean_t is64bit = thread_is_64bit(thread); | |
1811 | size_t trace_size_bytes = 0, lr_size; | |
1812 | vm_address_t frame_addr; // Should really be mach_vm_offset_t... | |
1813 | ||
1814 | if (bufsize < 8) { | |
1815 | return 0; | |
1816 | } | |
1817 | ||
1818 | if (get_threadtask(thread) == kernel_task) { | |
1819 | panic("%s: Should never be called from a kernel thread.", __FUNCTION__); | |
1820 | } | |
1821 | ||
1822 | frame_addr = get_saved_state_fp(thread->machine.upcb); | |
1823 | if (is64bit) { | |
1824 | uint64_t frame[2]; | |
1825 | lr_size = sizeof(frame[1]); | |
1826 | ||
1827 | *((uint64_t*)buffer) = get_saved_state_pc(thread->machine.upcb); | |
1828 | trace_size_bytes = lr_size; | |
1829 | ||
1830 | while (trace_size_bytes + lr_size < bufsize) { | |
1831 | if (frame_addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) { | |
1832 | break; | |
1833 | } | |
1834 | ||
1835 | if (0 != copyinframe(frame_addr, (char*)frame, TRUE)) { | |
1836 | break; | |
1837 | } | |
1838 | ||
1839 | *((uint64_t*)(buffer + trace_size_bytes)) = frame[1]; /* lr */ | |
1840 | frame_addr = frame[0]; | |
1841 | trace_size_bytes += lr_size; | |
1842 | ||
1843 | if (frame[0] == 0x0ULL) { | |
1844 | break; | |
1845 | } | |
1846 | } | |
1847 | } else { | |
1848 | uint32_t frame[2]; | |
1849 | lr_size = sizeof(frame[1]); | |
1850 | ||
1851 | *((uint32_t*)buffer) = (uint32_t)get_saved_state_pc(thread->machine.upcb); | |
1852 | trace_size_bytes = lr_size; | |
1853 | ||
1854 | while (trace_size_bytes + lr_size < bufsize) { | |
1855 | if (frame_addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) { | |
1856 | break; | |
1857 | } | |
1858 | ||
1859 | if (0 != copyinframe(frame_addr, (char*)frame, FALSE)) { | |
1860 | break; | |
1861 | } | |
1862 | ||
1863 | *((uint32_t*)(buffer + trace_size_bytes)) = frame[1]; /* lr */ | |
1864 | frame_addr = frame[0]; | |
1865 | trace_size_bytes += lr_size; | |
1866 | ||
1867 | if (frame[0] == 0x0ULL) { | |
1868 | break; | |
1869 | } | |
1870 | } | |
1871 | } | |
1872 | ||
1873 | return trace_size_bytes; | |
1874 | } | |
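/*
 * Illustrative sketch (assumption, not code from this file): a kernel-side
 * caller running in the context of a user thread would hand
 * _OSUserBacktrace() a byte buffer and read back an array of user-space
 * return addresses (the saved pc first, then the captured lr values). The
 * 64-bit interpretation is shown; a 32-bit task would be decoded with
 * uint32_t instead.
 */
#if 0	/* example only */
static void
example_dump_user_backtrace(void)
{
	char buf[512];
	size_t used = _OSUserBacktrace(buf, sizeof(buf));
	uint64_t *frames = (uint64_t *)(void *)buf;
	unsigned long count = (unsigned long)(used / sizeof(uint64_t));

	for (unsigned long i = 0; i < count; i++) {
		kprintf("frame[%lu] = 0x%llx\n", i, (unsigned long long)frames[i]);
	}
}
#endif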
1875 | ||
1876 | boolean_t | |
1877 | ml_delay_should_spin(uint64_t interval) | |
1878 | { | |
1879 | cpu_data_t *cdp = getCpuDatap(); | |
1880 | ||
1881 | if (cdp->cpu_idle_latency) { | |
1882 | return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE; | |
1883 | } else { | |
1884 | /* | |
1885 | * Early boot, latency is unknown. Err on the side of blocking, | |
1886 | * which should always be safe, even if slow | |
1887 | */ | |
1888 | return FALSE; | |
1889 | } | |
1890 | } | |
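/*
 * Illustrative sketch (assumption, not code from this file): a delay
 * routine could consult ml_delay_should_spin() to decide between
 * busy-waiting and blocking for the requested interval.
 * delay_for_interval() is assumed here to take an interval plus a
 * nanoseconds-per-unit scale factor; it stands in for whatever blocking
 * primitive the caller prefers.
 */
#if 0	/* example only */
static void
example_delay(uint64_t interval_abs, uint32_t interval_usecs)
{
	if (ml_delay_should_spin(interval_abs)) {
		uint64_t deadline = mach_absolute_time() + interval_abs;
		while (mach_absolute_time() < deadline) {
			/* spin; the wait is shorter than the idle entry/exit latency */
		}
	} else {
		delay_for_interval(interval_usecs, NSEC_PER_USEC);	/* block instead of spinning */
	}
}
#endif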
1891 | ||
1892 | boolean_t ml_thread_is64bit(thread_t thread) { | |
1893 | return (thread_is_64bit(thread)); | |
1894 | } | |
1895 | ||
1896 | void ml_timer_evaluate(void) { | |
1897 | } | |
1898 | ||
1899 | boolean_t | |
1900 | ml_timer_forced_evaluation(void) { | |
1901 | return FALSE; | |
1902 | } | |
1903 | ||
1904 | uint64_t | |
1905 | ml_energy_stat(thread_t t) { | |
1906 | return t->machine.energy_estimate_nj; | |
1907 | } | |
1908 | ||
1909 | ||
1910 | void | |
1911 | ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) { | |
1912 | #if CONFIG_EMBEDDED | |
1913 | /* | |
1914 | * For now: update the resource coalition stats of the | |
1915 | * current thread's coalition | |
1916 | */ | |
1917 | task_coalition_update_gpu_stats(current_task(), gpu_ns_delta); | |
1918 | #endif | |
1919 | } | |
1920 | ||
1921 | uint64_t | |
1922 | ml_gpu_stat(__unused thread_t t) { | |
1923 | return 0; | |
1924 | } | |
1925 | ||
1926 | #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME | |
1927 | static void | |
1928 | timer_state_event(boolean_t switch_to_kernel) | |
1929 | { | |
1930 | thread_t thread = current_thread(); | |
1931 | if (!thread->precise_user_kernel_time) return; | |
1932 | ||
1933 | processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data; | |
1934 | uint64_t now = ml_get_timebase(); | |
1935 | ||
1936 | timer_stop(pd->current_state, now); | |
1937 | pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state; | |
1938 | timer_start(pd->current_state, now); | |
1939 | ||
1940 | timer_stop(pd->thread_timer, now); | |
1941 | pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer; | |
1942 | timer_start(pd->thread_timer, now); | |
1943 | } | |
1944 | ||
1945 | void | |
1946 | timer_state_event_user_to_kernel(void) | |
1947 | { | |
1948 | timer_state_event(TRUE); | |
1949 | } | |
1950 | ||
1951 | void | |
1952 | timer_state_event_kernel_to_user(void) | |
1953 | { | |
1954 | timer_state_event(FALSE); | |
1955 | } | |
1956 | #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */ | |
1957 | ||
1958 | /* | |
1959 | * The following are required for parts of the kernel | |
1960 | * that cannot resolve these functions as inlines: | |
1961 | */ | |
1962 | extern thread_t current_act(void); | |
1963 | thread_t | |
1964 | current_act(void) | |
1965 | { | |
1966 | return current_thread_fast(); | |
1967 | } | |
1968 | ||
1969 | #undef current_thread | |
1970 | extern thread_t current_thread(void); | |
1971 | thread_t | |
1972 | current_thread(void) | |
1973 | { | |
1974 | return current_thread_fast(); | |
1975 | } | |
1976 | ||
1977 | typedef struct | |
1978 | { | |
1979 | ex_cb_t cb; | |
1980 | void *refcon; | |
1981 | } | |
1982 | ex_cb_info_t; | |
1983 | ||
1984 | ex_cb_info_t ex_cb_info[EXCB_CLASS_MAX]; | |
1985 | ||
1986 | /* | |
1987 | * Callback registration | |
1988 | * Currently we support only one registered callback per class but | |
1989 | * it should be possible to support more callbacks | |
1990 | */ | |
1991 | kern_return_t ex_cb_register( | |
1992 | ex_cb_class_t cb_class, | |
1993 | ex_cb_t cb, | |
1994 | void *refcon) | |
1995 | { | |
1996 | ex_cb_info_t *pInfo; | |
1997 | ||
1998 | if ((NULL == cb) || (cb_class >= EXCB_CLASS_MAX)) | |
1999 | { | |
2000 | return KERN_INVALID_VALUE; | |
2001 | } | |
2002 | pInfo = &ex_cb_info[cb_class]; | |
2003 | if (NULL == pInfo->cb) | |
2004 | { | |
2005 | pInfo->cb = cb; | |
2006 | pInfo->refcon = refcon; | |
2007 | return KERN_SUCCESS; | |
2008 | } | |
2009 | return KERN_FAILURE; | |
2010 | } | |
2011 | ||
2012 | /* | |
2013 | * Called internally by platform kernel to invoke the registered callback for class | |
2014 | */ | |
2015 | ex_cb_action_t ex_cb_invoke( | |
2016 | ex_cb_class_t cb_class, | |
2017 | vm_offset_t far) | |
2018 | { | |
2019 | ex_cb_info_t *pInfo = &ex_cb_info[cb_class]; | |
2020 | ex_cb_state_t state = {far}; | |
2021 | ||
2022 | if (cb_class >= EXCB_CLASS_MAX) | |
2023 | { | |
2024 | panic("Invalid exception callback class 0x%x\n", cb_class); | |
2025 | } | |
2026 | ||
2027 | if (pInfo->cb) | |
2028 | { | |
2029 | return pInfo->cb(cb_class, pInfo->refcon, &state); | |
2030 | } | |
2031 | return EXCB_ACTION_NONE; | |
2032 | } | |
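/*
 * Illustrative sketch (assumption, not code from this file): registering an
 * exception callback and how it is later invoked. The callback argument
 * order (class, refcon, state) mirrors the ex_cb_invoke() call above; the
 * exact prototype and the EXCB_CLASS_TEST1 class value are assumptions for
 * illustration.
 */
#if 0	/* example only */
static ex_cb_action_t
example_ex_cb(ex_cb_class_t cb_class, void *refcon, const ex_cb_state_t *state)
{
	/* state->far carries the faulting address passed to ex_cb_invoke() */
	(void)cb_class;
	(void)refcon;
	(void)state;
	return EXCB_ACTION_NONE;	/* no special handling requested */
}

static void
example_register_ex_cb(void)
{
	if (ex_cb_register(EXCB_CLASS_TEST1, example_ex_cb, NULL) != KERN_SUCCESS) {
		/* another callback already owns this class */
	}
}
#endif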
2033 |