apple/xnu.git: osfmk/arm64/machine_routines.c (blob 13aca14c10786b4251c8dde006b52e8b99a91e78)
1 /*
2 * Copyright (c) 2007-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <arm64/proc_reg.h>
30 #include <arm/machine_cpu.h>
31 #include <arm/cpu_internal.h>
32 #include <arm/cpuid.h>
33 #include <arm/io_map_entries.h>
34 #include <arm/cpu_data.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/caches_internal.h>
37 #include <arm/misc_protos.h>
38 #include <arm/machdep_call.h>
39 #include <arm/machine_routines.h>
40 #include <arm/rtclock.h>
41 #include <arm/cpuid_internal.h>
42 #include <arm/cpu_capabilities.h>
43 #include <console/serial_protos.h>
44 #include <kern/machine.h>
45 #include <prng/random.h>
46 #include <kern/startup.h>
47 #include <kern/thread.h>
48 #include <kern/timer_queue.h>
49 #include <mach/machine.h>
50 #include <machine/atomic.h>
51 #include <vm/pmap.h>
52 #include <vm/vm_page.h>
53 #include <sys/kdebug.h>
54 #include <kern/coalition.h>
55 #include <pexpert/device_tree.h>
56
57 #include <IOKit/IOPlatformExpert.h>
58
59 #if defined(KERNEL_INTEGRITY_KTRR)
60 #include <libkern/kernel_mach_header.h>
61 #endif
62
63 #include <libkern/section_keywords.h>
64
65 #if KPC
66 #include <kern/kpc.h>
67 #endif
68
69
70
71 static int max_cpus_initialized = 0;
72 #define MAX_CPUS_SET 0x1
73 #define MAX_CPUS_WAIT 0x2
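/*
 * max_cpus_initialized is a small state machine: MAX_CPUS_WAIT records that a
 * caller is blocked in ml_get_max_cpus() waiting for ml_init_max_cpus() to
 * publish the CPU counts; MAX_CPUS_SET means the counts are final.
 */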
74
75 uint32_t LockTimeOut;
76 uint32_t LockTimeOutUsec;
77 uint64_t TLockTimeOut;
78 uint64_t MutexSpin;
79 boolean_t is_clock_configured = FALSE;
80
81 uint32_t yield_delay_us = 0; /* Must be less than cpu_idle_latency to ensure ml_delay_should_spin is true */
82
83 #if CONFIG_NONFATAL_ASSERTS
84 extern int mach_assert;
85 #endif
86 extern volatile uint32_t debug_enabled;
87
88 extern vm_offset_t segLOWEST;
89 extern vm_offset_t segLOWESTTEXT;
90 extern vm_offset_t segLASTB;
91 extern unsigned long segSizeLAST;
92
93
94 void machine_conf(void);
95
96 thread_t Idle_context(void);
97
98 SECURITY_READ_ONLY_LATE(static uint32_t) cpu_phys_ids[MAX_CPUS] = {[0 ... MAX_CPUS - 1] = (uint32_t)-1};
99 SECURITY_READ_ONLY_LATE(static unsigned int) avail_cpus = 0;
100 SECURITY_READ_ONLY_LATE(static int) boot_cpu = -1;
101 SECURITY_READ_ONLY_LATE(static int) max_cpu_number = 0;
102 SECURITY_READ_ONLY_LATE(cluster_type_t) boot_cluster = CLUSTER_TYPE_SMP;
103
104 SECURITY_READ_ONLY_LATE(static uint32_t) fiq_eventi = UINT32_MAX;
105
106 lockdown_handler_t lockdown_handler;
107 void *lockdown_this;
108 lck_mtx_t lockdown_handler_lck;
109 lck_grp_t *lockdown_handler_grp;
110 int lockdown_done;
111
112 void ml_lockdown_init(void);
113 void ml_lockdown_run_handler(void);
114 uint32_t get_arm_cpu_version(void);
115
116
117 __dead2
118 void
119 ml_cpu_signal(unsigned int cpu_mpidr __unused)
120 {
121 panic("Platform does not support ACC Fast IPI");
122 }
123
124 __dead2
125 void
126 ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs)
127 {
128 (void)nanosecs;
129 panic("Platform does not support ACC Fast IPI");
130 }
131
132 uint64_t
133 ml_cpu_signal_deferred_get_timer()
134 {
135 return 0;
136 }
137
138 __dead2
139 void
140 ml_cpu_signal_deferred(unsigned int cpu_mpidr __unused)
141 {
142 panic("Platform does not support ACC Fast IPI deferral");
143 }
144
145 __dead2
146 void
147 ml_cpu_signal_retract(unsigned int cpu_mpidr __unused)
148 {
149 panic("Platform does not support ACC Fast IPI retraction");
150 }
151
152 void
153 machine_idle(void)
154 {
155 __builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
156 Idle_context();
157 __builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
158 }
159
160 void
161 init_vfp(void)
162 {
163 return;
164 }
165
166 boolean_t
167 get_vfp_enabled(void)
168 {
169 return TRUE;
170 }
171
172 void
173 OSSynchronizeIO(void)
174 {
175 __builtin_arm_dsb(DSB_SY);
176 }
177
178 uint64_t
179 get_aux_control(void)
180 {
181 uint64_t value;
182
183 MRS(value, "ACTLR_EL1");
184 return value;
185 }
186
187 uint64_t
188 get_mmu_control(void)
189 {
190 uint64_t value;
191
192 MRS(value, "SCTLR_EL1");
193 return value;
194 }
195
196 uint64_t
197 get_tcr(void)
198 {
199 uint64_t value;
200
201 MRS(value, "TCR_EL1");
202 return value;
203 }
204
205 boolean_t
206 ml_get_interrupts_enabled(void)
207 {
208 uint64_t value;
209
210 MRS(value, "DAIF");
211 if (value & DAIF_IRQF) {
212 return FALSE;
213 }
214 return TRUE;
215 }
216
217 pmap_paddr_t
218 get_mmu_ttb(void)
219 {
220 pmap_paddr_t value;
221
222 MRS(value, "TTBR0_EL1");
223 return value;
224 }
225
226 uint32_t
227 get_arm_cpu_version(void)
228 {
229 uint32_t value = machine_read_midr();
230
231 /* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */
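	/* For example, a MIDR with variant 0x2 and revision 0x1 composes to 0x21. */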
232 return ((value & MIDR_EL1_REV_MASK) >> MIDR_EL1_REV_SHIFT) | ((value & MIDR_EL1_VAR_MASK) >> (MIDR_EL1_VAR_SHIFT - 4));
233 }
234
235 /*
236 * user_cont_hwclock_allowed()
237 *
238 * Indicates whether we allow EL0 to read the physical timebase (CNTPCT_EL0)
239 * as a continuous time source (e.g. from mach_continuous_time)
240 */
241 boolean_t
242 user_cont_hwclock_allowed(void)
243 {
244 return FALSE;
245 }
246
247
248 uint8_t
249 user_timebase_type(void)
250 {
251 return USER_TIMEBASE_SPEC;
252 }
253
254 boolean_t
255 arm64_wfe_allowed(void)
256 {
257 return TRUE;
258 }
259
260 #if defined(KERNEL_INTEGRITY_KTRR)
261
262 uint64_t rorgn_begin __attribute__((section("__DATA, __const"))) = 0;
263 uint64_t rorgn_end __attribute__((section("__DATA, __const"))) = 0;
264 vm_offset_t amcc_base;
265
266 static void assert_unlocked(void);
267 static void assert_amcc_cache_disabled(void);
268 static void lock_amcc(void);
269 static void lock_mmu(uint64_t begin, uint64_t end);
270
271 void
272 rorgn_stash_range(void)
273 {
274 #if DEVELOPMENT || DEBUG
275 boolean_t rorgn_disable = FALSE;
276
277 PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable));
278
279 if (rorgn_disable) {
280         /* Take the early out if the boot arg is present; don't query any machine
281          * registers, to avoid a dependency on the amcc DT entry
282 */
283 return;
284 }
285 #endif
286
287         /* Get the AMCC values and stash them into rorgn_begin, rorgn_end.
288          * gPhysBase is the base of DRAM managed by xnu. We need DRAM_BASE because
289          * the AMCC RO region begin/end registers are in units of 16KB page
290          * numbers from DRAM_BASE, so we truncate gPhysBase to a 512MB granule
291 * and assert the value is the canonical DRAM_BASE PA of 0x8_0000_0000 for arm64.
292 */
293
294 uint64_t dram_base = gPhysBase & ~0x1FFFFFFFULL; /* 512MB */
295 assert(dram_base == 0x800000000ULL);
296
297 #if defined(KERNEL_INTEGRITY_KTRR)
298 uint64_t soc_base = 0;
299 DTEntry entryP = NULL;
300 uintptr_t *reg_prop = NULL;
301 uint32_t prop_size = 0;
302 int rc;
303
304 soc_base = pe_arm_get_soc_base_phys();
305 rc = DTFindEntry("name", "mcc", &entryP);
306 assert(rc == kSuccess);
307 rc = DTGetProperty(entryP, "reg", (void **)&reg_prop, &prop_size);
308 assert(rc == kSuccess);
309 amcc_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1));
310 #else
311 #error "KERNEL_INTEGRITY config error"
312 #endif
313
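	/* The AMCC RORGN base/end registers hold 16KB-page indices relative to
	 * dram_base; shifting by AMCC_PGSHIFT converts them back to physical addresses. */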
314 #if defined(KERNEL_INTEGRITY_KTRR)
315 assert(rRORGNENDADDR > rRORGNBASEADDR);
316 rorgn_begin = (rRORGNBASEADDR << AMCC_PGSHIFT) + dram_base;
317 rorgn_end = (rRORGNENDADDR << AMCC_PGSHIFT) + dram_base;
318 #else
319 #error KERNEL_INTEGRITY config error
320 #endif /* defined (KERNEL_INTEGRITY_KTRR) */
321 }
322
323 static void
324 assert_unlocked()
325 {
326 uint64_t ktrr_lock = 0;
327 uint32_t rorgn_lock = 0;
328
329 assert(amcc_base);
330 #if defined(KERNEL_INTEGRITY_KTRR)
331 rorgn_lock = rRORGNLOCK;
332 ktrr_lock = __builtin_arm_rsr64(ARM64_REG_KTRR_LOCK_EL1);
333 #else
334 #error KERNEL_INTEGRITY config error
335 #endif /* defined(KERNEL_INTEGRITY_KTRR) */
336
337 assert(!ktrr_lock);
338 assert(!rorgn_lock);
339 }
340
341 static void
342 lock_amcc()
343 {
344 #if defined(KERNEL_INTEGRITY_KTRR)
345 rRORGNLOCK = 1;
346 __builtin_arm_isb(ISB_SY);
347 #else
348 #error KERNEL_INTEGRITY config error
349 #endif
350 }
351
352 static void
353 lock_mmu(uint64_t begin, uint64_t end)
354 {
355 #if defined(KERNEL_INTEGRITY_KTRR)
356
357 __builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin);
358 __builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end);
359 __builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL);
360
361 /* flush TLB */
362
363 __builtin_arm_isb(ISB_SY);
364 flush_mmu_tlb();
365
366 #else /* defined(KERNEL_INTEGRITY_KTRR) */
367 #error KERNEL_INTEGRITY config error
368 #endif /* defined(KERNEL_INTEGRITY_KTRR) */
369 }
370
371 static void
372 assert_amcc_cache_disabled()
373 {
374 #if defined(KERNEL_INTEGRITY_KTRR)
375 assert((rMCCGEN & 1) == 0); /* assert M$ disabled or LLC clean will be unreliable */
376 #else
377 #error KERNEL_INTEGRITY config error
378 #endif
379 }
380
381 /*
382 * void rorgn_lockdown(void)
383 *
384 * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked
385 *
386 * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in
387 * start.s:start_cpu() for subsequent wake/resume of all cores
388 */
389 void
390 rorgn_lockdown(void)
391 {
392 vm_offset_t ktrr_begin, ktrr_end;
393 unsigned long last_segsz;
394
395 #if DEVELOPMENT || DEBUG
396 boolean_t ktrr_disable = FALSE;
397
398 PE_parse_boot_argn("-unsafe_kernel_text", &ktrr_disable, sizeof(ktrr_disable));
399
400 if (ktrr_disable) {
401 /*
402                  * Take the early out if the boot arg is present; since the amcc DT entry may be
403                  * missing, we can't assert that iboot hasn't programmed the RO region lockdown registers.
404 */
405 goto out;
406 }
407 #endif /* DEVELOPMENT || DEBUG */
408
409 assert_unlocked();
410
411 /* [x] - Use final method of determining all kernel text range or expect crashes */
412 ktrr_begin = segLOWEST;
413 assert(ktrr_begin && gVirtBase && gPhysBase);
414
415 ktrr_begin = kvtophys(ktrr_begin);
416
417 ktrr_end = kvtophys(segLASTB);
418 last_segsz = segSizeLAST;
419 #if defined(KERNEL_INTEGRITY_KTRR)
420 /* __LAST is not part of the MMU KTRR region (it is however part of the AMCC KTRR region) */
421 ktrr_end = (ktrr_end - 1) & ~AMCC_PGMASK;
422 /* ensure that iboot and xnu agree on the ktrr range */
423 assert(rorgn_begin == ktrr_begin && rorgn_end == (ktrr_end + last_segsz));
424 /* assert that __LAST segment containing privileged insns is only a single page */
425 assert(last_segsz == PAGE_SIZE);
426 #endif
427
428
429 #if DEBUG || DEVELOPMENT
430 printf("KTRR Begin: %p End: %p, setting lockdown\n", (void *)ktrr_begin, (void *)ktrr_end);
431 #endif
432
433 /* [x] - ensure all in flight writes are flushed to AMCC before enabling RO Region Lock */
434
435 assert_amcc_cache_disabled();
436
437 CleanPoC_DcacheRegion_Force(phystokv(ktrr_begin),
438 (unsigned)((ktrr_end + last_segsz) - ktrr_begin + AMCC_PGMASK));
439
440 lock_amcc();
441
442 lock_mmu(ktrr_begin, ktrr_end);
443
444 #if DEVELOPMENT || DEBUG
445 out:
446 #endif
447
448 /* now we can run lockdown handler */
449 ml_lockdown_run_handler();
450 }
451
452 #endif /* defined(KERNEL_INTEGRITY_KTRR)*/
453
454 void
455 machine_startup(__unused boot_args * args)
456 {
457 int boot_arg;
458
459
460 #if CONFIG_NONFATAL_ASSERTS
461 PE_parse_boot_argn("assert", &mach_assert, sizeof(mach_assert));
462 #endif
463
464 if (PE_parse_boot_argn("preempt", &boot_arg, sizeof(boot_arg))) {
465 default_preemption_rate = boot_arg;
466 }
467 if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof(boot_arg))) {
468 default_bg_preemption_rate = boot_arg;
469 }
470
471 PE_parse_boot_argn("yield_delay_us", &yield_delay_us, sizeof(yield_delay_us));
472
473 machine_conf();
474
475 /*
476 * Kick off the kernel bootstrap.
477 */
478 kernel_bootstrap();
479 /* NOTREACHED */
480 }
481
482 void
483 machine_lockdown_preflight(void)
484 {
485 #if CONFIG_KERNEL_INTEGRITY
486
487 #if defined(KERNEL_INTEGRITY_KTRR)
488 rorgn_stash_range();
489 #endif
490
491 #endif
492 }
493
494 void
495 machine_lockdown(void)
496 {
497 #if CONFIG_KERNEL_INTEGRITY
498 #if KERNEL_INTEGRITY_WT
499 /* Watchtower
500 *
501 * Notify the monitor about the completion of early kernel bootstrap.
502 * From this point forward it will enforce the integrity of kernel text,
503 * rodata and page tables.
504 */
505
506 #ifdef MONITOR
507 monitor_call(MONITOR_LOCKDOWN, 0, 0, 0);
508 #endif
509 #endif /* KERNEL_INTEGRITY_WT */
510
511
512 #if defined(KERNEL_INTEGRITY_KTRR)
513 /* KTRR
514 *
515 * Lock physical KTRR region. KTRR region is read-only. Memory outside
516 * the region is not executable at EL1.
517 */
518
519 rorgn_lockdown();
520 #endif /* defined(KERNEL_INTEGRITY_KTRR)*/
521
522
523 #endif /* CONFIG_KERNEL_INTEGRITY */
524 }
525
526 char *
527 machine_boot_info(
528 __unused char *buf,
529 __unused vm_size_t size)
530 {
531 return PE_boot_args();
532 }
533
534 void
535 machine_conf(void)
536 {
537 /*
538 * This is known to be inaccurate. mem_size should always be capped at 2 GB
539 */
540 machine_info.memory_size = (uint32_t)mem_size;
541 }
542
543 void
544 machine_init(void)
545 {
546 debug_log_init();
547 clock_config();
548 is_clock_configured = TRUE;
549 if (debug_enabled) {
550 pmap_map_globals();
551 }
552 }
553
554 void
555 slave_machine_init(__unused void *param)
556 {
557 cpu_machine_init(); /* Initialize the processor */
558 clock_init(); /* Init the clock */
559 }
560
561 /*
562 * Routine: machine_processor_shutdown
563 * Function:
564 */
565 thread_t
566 machine_processor_shutdown(
567 __unused thread_t thread,
568 void (*doshutdown)(processor_t),
569 processor_t processor)
570 {
571 return Shutdown_context(doshutdown, processor);
572 }
573
574 /*
575 * Routine: ml_init_max_cpus
576 * Function:
577 */
578 void
579 ml_init_max_cpus(unsigned int max_cpus)
580 {
581 boolean_t current_state;
582
583 current_state = ml_set_interrupts_enabled(FALSE);
584 if (max_cpus_initialized != MAX_CPUS_SET) {
585 machine_info.max_cpus = max_cpus;
586 machine_info.physical_cpu_max = max_cpus;
587 machine_info.logical_cpu_max = max_cpus;
588 if (max_cpus_initialized == MAX_CPUS_WAIT) {
589 thread_wakeup((event_t) &max_cpus_initialized);
590 }
591 max_cpus_initialized = MAX_CPUS_SET;
592 }
593 (void) ml_set_interrupts_enabled(current_state);
594 }
595
596 /*
597 * Routine: ml_get_max_cpus
598 * Function:
599 */
600 unsigned int
601 ml_get_max_cpus(void)
602 {
603 boolean_t current_state;
604
605 current_state = ml_set_interrupts_enabled(FALSE);
606 if (max_cpus_initialized != MAX_CPUS_SET) {
607 max_cpus_initialized = MAX_CPUS_WAIT;
608 assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT);
609 (void) thread_block(THREAD_CONTINUE_NULL);
610 }
611 (void) ml_set_interrupts_enabled(current_state);
612 return machine_info.max_cpus;
613 }
614
615 /*
616 * Routine: ml_init_lock_timeout
617 * Function:
618 */
619 void
620 ml_init_lock_timeout(void)
621 {
622 uint64_t abstime;
623 uint64_t mtxspin;
624 uint64_t default_timeout_ns = NSEC_PER_SEC >> 2;
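	/* NSEC_PER_SEC >> 2 is 250ms, the default lock timeout unless overridden by the slto_us boot-arg. */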
625 uint32_t slto;
626
627 if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
628 default_timeout_ns = slto * NSEC_PER_USEC;
629 }
630
631 nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
632 LockTimeOutUsec = (uint32_t) (default_timeout_ns / NSEC_PER_USEC);
633 LockTimeOut = (uint32_t)abstime;
634
635 if (PE_parse_boot_argn("tlto_us", &slto, sizeof(slto))) {
636 nanoseconds_to_absolutetime(slto * NSEC_PER_USEC, &abstime);
637 TLockTimeOut = abstime;
638 } else {
639 TLockTimeOut = LockTimeOut >> 1;
640 }
641
642 if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
643 if (mtxspin > USEC_PER_SEC >> 4) {
644 mtxspin = USEC_PER_SEC >> 4;
645 }
646 nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
647 } else {
648 nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
649 }
650 MutexSpin = abstime;
651 }
652
653 /*
654 * This is called from the machine-independent routine cpu_up()
655 * to perform machine-dependent info updates.
656 */
657 void
658 ml_cpu_up(void)
659 {
660 os_atomic_inc(&machine_info.physical_cpu, relaxed);
661 os_atomic_inc(&machine_info.logical_cpu, relaxed);
662 }
663
664 /*
665 * This is called from the machine-independent routine cpu_down()
666 * to perform machine-dependent info updates.
667 */
668 void
669 ml_cpu_down(void)
670 {
671 cpu_data_t *cpu_data_ptr;
672
673 os_atomic_dec(&machine_info.physical_cpu, relaxed);
674 os_atomic_dec(&machine_info.logical_cpu, relaxed);
675
676 /*
677          * If we want to deal with outstanding IPIs, we need to
678          * do so relatively early in the processor_doshutdown path,
679 * as we pend decrementer interrupts using the IPI
680 * mechanism if we cannot immediately service them (if
681 * IRQ is masked). Do so now.
682 *
683 * We aren't on the interrupt stack here; would it make
684 * more sense to disable signaling and then enable
685 * interrupts? It might be a bit cleaner.
686 */
687 cpu_data_ptr = getCpuDatap();
688 cpu_data_ptr->cpu_running = FALSE;
689
690 if (cpu_data_ptr != &BootCpuData) {
691 /*
692 * Move all of this cpu's timers to the master/boot cpu,
693 * and poke it in case there's a sooner deadline for it to schedule.
694 */
695 timer_queue_shutdown(&cpu_data_ptr->rtclock_timer.queue);
696 cpu_xcall(BootCpuData.cpu_number, &timer_queue_expire_local, NULL);
697 }
698
699 cpu_signal_handler_internal(TRUE);
700 }
701
702 /*
703 * Routine: ml_cpu_get_info
704 * Function:
705 */
706 void
707 ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
708 {
709 cache_info_t *cpuid_cache_info;
710
711 cpuid_cache_info = cache_info();
712 ml_cpu_info->vector_unit = 0;
713 ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
714 ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
715 ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;
716
717 #if (__ARM_ARCH__ >= 7)
718 ml_cpu_info->l2_settings = 1;
719 ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
720 #else
721 ml_cpu_info->l2_settings = 0;
722 ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
723 #endif
724 ml_cpu_info->l3_settings = 0;
725 ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
726 }
727
728 unsigned int
729 ml_get_machine_mem(void)
730 {
731 return machine_info.memory_size;
732 }
733
734 __attribute__((noreturn))
735 void
736 halt_all_cpus(boolean_t reboot)
737 {
738 if (reboot) {
739 printf("MACH Reboot\n");
740 PEHaltRestart(kPERestartCPU);
741 } else {
742 printf("CPU halted\n");
743 PEHaltRestart(kPEHaltCPU);
744 }
745 while (1) {
746 ;
747 }
748 }
749
750 __attribute__((noreturn))
751 void
752 halt_cpu(void)
753 {
754 halt_all_cpus(FALSE);
755 }
756
757 /*
758 * Routine: machine_signal_idle
759 * Function:
760 */
761 void
762 machine_signal_idle(
763 processor_t processor)
764 {
765 cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
766 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
767 }
768
769 void
770 machine_signal_idle_deferred(
771 processor_t processor)
772 {
773 cpu_signal_deferred(processor_to_cpu_datap(processor));
774 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
775 }
776
777 void
778 machine_signal_idle_cancel(
779 processor_t processor)
780 {
781 cpu_signal_cancel(processor_to_cpu_datap(processor));
782 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
783 }
784
785 /*
786 * Routine: ml_install_interrupt_handler
787 * Function: Initialize Interrupt Handler
788 */
789 void
790 ml_install_interrupt_handler(
791 void *nub,
792 int source,
793 void *target,
794 IOInterruptHandler handler,
795 void *refCon)
796 {
797 cpu_data_t *cpu_data_ptr;
798 boolean_t current_state;
799
800 current_state = ml_set_interrupts_enabled(FALSE);
801 cpu_data_ptr = getCpuDatap();
802
803 cpu_data_ptr->interrupt_nub = nub;
804 cpu_data_ptr->interrupt_source = source;
805 cpu_data_ptr->interrupt_target = target;
806 cpu_data_ptr->interrupt_handler = handler;
807 cpu_data_ptr->interrupt_refCon = refCon;
808
809 cpu_data_ptr->interrupts_enabled = TRUE;
810 (void) ml_set_interrupts_enabled(current_state);
811
812 initialize_screen(NULL, kPEAcquireScreen);
813 }
814
815 /*
816 * Routine: ml_init_interrupt
817 * Function: Initialize Interrupts
818 */
819 void
820 ml_init_interrupt(void)
821 {
822 }
823
824 /*
825 * Routine: ml_init_timebase
826  *      Function: register and set up Timebase, Decrementer services
827 */
828 void
829 ml_init_timebase(
830 void *args,
831 tbd_ops_t tbd_funcs,
832 vm_offset_t int_address,
833 vm_offset_t int_value __unused)
834 {
835 cpu_data_t *cpu_data_ptr;
836
837 cpu_data_ptr = (cpu_data_t *)args;
838
839 if ((cpu_data_ptr == &BootCpuData)
840 && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
841 rtclock_timebase_func = *tbd_funcs;
842 rtclock_timebase_addr = int_address;
843 }
844 }
845
846 void
847 ml_parse_cpu_topology(void)
848 {
849 DTEntry entry, child __unused;
850 OpaqueDTEntryIterator iter;
851 uint32_t cpu_boot_arg;
852 int err;
853
854 cpu_boot_arg = MAX_CPUS;
855
856 PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg));
857
858 err = DTLookupEntry(NULL, "/cpus", &entry);
859 assert(err == kSuccess);
860
861 err = DTInitEntryIterator(entry, &iter);
862 assert(err == kSuccess);
863
864 while (kSuccess == DTIterateEntries(&iter, &child)) {
865 unsigned int propSize;
866 void *prop = NULL;
867 int cpu_id = avail_cpus++;
868
869 if (kSuccess == DTGetProperty(child, "cpu-id", &prop, &propSize)) {
870 cpu_id = *((int32_t*)prop);
871 }
872
873 assert(cpu_id < MAX_CPUS);
874 assert(cpu_phys_ids[cpu_id] == (uint32_t)-1);
875
876 if (boot_cpu == -1) {
877 if (kSuccess != DTGetProperty(child, "state", &prop, &propSize)) {
878 panic("unable to retrieve state for cpu %d", cpu_id);
879 }
880
881 if (strncmp((char*)prop, "running", propSize) == 0) {
882 boot_cpu = cpu_id;
883 }
884 }
885 if (kSuccess != DTGetProperty(child, "reg", &prop, &propSize)) {
886 panic("unable to retrieve physical ID for cpu %d", cpu_id);
887 }
888
889 cpu_phys_ids[cpu_id] = *((uint32_t*)prop);
890
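		/* Track the highest logical cpu id we will actually use: the boot cpu always
		 * counts, other cpus only while we are still within the cpus= boot-arg limit. */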
891 if ((cpu_id > max_cpu_number) && ((cpu_id == boot_cpu) || (avail_cpus <= cpu_boot_arg))) {
892 max_cpu_number = cpu_id;
893 }
894 }
895
896 if (avail_cpus > cpu_boot_arg) {
897 avail_cpus = cpu_boot_arg;
898 }
899
900 if (avail_cpus == 0) {
901 panic("No cpus found!");
902 }
903
904 if (boot_cpu == -1) {
905 panic("unable to determine boot cpu!");
906 }
907
908 /*
909 * Set TPIDRRO_EL0 to indicate the correct cpu number, as we may
910 * not be booting from cpu 0. Userspace will consume the current
911 * CPU number through this register. For non-boot cores, this is
912 * done in start.s (start_cpu) using the cpu_number field of the
913 * per-cpu data object.
914 */
915 assert(__builtin_arm_rsr64("TPIDRRO_EL0") == 0);
916 __builtin_arm_wsr64("TPIDRRO_EL0", (uint64_t)boot_cpu);
917 }
918
919 unsigned int
920 ml_get_cpu_count(void)
921 {
922 return avail_cpus;
923 }
924
925 int
926 ml_get_boot_cpu_number(void)
927 {
928 return boot_cpu;
929 }
930
931 cluster_type_t
932 ml_get_boot_cluster(void)
933 {
934 return boot_cluster;
935 }
936
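/*
 * Map a physical CPU ID (the value stashed from the device tree "reg" property)
 * back to its logical cpu number; returns -1 if the ID is not known.
 */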
937 int
938 ml_get_cpu_number(uint32_t phys_id)
939 {
940 for (int log_id = 0; log_id <= ml_get_max_cpu_number(); ++log_id) {
941 if (cpu_phys_ids[log_id] == phys_id) {
942 return log_id;
943 }
944 }
945 return -1;
946 }
947
948 int
949 ml_get_max_cpu_number(void)
950 {
951 return max_cpu_number;
952 }
953
954
955 void
956 ml_lockdown_init()
957 {
958 lockdown_handler_grp = lck_grp_alloc_init("lockdown_handler", NULL);
959 assert(lockdown_handler_grp != NULL);
960
961 lck_mtx_init(&lockdown_handler_lck, lockdown_handler_grp, NULL);
962
963 }
964
965 kern_return_t
966 ml_lockdown_handler_register(lockdown_handler_t f, void *this)
967 {
968 if (lockdown_handler || !f) {
969 return KERN_FAILURE;
970 }
971
972 lck_mtx_lock(&lockdown_handler_lck);
973 lockdown_handler = f;
974 lockdown_this = this;
975
976 #if !(defined(KERNEL_INTEGRITY_KTRR))
977 lockdown_done = 1;
978 lockdown_handler(this);
979 #else
980 if (lockdown_done) {
981 lockdown_handler(this);
982 }
983 #endif
984 lck_mtx_unlock(&lockdown_handler_lck);
985
986 return KERN_SUCCESS;
987 }
988
989 void
990 ml_lockdown_run_handler()
991 {
992 lck_mtx_lock(&lockdown_handler_lck);
993 assert(!lockdown_done);
994
995 lockdown_done = 1;
996 if (lockdown_handler) {
997 lockdown_handler(lockdown_this);
998 }
999 lck_mtx_unlock(&lockdown_handler_lck);
1000 }
1001
1002 kern_return_t
1003 ml_processor_register(ml_processor_info_t *in_processor_info,
1004 processor_t *processor_out, ipi_handler_t *ipi_handler_out,
1005 perfmon_interrupt_handler_func *pmi_handler_out)
1006 {
1007 cpu_data_t *this_cpu_datap;
1008 processor_set_t pset;
1009 boolean_t is_boot_cpu;
1010 static unsigned int reg_cpu_count = 0;
1011
1012 if (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number()) {
1013 return KERN_FAILURE;
1014 }
1015
1016 if ((unsigned int)OSIncrementAtomic((SInt32*)&reg_cpu_count) >= avail_cpus) {
1017 return KERN_FAILURE;
1018 }
1019
1020 if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
1021 is_boot_cpu = FALSE;
1022 this_cpu_datap = cpu_data_alloc(FALSE);
1023 cpu_data_init(this_cpu_datap);
1024 } else {
1025 this_cpu_datap = &BootCpuData;
1026 is_boot_cpu = TRUE;
1027 }
1028
1029 assert(in_processor_info->log_id < MAX_CPUS);
1030
1031 this_cpu_datap->cpu_id = in_processor_info->cpu_id;
1032
1033 this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
1034 if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) {
1035 goto processor_register_error;
1036 }
1037
1038 if (!is_boot_cpu) {
1039 this_cpu_datap->cpu_number = in_processor_info->log_id;
1040
1041 if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) {
1042 goto processor_register_error;
1043 }
1044 }
1045
1046 this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle;
1047 this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch;
1048 nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
1049 this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);
1050
1051 this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer;
1052 this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;
1053
1054 this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler;
1055 this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
1056 this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
1057 this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;
1058
1059 this_cpu_datap->cpu_cluster_type = in_processor_info->cluster_type;
1060 this_cpu_datap->cpu_cluster_id = in_processor_info->cluster_id;
1061 this_cpu_datap->cpu_l2_id = in_processor_info->l2_cache_id;
1062 this_cpu_datap->cpu_l2_size = in_processor_info->l2_cache_size;
1063 this_cpu_datap->cpu_l3_id = in_processor_info->l3_cache_id;
1064 this_cpu_datap->cpu_l3_size = in_processor_info->l3_cache_size;
1065
1066 this_cpu_datap->cluster_master = is_boot_cpu;
1067
1068 pset = pset_find(in_processor_info->cluster_id, processor_pset(master_processor));
1069 assert(pset != NULL);
1070 kprintf("%s>cpu_id %p cluster_id %d cpu_number %d is type %d\n", __FUNCTION__, in_processor_info->cpu_id, in_processor_info->cluster_id, this_cpu_datap->cpu_number, in_processor_info->cluster_type);
1071
1072 if (!is_boot_cpu) {
1073 processor_init((struct processor *)this_cpu_datap->cpu_processor,
1074 this_cpu_datap->cpu_number, pset);
1075
1076 if (this_cpu_datap->cpu_l2_access_penalty) {
1077 /*
1078 * Cores that have a non-zero L2 access penalty compared
1079 * to the boot processor should be de-prioritized by the
1080 * scheduler, so that threads use the cores with better L2
1081 * preferentially.
1082 */
1083 processor_set_primary(this_cpu_datap->cpu_processor,
1084 master_processor);
1085 }
1086 }
1087
1088 *processor_out = this_cpu_datap->cpu_processor;
1089 *ipi_handler_out = cpu_signal_handler;
1090 #if CPMU_AIC_PMI && MONOTONIC
1091 *pmi_handler_out = mt_cpmu_aic_pmi;
1092 #else
1093 *pmi_handler_out = NULL;
1094 #endif /* CPMU_AIC_PMI && MONOTONIC */
1095 if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) {
1096 *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;
1097 }
1098
1099 #if KPC
1100 if (kpc_register_cpu(this_cpu_datap) != TRUE) {
1101 goto processor_register_error;
1102 }
1103 #endif /* KPC */
1104
1105 if (!is_boot_cpu) {
1106 random_cpu_init(this_cpu_datap->cpu_number);
1107 // now let next CPU register itself
1108 OSIncrementAtomic((SInt32*)&real_ncpus);
1109 }
1110
1111 return KERN_SUCCESS;
1112
1113 processor_register_error:
1114 #if KPC
1115 kpc_unregister_cpu(this_cpu_datap);
1116 #endif /* KPC */
1117 if (!is_boot_cpu) {
1118 cpu_data_free(this_cpu_datap);
1119 }
1120
1121 return KERN_FAILURE;
1122 }
1123
1124 void
1125 ml_init_arm_debug_interface(
1126 void * in_cpu_datap,
1127 vm_offset_t virt_address)
1128 {
1129 ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
1130 do_debugid();
1131 }
1132
1133 /*
1134 * Routine: init_ast_check
1135 * Function:
1136 */
1137 void
1138 init_ast_check(
1139 __unused processor_t processor)
1140 {
1141 }
1142
1143 /*
1144 * Routine: cause_ast_check
1145 * Function:
1146 */
1147 void
1148 cause_ast_check(
1149 processor_t processor)
1150 {
1151 if (current_processor() != processor) {
1152 cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
1153 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
1154 }
1155 }
1156
1157 extern uint32_t cpu_idle_count;
1158
1159 void
1160 ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
1161 {
1162 *icp = ml_at_interrupt_context();
1163 *pidlep = (cpu_idle_count == real_ncpus);
1164 }
1165
1166 /*
1167 * Routine: ml_cause_interrupt
1168 * Function: Generate a fake interrupt
1169 */
1170 void
1171 ml_cause_interrupt(void)
1172 {
1173 return; /* BS_XXX */
1174 }
1175
1176 /* Map memory-mapped IO space */
1177 vm_offset_t
1178 ml_io_map(
1179 vm_offset_t phys_addr,
1180 vm_size_t size)
1181 {
1182 return io_map(phys_addr, size, VM_WIMG_IO);
1183 }
1184
1185 /* Map memory-mapped IO space (with protections specified) */
1186 vm_offset_t
1187 ml_io_map_with_prot(
1188 vm_offset_t phys_addr,
1189 vm_size_t size,
1190 vm_prot_t prot)
1191 {
1192 return io_map_with_prot(phys_addr, size, VM_WIMG_IO, prot);
1193 }
1194
1195 vm_offset_t
1196 ml_io_map_wcomb(
1197 vm_offset_t phys_addr,
1198 vm_size_t size)
1199 {
1200 return io_map(phys_addr, size, VM_WIMG_WCOMB);
1201 }
1202
1203 /* boot memory allocation */
1204 vm_offset_t
1205 ml_static_malloc(
1206 __unused vm_size_t size)
1207 {
1208 return (vm_offset_t) NULL;
1209 }
1210
1211 vm_map_address_t
1212 ml_map_high_window(
1213 vm_offset_t phys_addr,
1214 vm_size_t len)
1215 {
1216 return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
1217 }
1218
1219 vm_offset_t
1220 ml_static_ptovirt(
1221 vm_offset_t paddr)
1222 {
1223 return phystokv(paddr);
1224 }
1225
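/*
 * ml_static_slide()/ml_static_unslide() convert between link-time (unslid) and
 * runtime (slid) kernel virtual addresses using vm_kernel_slide and the
 * gVirtBase/gPhysBase relationship.
 */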
1226 vm_offset_t
1227 ml_static_slide(
1228 vm_offset_t vaddr)
1229 {
1230 return phystokv(vaddr + vm_kernel_slide - gVirtBase + gPhysBase);
1231 }
1232
1233 vm_offset_t
1234 ml_static_unslide(
1235 vm_offset_t vaddr)
1236 {
1237 return ml_static_vtop(vaddr) - gPhysBase + gVirtBase - vm_kernel_slide;
1238 }
1239
1240 extern tt_entry_t *arm_kva_to_tte(vm_offset_t va);
1241
1242 kern_return_t
1243 ml_static_protect(
1244 vm_offset_t vaddr, /* kernel virtual address */
1245 vm_size_t size,
1246 vm_prot_t new_prot)
1247 {
1248 pt_entry_t arm_prot = 0;
1249 pt_entry_t arm_block_prot = 0;
1250 vm_offset_t vaddr_cur;
1251 ppnum_t ppn;
1252 kern_return_t result = KERN_SUCCESS;
1253
1254 if (vaddr < VM_MIN_KERNEL_ADDRESS) {
1255 panic("ml_static_protect(): %p < %p", (void *) vaddr, (void *) VM_MIN_KERNEL_ADDRESS);
1256 return KERN_FAILURE;
1257 }
1258
1259 assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */
1260
1261 if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
1262 panic("ml_static_protect(): WX request on %p", (void *) vaddr);
1263 }
1264
1265 /* Set up the protection bits, and block bits so we can validate block mappings. */
1266 if (new_prot & VM_PROT_WRITE) {
1267 arm_prot |= ARM_PTE_AP(AP_RWNA);
1268 arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
1269 } else {
1270 arm_prot |= ARM_PTE_AP(AP_RONA);
1271 arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
1272 }
1273
1274 arm_prot |= ARM_PTE_NX;
1275 arm_block_prot |= ARM_TTE_BLOCK_NX;
1276
1277 if (!(new_prot & VM_PROT_EXECUTE)) {
1278 arm_prot |= ARM_PTE_PNX;
1279 arm_block_prot |= ARM_TTE_BLOCK_PNX;
1280 }
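	/* These mappings are never user-executable (ARM_PTE_NX is always set); kernel
	 * execute (ARM_PTE_PNX) is denied only when VM_PROT_EXECUTE is not requested. */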
1281
1282 for (vaddr_cur = vaddr;
1283 vaddr_cur < trunc_page_64(vaddr + size);
1284 vaddr_cur += PAGE_SIZE) {
1285 ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
1286 if (ppn != (vm_offset_t) NULL) {
1287 tt_entry_t *tte2;
1288 pt_entry_t *pte_p;
1289 pt_entry_t ptmp;
1290
1291
1292 tte2 = arm_kva_to_tte(vaddr_cur);
1293
1294 if (((*tte2) & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
1295 if ((((*tte2) & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
1296 ((*tte2 & (ARM_TTE_BLOCK_NXMASK | ARM_TTE_BLOCK_PNXMASK | ARM_TTE_BLOCK_APMASK)) == arm_block_prot)) {
1297 /*
1298 * We can support ml_static_protect on a block mapping if the mapping already has
1299 * the desired protections. We still want to run checks on a per-page basis.
1300 */
1301 continue;
1302 }
1303
1304 result = KERN_FAILURE;
1305 break;
1306 }
1307
1308 pte_p = (pt_entry_t *)&((tt_entry_t*)(phystokv((*tte2) & ARM_TTE_TABLE_MASK)))[(((vaddr_cur) & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT)];
1309 ptmp = *pte_p;
1310
1311 if ((ptmp & ARM_PTE_HINT_MASK) && ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot)) {
1312 /*
1313 * The contiguous hint is similar to a block mapping for ml_static_protect; if the existing
1314 * protections do not match the desired protections, then we will fail (as we cannot update
1315 * this mapping without updating other mappings as well).
1316 */
1317 result = KERN_FAILURE;
1318 break;
1319 }
1320
1321 __unreachable_ok_push
1322 if (TEST_PAGE_RATIO_4) {
1323 {
1324 unsigned int i;
1325 pt_entry_t *ptep_iter;
1326
1327 ptep_iter = pte_p;
1328 for (i = 0; i < 4; i++, ptep_iter++) {
1329 /* Note that there is a hole in the HINT sanity checking here. */
1330 ptmp = *ptep_iter;
1331
1332 /* We only need to update the page tables if the protections do not match. */
1333 if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) {
1334 ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot;
1335 *ptep_iter = ptmp;
1336 }
1337 }
1338 }
1339 } else {
1340 ptmp = *pte_p;
1341
1342 /* We only need to update the page tables if the protections do not match. */
1343 if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) {
1344 ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot;
1345 *pte_p = ptmp;
1346 }
1347 }
1348 __unreachable_ok_pop
1349 }
1350 }
1351
1352 if (vaddr_cur > vaddr) {
1353 assert(((vaddr_cur - vaddr) & 0xFFFFFFFF00000000ULL) == 0);
1354 flush_mmu_tlb_region(vaddr, (uint32_t)(vaddr_cur - vaddr));
1355 }
1356
1357
1358 return result;
1359 }
1360
1361 /*
1362 * Routine: ml_static_mfree
1363 * Function:
1364 */
1365 void
1366 ml_static_mfree(
1367 vm_offset_t vaddr,
1368 vm_size_t size)
1369 {
1370 vm_offset_t vaddr_cur;
1371 ppnum_t ppn;
1372 uint32_t freed_pages = 0;
1373
1374 /* It is acceptable (if bad) to fail to free. */
1375 if (vaddr < VM_MIN_KERNEL_ADDRESS) {
1376 return;
1377 }
1378
1379 assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */
1380
1381 for (vaddr_cur = vaddr;
1382 vaddr_cur < trunc_page_64(vaddr + size);
1383 vaddr_cur += PAGE_SIZE) {
1384 ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
1385 if (ppn != (vm_offset_t) NULL) {
1386 /*
1387 * It is not acceptable to fail to update the protections on a page
1388 * we will release to the VM. We need to either panic or continue.
1389 * For now, we'll panic (to help flag if there is memory we can
1390 * reclaim).
1391 */
1392 if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
1393 panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
1394 }
1395
1396 #if 0
1397 /*
1398 * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme
1399 * relies on the persistence of these mappings for all time.
1400 */
1401 // pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE));
1402 #endif
1403
1404 vm_page_create(ppn, (ppn + 1));
1405 freed_pages++;
1406 }
1407 }
1408 vm_page_lockspin_queues();
1409 vm_page_wire_count -= freed_pages;
1410 vm_page_wire_count_initial -= freed_pages;
1411 vm_page_unlock_queues();
1412 #if DEBUG
1413 kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
1414 #endif
1415 }
1416
1417
1418 /* virtual to physical on wired pages */
1419 vm_offset_t
1420 ml_vtophys(vm_offset_t vaddr)
1421 {
1422 return kvtophys(vaddr);
1423 }
1424
1425 /*
1426 * Routine: ml_nofault_copy
1427 * Function: Perform a physical mode copy if the source and destination have
1428 * valid translations in the kernel pmap. If translations are present, they are
1429 * assumed to be wired; e.g., no attempt is made to guarantee that the
1430 * translations obtained remain valid for the duration of the copy process.
1431 */
1432 vm_size_t
1433 ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
1434 {
1435 addr64_t cur_phys_dst, cur_phys_src;
1436 vm_size_t count, nbytes = 0;
1437
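	/* Copy in chunks that never cross a page boundary in either the source or the
	 * destination, stopping at the first address without a valid translation. */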
1438 while (size > 0) {
1439 if (!(cur_phys_src = kvtophys(virtsrc))) {
1440 break;
1441 }
1442 if (!(cur_phys_dst = kvtophys(virtdst))) {
1443 break;
1444 }
1445 if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
1446 !pmap_valid_address(trunc_page_64(cur_phys_src))) {
1447 break;
1448 }
1449 count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
1450 if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
1451 count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
1452 }
1453 if (count > size) {
1454 count = size;
1455 }
1456
1457 bcopy_phys(cur_phys_src, cur_phys_dst, count);
1458
1459 nbytes += count;
1460 virtsrc += count;
1461 virtdst += count;
1462 size -= count;
1463 }
1464
1465 return nbytes;
1466 }
1467
1468 /*
1469 * Routine: ml_validate_nofault
1470  *  Function: Validate that this address range has valid translations
1471 * in the kernel pmap. If translations are present, they are
1472 * assumed to be wired; i.e. no attempt is made to guarantee
1473  *  that the translations persist after the check.
1474 * Returns: TRUE if the range is mapped and will not cause a fault,
1475 * FALSE otherwise.
1476 */
1477
1478 boolean_t
1479 ml_validate_nofault(
1480 vm_offset_t virtsrc, vm_size_t size)
1481 {
1482 addr64_t cur_phys_src;
1483 uint32_t count;
1484
1485 while (size > 0) {
1486 if (!(cur_phys_src = kvtophys(virtsrc))) {
1487 return FALSE;
1488 }
1489 if (!pmap_valid_address(trunc_page_64(cur_phys_src))) {
1490 return FALSE;
1491 }
1492 count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
1493 if (count > size) {
1494 count = (uint32_t)size;
1495 }
1496
1497 virtsrc += count;
1498 size -= count;
1499 }
1500
1501 return TRUE;
1502 }
1503
1504 void
1505 ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
1506 {
1507 *phys_addr = 0;
1508 *size = 0;
1509 }
1510
1511 void
1512 active_rt_threads(__unused boolean_t active)
1513 {
1514 }
1515
1516 static void
1517 cpu_qos_cb_default(__unused int urgency, __unused uint64_t qos_param1, __unused uint64_t qos_param2)
1518 {
1519 return;
1520 }
1521
1522 cpu_qos_update_t cpu_qos_update = cpu_qos_cb_default;
1523
1524 void
1525 cpu_qos_update_register(cpu_qos_update_t cpu_qos_cb)
1526 {
1527 if (cpu_qos_cb != NULL) {
1528 cpu_qos_update = cpu_qos_cb;
1529 } else {
1530 cpu_qos_update = cpu_qos_cb_default;
1531 }
1532 }
1533
1534 void
1535 thread_tell_urgency(thread_urgency_t urgency, uint64_t rt_period, uint64_t rt_deadline, uint64_t sched_latency __unused, __unused thread_t nthread)
1536 {
1537 SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, rt_deadline, sched_latency, 0);
1538
1539 cpu_qos_update((int)urgency, rt_period, rt_deadline);
1540
1541 SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, rt_deadline, 0, 0);
1542 }
1543
1544 void
1545 machine_run_count(__unused uint32_t count)
1546 {
1547 }
1548
1549 processor_t
1550 machine_choose_processor(__unused processor_set_t pset, processor_t processor)
1551 {
1552 return processor;
1553 }
1554
1555 #if KASAN
1556 vm_offset_t ml_stack_base(void);
1557 vm_size_t ml_stack_size(void);
1558
1559 vm_offset_t
1560 ml_stack_base(void)
1561 {
1562 uintptr_t local = (uintptr_t) &local;
1563 vm_offset_t intstack_top_ptr;
1564
1565 intstack_top_ptr = getCpuDatap()->intstack_top;
1566 if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
1567 return intstack_top_ptr - INTSTACK_SIZE;
1568 } else {
1569 return current_thread()->kernel_stack;
1570 }
1571 }
1572 vm_size_t
1573 ml_stack_size(void)
1574 {
1575 uintptr_t local = (uintptr_t) &local;
1576 vm_offset_t intstack_top_ptr;
1577
1578 intstack_top_ptr = getCpuDatap()->intstack_top;
1579 if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
1580 return INTSTACK_SIZE;
1581 } else {
1582 return kernel_stack_size;
1583 }
1584 }
1585 #endif
1586
1587 boolean_t
1588 machine_timeout_suspended(void)
1589 {
1590 return FALSE;
1591 }
1592
1593 kern_return_t
1594 ml_interrupt_prewarm(__unused uint64_t deadline)
1595 {
1596 return KERN_FAILURE;
1597 }
1598
1599 /*
1600  * Assumes FIQ and IRQ are disabled.
1601 */
1602 void
1603 ml_set_decrementer(uint32_t dec_value)
1604 {
1605 cpu_data_t *cdp = getCpuDatap();
1606
1607 assert(ml_get_interrupts_enabled() == FALSE);
1608 cdp->cpu_decrementer = dec_value;
1609
1610 if (cdp->cpu_set_decrementer_func) {
1611 ((void (*)(uint32_t))cdp->cpu_set_decrementer_func)(dec_value);
1612 } else {
1613 __asm__ volatile ("msr CNTP_TVAL_EL0, %0" : : "r"((uint64_t)dec_value));
1614 }
1615 }
1616
1617 uint64_t
1618 ml_get_hwclock()
1619 {
1620 uint64_t timebase;
1621
1622 // ISB required by ARMV7C.b section B8.1.2 & ARMv8 section D6.1.2
1623 // "Reads of CNTPCT[_EL0] can occur speculatively and out of order relative
1624 // to other instructions executed on the same processor."
1625 __builtin_arm_isb(ISB_SY);
1626 timebase = __builtin_arm_rsr64("CNTPCT_EL0");
1627
1628 return timebase;
1629 }
1630
1631 uint64_t
1632 ml_get_timebase()
1633 {
1634 return ml_get_hwclock() + getCpuDatap()->cpu_base_timebase;
1635 }
1636
1637 uint32_t
1638 ml_get_decrementer()
1639 {
1640 cpu_data_t *cdp = getCpuDatap();
1641 uint32_t dec;
1642
1643 assert(ml_get_interrupts_enabled() == FALSE);
1644
1645 if (cdp->cpu_get_decrementer_func) {
1646 dec = ((uint32_t (*)(void))cdp->cpu_get_decrementer_func)();
1647 } else {
1648 uint64_t wide_val;
1649
1650 __asm__ volatile ("mrs %0, CNTP_TVAL_EL0" : "=r"(wide_val));
1651 dec = (uint32_t)wide_val;
1652 assert(wide_val == (uint64_t)dec);
1653 }
1654
1655 return dec;
1656 }
1657
1658 boolean_t
1659 ml_get_timer_pending()
1660 {
1661 uint64_t cntp_ctl;
1662
1663 __asm__ volatile ("mrs %0, CNTP_CTL_EL0" : "=r"(cntp_ctl));
1664 return ((cntp_ctl & CNTP_CTL_EL0_ISTATUS) != 0) ? TRUE : FALSE;
1665 }
1666
1667 boolean_t
1668 ml_wants_panic_trap_to_debugger(void)
1669 {
1670 boolean_t result = FALSE;
1671 return result;
1672 }
1673
1674 static void
1675 cache_trap_error(thread_t thread, vm_map_address_t fault_addr)
1676 {
1677 mach_exception_data_type_t exc_data[2];
1678 arm_saved_state_t *regs = get_user_regs(thread);
1679
1680 set_saved_state_far(regs, fault_addr);
1681
1682 exc_data[0] = KERN_INVALID_ADDRESS;
1683 exc_data[1] = fault_addr;
1684
1685 exception_triage(EXC_BAD_ACCESS, exc_data, 2);
1686 }
1687
1688 static void
1689 cache_trap_recover()
1690 {
1691 vm_map_address_t fault_addr;
1692
1693 __asm__ volatile ("mrs %0, FAR_EL1" : "=r"(fault_addr));
1694
1695 cache_trap_error(current_thread(), fault_addr);
1696 }
1697
1698 static void
1699 set_cache_trap_recover(thread_t thread)
1700 {
1701 #if defined(HAS_APPLE_PAC)
1702 thread->recover = (vm_address_t)ptrauth_auth_and_resign(&cache_trap_recover,
1703 ptrauth_key_function_pointer, 0,
1704 ptrauth_key_function_pointer, ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER));
1705 #else /* defined(HAS_APPLE_PAC) */
1706 thread->recover = (vm_address_t)cache_trap_recover;
1707 #endif /* defined(HAS_APPLE_PAC) */
1708 }
1709
1710 static void
1711 dcache_flush_trap(vm_map_address_t start, vm_map_size_t size)
1712 {
1713 vm_map_address_t end = start + size;
1714 thread_t thread = current_thread();
1715 vm_offset_t old_recover = thread->recover;
1716
1717 /* Check bounds */
1718 if (task_has_64Bit_addr(current_task())) {
1719 if (end > MACH_VM_MAX_ADDRESS) {
1720 cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
1721 }
1722 } else {
1723 if (end > VM_MAX_ADDRESS) {
1724 cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
1725 }
1726 }
1727
1728 if (start > end) {
1729 cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1));
1730 }
1731
1732 set_cache_trap_recover(thread);
1733
1734 /*
1735 * We're coherent on Apple ARM64 CPUs, so this could be a nop. However,
1736 * if the region given us is bad, it would be good to catch it and
1737 * crash, ergo we still do the flush.
1738 */
1739 FlushPoC_DcacheRegion(start, (uint32_t)size);
1740
1741 /* Restore recovery function */
1742 thread->recover = old_recover;
1743
1744 /* Return (caller does exception return) */
1745 }
1746
1747 static void
1748 icache_invalidate_trap(vm_map_address_t start, vm_map_size_t size)
1749 {
1750 vm_map_address_t end = start + size;
1751 thread_t thread = current_thread();
1752 vm_offset_t old_recover = thread->recover;
1753
1754 /* Check bounds */
1755 if (task_has_64Bit_addr(current_task())) {
1756 if (end > MACH_VM_MAX_ADDRESS) {
1757 cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
1758 }
1759 } else {
1760 if (end > VM_MAX_ADDRESS) {
1761 cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
1762 }
1763 }
1764
1765 if (start > end) {
1766 cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1));
1767 }
1768
1769 set_cache_trap_recover(thread);
1770
1771 /* Invalidate iCache to point of unification */
1772 InvalidatePoU_IcacheRegion(start, (uint32_t)size);
1773
1774 /* Restore recovery function */
1775 thread->recover = old_recover;
1776
1777 /* Return (caller does exception return) */
1778 }
1779
1780 __attribute__((noreturn))
1781 void
1782 platform_syscall(arm_saved_state_t *state)
1783 {
1784 uint32_t code;
1785
1786 #define platform_syscall_kprintf(x...) /* kprintf("platform_syscall: " x) */
1787
1788 code = (uint32_t)get_saved_state_reg(state, 3);
1789 switch (code) {
1790 case 0:
1791 /* I-Cache flush */
1792 platform_syscall_kprintf("icache flush requested.\n");
1793 icache_invalidate_trap(get_saved_state_reg(state, 0), get_saved_state_reg(state, 1));
1794 break;
1795 case 1:
1796 /* D-Cache flush */
1797 platform_syscall_kprintf("dcache flush requested.\n");
1798 dcache_flush_trap(get_saved_state_reg(state, 0), get_saved_state_reg(state, 1));
1799 break;
1800 case 2:
1801 /* set cthread */
1802 platform_syscall_kprintf("set cthread self.\n");
1803 thread_set_cthread_self(get_saved_state_reg(state, 0));
1804 break;
1805 case 3:
1806 /* get cthread */
1807 platform_syscall_kprintf("get cthread self.\n");
1808 set_saved_state_reg(state, 0, thread_get_cthread_self());
1809 break;
1810 default:
1811 platform_syscall_kprintf("unknown: %d\n", code);
1812 break;
1813 }
1814
1815 thread_exception_return();
1816 }
1817
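/*
 * Program CNTKCTL_EL1 so that transitions of the selected timebase bit generate
 * event-stream wake-ups (e.g. for WFE); bit_index is chosen in
 * fiq_context_bootstrap() to approximate the requested events_per_sec rate.
 */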
1818 static void
1819 _enable_timebase_event_stream(uint32_t bit_index)
1820 {
1821 uint64_t cntkctl; /* One wants to use 32 bits, but "mrs" prefers it this way */
1822
1823 if (bit_index >= 64) {
1824 panic("%s: invalid bit index (%u)", __FUNCTION__, bit_index);
1825 }
1826
1827 __asm__ volatile ("mrs %0, CNTKCTL_EL1" : "=r"(cntkctl));
1828
1829 cntkctl |= (bit_index << CNTKCTL_EL1_EVENTI_SHIFT);
1830 cntkctl |= CNTKCTL_EL1_EVNTEN;
1831 cntkctl |= CNTKCTL_EL1_EVENTDIR; /* 1->0; why not? */
1832
1833 /*
1834 * If the SOC supports it (and it isn't broken), enable
1835 * EL0 access to the physical timebase register.
1836 */
1837 if (user_timebase_type() != USER_TIMEBASE_NONE) {
1838 cntkctl |= CNTKCTL_EL1_PL0PCTEN;
1839 }
1840
1841 __asm__ volatile ("msr CNTKCTL_EL1, %0" : : "r"(cntkctl));
1842 }
1843
1844 /*
1845 * Turn timer on, unmask that interrupt.
1846 */
1847 static void
1848 _enable_virtual_timer(void)
1849 {
1850 uint64_t cntvctl = CNTP_CTL_EL0_ENABLE; /* One wants to use 32 bits, but "mrs" prefers it this way */
1851
1852 __asm__ volatile ("msr CNTP_CTL_EL0, %0" : : "r"(cntvctl));
1853 }
1854
1855 uint64_t events_per_sec = 0;
1856
1857 void
1858 fiq_context_init(boolean_t enable_fiq __unused)
1859 {
1860 _enable_timebase_event_stream(fiq_eventi);
1861
1862 /* Interrupts still disabled. */
1863 assert(ml_get_interrupts_enabled() == FALSE);
1864 _enable_virtual_timer();
1865 }
1866
1867 void
1868 fiq_context_bootstrap(boolean_t enable_fiq)
1869 {
1870 #if defined(APPLE_ARM64_ARCH_FAMILY) || defined(BCM2837)
1871 /* Could fill in our own ops here, if we needed them */
1872 uint64_t ticks_per_sec, ticks_per_event;
1873 uint32_t bit_index;
1874
1875 ticks_per_sec = gPEClockFrequencyInfo.timebase_frequency_hz;
1876 ticks_per_event = ticks_per_sec / events_per_sec;
1877 bit_index = flsll(ticks_per_event) - 1; /* Highest bit set */
1878
1879 /* Round up to power of two */
1880 if ((ticks_per_event & ((1 << bit_index) - 1)) != 0) {
1881 bit_index++;
1882 }
1883
1884 /*
1885 * The timer can only trigger on rising or falling edge,
1886 * not both; we don't care which we trigger on, but we
1887 * do need to adjust which bit we are interested in to
1888 * account for this.
1889 */
1890 if (bit_index != 0) {
1891 bit_index--;
1892 }
1893
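	/*
	 * Worked example: ticks_per_event == 1000 gives a highest set bit of 9; 1000
	 * is not a power of two, so we round up to bit 10, and the edge adjustment
	 * then selects bit 9, which toggles every 512 ticks.
	 */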
1894 fiq_eventi = bit_index;
1895 #else
1896 #error Need a board configuration.
1897 #endif
1898 fiq_context_init(enable_fiq);
1899 }
1900
1901 boolean_t
1902 ml_delay_should_spin(uint64_t interval)
1903 {
1904 cpu_data_t *cdp = getCpuDatap();
1905
1906 if (cdp->cpu_idle_latency) {
1907 return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
1908 } else {
1909 /*
1910 * Early boot, latency is unknown. Err on the side of blocking,
1911 * which should always be safe, even if slow
1912 */
1913 return FALSE;
1914 }
1915 }
1916
1917 boolean_t
1918 ml_thread_is64bit(thread_t thread)
1919 {
1920 return thread_is_64bit_addr(thread);
1921 }
1922
1923 void
1924 ml_delay_on_yield(void)
1925 {
1926 #if DEVELOPMENT || DEBUG
1927 if (yield_delay_us) {
1928 delay(yield_delay_us);
1929 }
1930 #endif
1931 }
1932
1933 void
1934 ml_timer_evaluate(void)
1935 {
1936 }
1937
1938 boolean_t
1939 ml_timer_forced_evaluation(void)
1940 {
1941 return FALSE;
1942 }
1943
1944 uint64_t
1945 ml_energy_stat(thread_t t)
1946 {
1947 return t->machine.energy_estimate_nj;
1948 }
1949
1950
1951 void
1952 ml_gpu_stat_update(__unused uint64_t gpu_ns_delta)
1953 {
1954 #if CONFIG_EMBEDDED
1955 /*
1956 * For now: update the resource coalition stats of the
1957 * current thread's coalition
1958 */
1959 task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
1960 #endif
1961 }
1962
1963 uint64_t
1964 ml_gpu_stat(__unused thread_t t)
1965 {
1966 return 0;
1967 }
1968
1969 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
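/*
 * With precise user/kernel time accounting enabled, retarget both the per-CPU
 * state timer and the current thread's timer at every user<->kernel boundary.
 */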
1970 static void
1971 timer_state_event(boolean_t switch_to_kernel)
1972 {
1973 thread_t thread = current_thread();
1974 if (!thread->precise_user_kernel_time) {
1975 return;
1976 }
1977
1978 processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data;
1979 uint64_t now = ml_get_timebase();
1980
1981 timer_stop(pd->current_state, now);
1982 pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
1983 timer_start(pd->current_state, now);
1984
1985 timer_stop(pd->thread_timer, now);
1986 pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
1987 timer_start(pd->thread_timer, now);
1988 }
1989
1990 void
1991 timer_state_event_user_to_kernel(void)
1992 {
1993 timer_state_event(TRUE);
1994 }
1995
1996 void
1997 timer_state_event_kernel_to_user(void)
1998 {
1999 timer_state_event(FALSE);
2000 }
2001 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
2002
2003 /*
2004 * The following are required for parts of the kernel
2005 * that cannot resolve these functions as inlines:
2006 */
2007 extern thread_t current_act(void) __attribute__((const));
2008 thread_t
2009 current_act(void)
2010 {
2011 return current_thread_fast();
2012 }
2013
2014 #undef current_thread
2015 extern thread_t current_thread(void) __attribute__((const));
2016 thread_t
2017 current_thread(void)
2018 {
2019 return current_thread_fast();
2020 }
2021
2022 typedef struct{
2023 ex_cb_t cb;
2024 void *refcon;
2025 }
2026 ex_cb_info_t;
2027
2028 ex_cb_info_t ex_cb_info[EXCB_CLASS_MAX];
2029
2030 /*
2031 * Callback registration
2032  * Currently we support only one registered callback per class, but
2033  * it should be possible to support more callbacks.
2034 */
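/*
 * Illustrative sketch (the handler and class names are hypothetical): a client
 * supplies a callback matching the way ex_cb_invoke() calls it below, then
 * registers it once for a class:
 *
 *   static ex_cb_action_t
 *   my_handler(ex_cb_class_t cb_class, void *refcon, ex_cb_state_t *state)
 *   {
 *       // examine the fault address carried in *state, then decide
 *       return EXCB_ACTION_NONE;
 *   }
 *
 *   ex_cb_register(some_cb_class, my_handler, NULL);
 */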
2035 kern_return_t
2036 ex_cb_register(
2037 ex_cb_class_t cb_class,
2038 ex_cb_t cb,
2039 void *refcon)
2040 {
2041 ex_cb_info_t *pInfo = &ex_cb_info[cb_class];
2042
2043 if ((NULL == cb) || (cb_class >= EXCB_CLASS_MAX)) {
2044 return KERN_INVALID_VALUE;
2045 }
2046
2047 if (NULL == pInfo->cb) {
2048 pInfo->cb = cb;
2049 pInfo->refcon = refcon;
2050 return KERN_SUCCESS;
2051 }
2052 return KERN_FAILURE;
2053 }
2054
2055 /*
2056  * Called internally by the platform kernel to invoke the registered callback for a class
2057 */
2058 ex_cb_action_t
2059 ex_cb_invoke(
2060 ex_cb_class_t cb_class,
2061 vm_offset_t far)
2062 {
2063 ex_cb_info_t *pInfo = &ex_cb_info[cb_class];
2064 ex_cb_state_t state = {far};
2065
2066 if (cb_class >= EXCB_CLASS_MAX) {
2067 panic("Invalid exception callback class 0x%x\n", cb_class);
2068 }
2069
2070 if (pInfo->cb) {
2071 return pInfo->cb(cb_class, pInfo->refcon, &state);
2072 }
2073 return EXCB_ACTION_NONE;
2074 }
2075
2076 #if defined(HAS_APPLE_PAC)
2077 void
2078 ml_task_set_disable_user_jop(task_t task, boolean_t disable_user_jop)
2079 {
2080 assert(task);
2081 task->disable_user_jop = disable_user_jop;
2082 }
2083
2084 void
2085 ml_thread_set_disable_user_jop(thread_t thread, boolean_t disable_user_jop)
2086 {
2087 assert(thread);
2088 thread->machine.disable_user_jop = disable_user_jop;
2089 }
2090
2091 void
2092 ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit)
2093 {
2094 if (inherit) {
2095 task->rop_pid = parent_task->rop_pid;
2096 } else {
2097 task->rop_pid = early_random();
2098 }
2099 }
2100 #endif /* defined(HAS_APPLE_PAC) */
2101
2102
2103 #if defined(HAS_APPLE_PAC)
2104
2105 /*
2106 * ml_auth_ptr_unchecked: call this instead of ptrauth_auth_data
2107  * intrinsic when you don't want to trap on auth failure.
2108 *
2109 */
2110
2111 void *
2112 ml_auth_ptr_unchecked(void *ptr, ptrauth_key key, uint64_t modifier)
2113 {
2114 switch (key & 0x3) {
2115 case ptrauth_key_asia:
2116 asm volatile ("autia %[ptr], %[modifier]" : [ptr] "+r"(ptr) : [modifier] "r"(modifier));
2117 break;
2118 case ptrauth_key_asib:
2119 asm volatile ("autib %[ptr], %[modifier]" : [ptr] "+r"(ptr) : [modifier] "r"(modifier));
2120 break;
2121 case ptrauth_key_asda:
2122 asm volatile ("autda %[ptr], %[modifier]" : [ptr] "+r"(ptr) : [modifier] "r"(modifier));
2123 break;
2124 case ptrauth_key_asdb:
2125 asm volatile ("autdb %[ptr], %[modifier]" : [ptr] "+r"(ptr) : [modifier] "r"(modifier));
2126 break;
2127 }
2128
2129 return ptr;
2130 }
2131 #endif /* defined(HAS_APPLE_PAC) */