/*
 * Copyright (c) 2007-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm64/proc_reg.h>
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/io_map_entries.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/rtclock.h>
#include <arm/cpuid_internal.h>
#include <arm/cpu_capabilities.h>
#include <console/serial_protos.h>
#include <kern/machine.h>
#include <prng/random.h>
#include <kern/startup.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <mach/machine.h>
#include <machine/atomic.h>
#include <vm/vm_page.h>
#include <sys/kdebug.h>
#include <kern/coalition.h>
#include <pexpert/device_tree.h>

#include <IOKit/IOPlatformExpert.h>

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#include <libkern/kernel_mach_header.h>
#endif

#include <libkern/section_keywords.h>
static uint8_t cluster_initialized = 0;

static int max_cpus_initialized = 0;
#define MAX_CPUS_SET    0x1
#define MAX_CPUS_WAIT   0x2

uint32_t LockTimeOut;
uint32_t LockTimeOutUsec;
uint64_t TLockTimeOut;
uint64_t MutexSpin;
uint64_t low_MutexSpin;
int64_t high_MutexSpin;

boolean_t is_clock_configured = FALSE;

uint32_t yield_delay_us = 0; /* Must be less than cpu_idle_latency to ensure ml_delay_should_spin is true */
#if CONFIG_NONFATAL_ASSERTS
extern int mach_assert;
#endif

extern volatile uint32_t debug_enabled;

extern vm_offset_t segLOWEST;
extern vm_offset_t segLOWESTTEXT;
extern vm_offset_t segLASTB;
extern unsigned long segSizeLAST;

#if defined(HAS_IPI)
unsigned int gFastIPI = 1;
#define kDeferredIPITimerDefault (64 * NSEC_PER_USEC) /* in nanoseconds */
static uint64_t deferred_ipi_timer_ns = kDeferredIPITimerDefault;
#endif /* defined(HAS_IPI) */
void machine_conf(void);

thread_t Idle_context(void);

SECURITY_READ_ONLY_LATE(static uint32_t) cpu_phys_ids[MAX_CPUS] = {[0 ... MAX_CPUS - 1] = (uint32_t)-1};
SECURITY_READ_ONLY_LATE(static unsigned int) avail_cpus = 0;
SECURITY_READ_ONLY_LATE(static int) boot_cpu = -1;
SECURITY_READ_ONLY_LATE(static int) max_cpu_number = 0;
SECURITY_READ_ONLY_LATE(cluster_type_t) boot_cluster = CLUSTER_TYPE_SMP;

SECURITY_READ_ONLY_LATE(static uint32_t) fiq_eventi = UINT32_MAX;

lockdown_handler_t lockdown_handler;
void *lockdown_this;
lck_mtx_t lockdown_handler_lck;
lck_grp_t *lockdown_handler_grp;
uint32_t lockdown_done;

void ml_lockdown_init(void);
void ml_lockdown_run_handler(void);
uint32_t get_arm_cpu_version(void);
#if defined(HAS_IPI)
static inline void
ml_cpu_signal_type(unsigned int cpu_mpidr, uint32_t type)
{
#if HAS_CLUSTER
	uint64_t local_mpidr;
	/* NOTE: this logic expects that we are called in a non-preemptible
	 * context, or at least one in which the calling thread is bound
	 * to a single CPU.  Otherwise we may migrate between choosing which
	 * IPI mechanism to use and issuing the IPI. */
	MRS(local_mpidr, "MPIDR_EL1");
	if ((local_mpidr & MPIDR_AFF1_MASK) == (cpu_mpidr & MPIDR_AFF1_MASK)) {
		uint64_t x = type | (cpu_mpidr & MPIDR_AFF0_MASK);
		MSR(ARM64_REG_IPI_RR_LOCAL, x);
	} else {
		#define IPI_RR_TARGET_CLUSTER_SHIFT 16
		uint64_t x = type | ((cpu_mpidr & MPIDR_AFF1_MASK) << (IPI_RR_TARGET_CLUSTER_SHIFT - MPIDR_AFF1_SHIFT)) | (cpu_mpidr & MPIDR_AFF0_MASK);
		MSR(ARM64_REG_IPI_RR_GLOBAL, x);
	}
#else
	uint64_t x = type | (cpu_mpidr & MPIDR_AFF0_MASK);
	MSR(ARM64_REG_IPI_RR, x);
#endif /* HAS_CLUSTER */
}
#endif /* defined(HAS_IPI) */
#if !defined(HAS_IPI)
__dead2
#endif
void
ml_cpu_signal(unsigned int cpu_mpidr __unused)
{
#if defined(HAS_IPI)
	ml_cpu_signal_type(cpu_mpidr, ARM64_REG_IPI_RR_TYPE_IMMEDIATE);
#else
	panic("Platform does not support ACC Fast IPI");
#endif
}
#if !defined(HAS_IPI)
__dead2
#endif
void
ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs)
{
#if defined(HAS_IPI)
	/* adjust IPI_CR timer countdown value for deferred IPI
	 * accepts input in nanosecs, convert to absolutetime (REFCLK ticks),
	 * clamp maximum REFCLK ticks to 0xFFFF (16 bit field)
	 *
	 * global register, should only require a single write to update all
	 * CPU cores: from Skye ACC user spec section 5.7.3.3
	 *
	 * IPICR is a global register but there are two copies in ACC: one at pBLK and one at eBLK.
	 * IPICR write SPR token also traverses both pCPM and eCPM rings and updates both copies.
	 */
	uint64_t abstime;

	nanoseconds_to_absolutetime(nanosecs, &abstime);

	abstime = MIN(abstime, 0xFFFF);

	/* update deferred_ipi_timer_ns with the new clamped value */
	absolutetime_to_nanoseconds(abstime, &deferred_ipi_timer_ns);

	MSR(ARM64_REG_IPI_CR, abstime);
#else
	(void)nanosecs;
	panic("Platform does not support ACC Fast IPI");
#endif
}

uint64_t
ml_cpu_signal_deferred_get_timer()
{
#if defined(HAS_IPI)
	return deferred_ipi_timer_ns;
#else
	return 0;
#endif
}
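/*
 * Worked example (illustrative numbers only, not from the original source):
 * with the kDeferredIPITimerDefault of 64 us and a hypothetical 24 MHz REFCLK,
 * nanoseconds_to_absolutetime() yields roughly 1536 ticks, well under the
 * 0xFFFF clamp; the clamp itself corresponds to about 2.7 ms at that rate.
 */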
#if !defined(HAS_IPI)
__dead2
#endif
void
ml_cpu_signal_deferred(unsigned int cpu_mpidr __unused)
{
#if defined(HAS_IPI)
	ml_cpu_signal_type(cpu_mpidr, ARM64_REG_IPI_RR_TYPE_DEFERRED);
#else
	panic("Platform does not support ACC Fast IPI deferral");
#endif
}

#if !defined(HAS_IPI)
__dead2
#endif
void
ml_cpu_signal_retract(unsigned int cpu_mpidr __unused)
{
#if defined(HAS_IPI)
	ml_cpu_signal_type(cpu_mpidr, ARM64_REG_IPI_RR_TYPE_RETRACT);
#else
	panic("Platform does not support ACC Fast IPI retraction");
#endif
}
void
machine_idle(void)
{
	__builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
	Idle_context();
	__builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
}
boolean_t
get_vfp_enabled(void)
{
	return TRUE;
}

void
OSSynchronizeIO(void)
{
	__builtin_arm_dsb(DSB_SY);
}

uint64_t
get_aux_control(void)
{
	uint64_t value;
	MRS(value, "ACTLR_EL1");
	return value;
}

uint64_t
get_mmu_control(void)
{
	uint64_t value;
	MRS(value, "SCTLR_EL1");
	return value;
}

uint64_t
get_tcr(void)
{
	uint64_t value;
	MRS(value, "TCR_EL1");
	return value;
}

boolean_t
ml_get_interrupts_enabled(void)
{
	uint64_t value;
	MRS(value, "DAIF");
	if (value & DAIF_IRQF) {
		return FALSE;
	}
	return TRUE;
}

uint64_t
get_mmu_ttb(void)
{
	uint64_t value;
	MRS(value, "TTBR0_EL1");
	return value;
}

uint32_t
get_arm_cpu_version(void)
{
	uint32_t value = machine_read_midr();

	/* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */
	return ((value & MIDR_EL1_REV_MASK) >> MIDR_EL1_REV_SHIFT) | ((value & MIDR_EL1_VAR_MASK) >> (MIDR_EL1_VAR_SHIFT - 4));
}
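/*
 * For example, a core reporting MIDR variant 0x2 and revision 0x1 yields 0x21
 * from get_arm_cpu_version(): variant in bits [7:4], revision in bits [3:0].
 */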
/*
 * user_cont_hwclock_allowed()
 *
 * Indicates whether we allow EL0 to read the physical timebase (CNTPCT_EL0)
 * as a continuous time source (e.g. from mach_continuous_time)
 */
boolean_t
user_cont_hwclock_allowed(void)
{
#if HAS_CONTINUOUS_HWCLOCK
	return TRUE;
#else
	return FALSE;
#endif
}

uint8_t
user_timebase_type(void)
{
	return USER_TIMEBASE_SPEC;
}

boolean_t
arm64_wfe_allowed(void)
{
	return TRUE;
}
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)

uint64_t rorgn_begin __attribute__((section("__DATA, __const"))) = 0;
uint64_t rorgn_end   __attribute__((section("__DATA, __const"))) = 0;
vm_offset_t amcc_base;

static void assert_unlocked(void);
static void assert_amcc_cache_disabled(void);
static void lock_amcc(void);
static void lock_mmu(uint64_t begin, uint64_t end);
static void
rorgn_stash_range(void)
{
#if DEVELOPMENT || DEBUG
	boolean_t rorgn_disable = FALSE;

	PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable));

	if (rorgn_disable) {
		/* take early out if boot arg present, don't query any machine registers to avoid
		 * dependency on amcc DT entry
		 */
		return;
	}
#endif

	/* Get the AMC values, and stash them into rorgn_begin, rorgn_end.
	 * gPhysBase is the base of DRAM managed by xnu. we need DRAM_BASE as
	 * the AMCC RO region begin/end registers are in units of 16KB page
	 * numbers from DRAM_BASE so we'll truncate gPhysBase at 512MB granule
	 * and assert the value is the canonical DRAM_BASE PA of 0x8_0000_0000 for arm64.
	 */

	uint64_t dram_base = gPhysBase & ~0x1FFFFFFFULL; /* 512MB */
	assert(dram_base == 0x800000000ULL);
#if defined(KERNEL_INTEGRITY_KTRR)
	uint64_t soc_base = 0;
	DTEntry entryP = NULL;
	uintptr_t *reg_prop = NULL;
	uint32_t prop_size = 0;
	int rc;

	soc_base = pe_arm_get_soc_base_phys();
	rc = DTFindEntry("name", "mcc", &entryP);
	assert(rc == kSuccess);
	rc = DTGetProperty(entryP, "reg", (void **)&reg_prop, &prop_size);
	assert(rc == kSuccess);
	amcc_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1));
#elif defined(KERNEL_INTEGRITY_CTRR)
	/* TODO: t8020 mcc entry not in device tree yet; we'll do it LIVE */
#define TEMP_AMCC_BASE_PA 0x200000000ULL
#define TEMP_AMCC_SZ      0x100000
	amcc_base = ml_io_map(TEMP_AMCC_BASE_PA, TEMP_AMCC_SZ);
#else
#error "KERNEL_INTEGRITY config error"
#endif
#if defined(KERNEL_INTEGRITY_KTRR)
	assert(rRORGNENDADDR > rRORGNBASEADDR);
	rorgn_begin = (rRORGNBASEADDR << AMCC_PGSHIFT) + dram_base;
	rorgn_end   = (rRORGNENDADDR << AMCC_PGSHIFT) + dram_base;
#elif defined(KERNEL_INTEGRITY_CTRR)
	rorgn_begin = rCTRR_AMCC_PLANE_REG(0, CTRR_A_BASEADDR);
	rorgn_end   = rCTRR_AMCC_PLANE_REG(0, CTRR_A_ENDADDR);
	assert(rorgn_end > rorgn_begin);

	for (int i = 0; i < CTRR_AMCC_MAX_PLANES; ++i) {
		uint32_t begin = rCTRR_AMCC_PLANE_REG(i, CTRR_A_BASEADDR);
		uint32_t end = rCTRR_AMCC_PLANE_REG(i, CTRR_A_ENDADDR);
		if (!(begin == rorgn_begin && end == rorgn_end)) {
#if DEVELOPMENT || DEBUG
			panic("iboot programmed CTRR bounds are inconsistent");
#else
			panic("Inconsistent memory configuration");
#endif
		}
	}

	// convert from page number from DRAM base to PA
	rorgn_begin = (rorgn_begin << AMCC_PGSHIFT) + dram_base;
	rorgn_end   = (rorgn_end << AMCC_PGSHIFT) + dram_base;
#else
#error KERNEL_INTEGRITY config error
#endif /* defined (KERNEL_INTEGRITY_KTRR) */
}
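/*
 * Illustration (shift value assumed, not from the original source): the AMCC
 * begin/end registers hold 16KB page numbers relative to DRAM_BASE, so with
 * AMCC_PGSHIFT == 14 a register value N corresponds to the physical address
 * (N << 14) + 0x8_0000_0000.
 */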
static void
assert_unlocked()
{
	uint64_t ktrr_lock = 0;
	uint32_t rorgn_lock = 0;

#if defined(KERNEL_INTEGRITY_KTRR)
	rorgn_lock = rRORGNLOCK;
	ktrr_lock = __builtin_arm_rsr64(ARM64_REG_KTRR_LOCK_EL1);
#elif defined(KERNEL_INTEGRITY_CTRR)
	for (int i = 0; i < CTRR_AMCC_MAX_PLANES; ++i) {
		rorgn_lock |= rCTRR_AMCC_PLANE_REG(i, CTRR_A_LOCK);
	}
	ktrr_lock = __builtin_arm_rsr64(ARM64_REG_CTRR_LOCK_EL1);
#else
#error KERNEL_INTEGRITY config error
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

	assert(!ktrr_lock);
	assert(!rorgn_lock);
}
static void
lock_amcc()
{
#if defined(KERNEL_INTEGRITY_KTRR)
	rRORGNLOCK = 1;
	__builtin_arm_isb(ISB_SY);
#elif defined(KERNEL_INTEGRITY_CTRR)
	/* lockdown planes in reverse order as plane 0 should be locked last */
	for (int i = 0; i < CTRR_AMCC_MAX_PLANES; ++i) {
		rCTRR_AMCC_PLANE_REG(CTRR_AMCC_MAX_PLANES - i - 1, CTRR_A_ENABLE) = 1;
		rCTRR_AMCC_PLANE_REG(CTRR_AMCC_MAX_PLANES - i - 1, CTRR_A_LOCK) = 1;
		__builtin_arm_isb(ISB_SY);
	}
#else
#error KERNEL_INTEGRITY config error
#endif
}
static void
lock_mmu(uint64_t begin, uint64_t end)
{
#if defined(KERNEL_INTEGRITY_KTRR)

	__builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin);
	__builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end);
	__builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL);

	/* flush TLB */
	__builtin_arm_isb(ISB_SY);
	flush_mmu_tlb();

#elif defined (KERNEL_INTEGRITY_CTRR)
	/* this will lock the entire bootstrap cluster. non bootstrap clusters
	 * will be locked by respective cluster master in start.s */

	__builtin_arm_wsr64(ARM64_REG_CTRR_A_LWR_EL1, begin);
	__builtin_arm_wsr64(ARM64_REG_CTRR_A_UPR_EL1, end);

#if !defined(APPLEVORTEX)
	/* H12 changed sequence, must invalidate TLB immediately after setting CTRR bounds */
	__builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
	flush_mmu_tlb();
#endif /* !defined(APPLEVORTEX) */

	__builtin_arm_wsr64(ARM64_REG_CTRR_CTL_EL1, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
	__builtin_arm_wsr64(ARM64_REG_CTRR_LOCK_EL1, 1ULL);

	uint64_t current_el = __builtin_arm_rsr64("CurrentEL");
	if (current_el == PSR64_MODE_EL2) {
		// CTRR v2 has explicit registers for cluster config. they can only be written in EL2
		__builtin_arm_wsr64(ACC_CTRR_A_LWR_EL2, begin);
		__builtin_arm_wsr64(ACC_CTRR_A_UPR_EL2, end);
		__builtin_arm_wsr64(ACC_CTRR_CTL_EL2, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
		__builtin_arm_wsr64(ACC_CTRR_LOCK_EL2, 1ULL);
	}

	__builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
#if defined(APPLEVORTEX)
	flush_mmu_tlb();
#endif /* defined(APPLEVORTEX) */
#else /* defined(KERNEL_INTEGRITY_KTRR) */
#error KERNEL_INTEGRITY config error
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
}
static void
assert_amcc_cache_disabled()
{
#if defined(KERNEL_INTEGRITY_KTRR)
	assert((rMCCGEN & 1) == 0); /* assert M$ disabled or LLC clean will be unreliable */
#elif defined(KERNEL_INTEGRITY_CTRR) && (defined(ARM64_BOARD_CONFIG_T8006))
	/*
	 * T8006 differentiates between data and tag ways being powered up, so
	 * make sure to check that both are zero on its single memory plane.
	 */
	assert((rCTRR_AMCC_PLANE_REG(0, CTRR_AMCC_PWRONWAYCNTSTATUS) &
	    (AMCC_CURTAGWAYCNT_MASK | AMCC_CURDATWAYCNT_MASK)) == 0);
#elif defined (KERNEL_INTEGRITY_CTRR)
	for (int i = 0; i < CTRR_AMCC_MAX_PLANES; ++i) {
		assert(rCTRR_AMCC_PLANE_REG(i, CTRR_AMCC_WAYONCNT) == 0);
	}
#else
#error KERNEL_INTEGRITY config error
#endif
}
/*
 * void rorgn_lockdown(void)
 *
 * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked
 *
 * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in
 *       start.s:start_cpu() for subsequent wake/resume of all cores
 */
void
rorgn_lockdown(void)
{
	vm_offset_t ktrr_begin, ktrr_end;
	unsigned long last_segsz;

#if DEVELOPMENT || DEBUG
	boolean_t ktrr_disable = FALSE;

	PE_parse_boot_argn("-unsafe_kernel_text", &ktrr_disable, sizeof(ktrr_disable));

	if (ktrr_disable) {
		/*
		 * take early out if boot arg present, since we may not have amcc DT entry present
		 * we can't assert that iboot hasn't programmed the RO region lockdown registers
		 */
		goto out;
	}
#endif /* DEVELOPMENT || DEBUG */

	assert_unlocked();

	/* [x] - Use final method of determining all kernel text range or expect crashes */
	ktrr_begin = segLOWEST;
	assert(ktrr_begin && gVirtBase && gPhysBase);

	ktrr_begin = kvtophys(ktrr_begin);

	ktrr_end   = kvtophys(segLASTB);
	last_segsz = segSizeLAST;
#if defined(KERNEL_INTEGRITY_KTRR)
	/* __LAST is not part of the MMU KTRR region (it is however part of the AMCC KTRR region) */
	ktrr_end = (ktrr_end - 1) & ~AMCC_PGMASK;
	/* ensure that iboot and xnu agree on the ktrr range */
	assert(rorgn_begin == ktrr_begin && rorgn_end == (ktrr_end + last_segsz));
	/* assert that __LAST segment containing privileged insns is only a single page */
	assert(last_segsz == PAGE_SIZE);
#elif defined(KERNEL_INTEGRITY_CTRR)
	ktrr_end = (ktrr_end + last_segsz - 1) & ~AMCC_PGMASK;
	/* __LAST is part of MMU CTRR region. Can't use the KTRR style method of making
	 * __pinst no execute because PXN applies with MMU off in CTRR. */
	assert(rorgn_begin == ktrr_begin && rorgn_end == ktrr_end);
#endif

#if DEBUG || DEVELOPMENT
	printf("KTRR Begin: %p End: %p, setting lockdown\n", (void *)ktrr_begin, (void *)ktrr_end);
#endif

	/* [x] - ensure all in flight writes are flushed to AMCC before enabling RO Region Lock */

	assert_amcc_cache_disabled();

	CleanPoC_DcacheRegion_Force(phystokv(ktrr_begin),
	    (unsigned)((ktrr_end + last_segsz) - ktrr_begin + AMCC_PGMASK));

	lock_amcc();

	lock_mmu(ktrr_begin, ktrr_end);

#if DEVELOPMENT || DEBUG
out:
#endif

#if defined(KERNEL_INTEGRITY_CTRR)
	{
		/* wake any threads blocked on cluster master lockdown */
		cpu_data_t *cdp;
		uint64_t mpidr_el1_value;

		cdp = getCpuDatap();
		MRS(mpidr_el1_value, "MPIDR_EL1");
		cdp->cpu_cluster_id = (mpidr_el1_value & MPIDR_AFF1_MASK) >> MPIDR_AFF1_SHIFT;
		assert(cdp->cpu_cluster_id < __ARM_CLUSTER_COUNT__);
		ctrr_cluster_locked[cdp->cpu_cluster_id] = 1;
		thread_wakeup(&ctrr_cluster_locked[cdp->cpu_cluster_id]);
	}
#endif

	/* now we can run lockdown handler */
	ml_lockdown_run_handler();
}
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
void
machine_startup(__unused boot_args * args)
{
	int boot_arg;

#if defined(HAS_IPI) && (DEVELOPMENT || DEBUG)
	if (!PE_parse_boot_argn("fastipi", &gFastIPI, sizeof(gFastIPI))) {
		gFastIPI = 1;
	}

	PE_parse_boot_argn("fastipitimeout", &deferred_ipi_timer_ns, sizeof(deferred_ipi_timer_ns));
#endif /* defined(HAS_IPI) && (DEVELOPMENT || DEBUG) */

#if CONFIG_NONFATAL_ASSERTS
	PE_parse_boot_argn("assert", &mach_assert, sizeof(mach_assert));
#endif

	if (PE_parse_boot_argn("preempt", &boot_arg, sizeof(boot_arg))) {
		default_preemption_rate = boot_arg;
	}
	if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof(boot_arg))) {
		default_bg_preemption_rate = boot_arg;
	}

	PE_parse_boot_argn("yield_delay_us", &yield_delay_us, sizeof(yield_delay_us));

	machine_conf();

	/*
	 * Kick off the kernel bootstrap.
	 */
	kernel_bootstrap();
	/*NOTREACHED*/
}
void
machine_lockdown_preflight(void)
{
#if CONFIG_KERNEL_INTEGRITY

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	rorgn_stash_range();
#endif

#endif
}

void
machine_lockdown(void)
{
#if CONFIG_KERNEL_INTEGRITY
#if KERNEL_INTEGRITY_WT
	/*
	 * Notify the monitor about the completion of early kernel bootstrap.
	 * From this point forward it will enforce the integrity of kernel text,
	 * rodata and page tables.
	 */
	monitor_call(MONITOR_LOCKDOWN, 0, 0, 0);
#endif /* KERNEL_INTEGRITY_WT */

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	/*
	 * Lock physical KTRR region. KTRR region is read-only. Memory outside
	 * the region is not executable at EL1.
	 */
	rorgn_lockdown();
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */

#endif /* CONFIG_KERNEL_INTEGRITY */
}
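/*
 * Note: machine_lockdown_preflight() runs early and only stashes the
 * iBoot-programmed RO-region bounds (rorgn_stash_range); the actual MMU/AMCC
 * lockdown is applied later, when machine_lockdown() calls rorgn_lockdown().
 */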
char *
machine_boot_info(__unused char *buf, __unused vm_size_t size)
{
	return PE_boot_args();
}
void
machine_conf(void)
{
	/*
	 * This is known to be inaccurate. mem_size should always be capped at 2 GB
	 */
	machine_info.memory_size = (uint32_t)mem_size;
}
void
machine_init(void)
{
	clock_config();
	is_clock_configured = TRUE;
}
void
slave_machine_init(__unused void *param)
{
	cpu_machine_init();     /* Initialize the processor */
	clock_init();           /* Init the clock */
}
/*
 *	Routine:        machine_processor_shutdown
 *	Function:
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	void (*doshutdown)(processor_t),
	processor_t processor)
{
	return Shutdown_context(doshutdown, processor);
}
/*
 *	Routine:        ml_init_max_cpus
 *	Function:
 */
void
ml_init_max_cpus(unsigned int max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		machine_info.max_cpus = max_cpus;
		machine_info.physical_cpu_max = max_cpus;
		machine_info.logical_cpu_max = max_cpus;
		if (max_cpus_initialized == MAX_CPUS_WAIT) {
			thread_wakeup((event_t) &max_cpus_initialized);
		}
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}
/*
 *	Routine:        ml_get_max_cpus
 *	Function:
 */
unsigned int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT);
		(void) thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return machine_info.max_cpus;
}
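/*
 * The max_cpus handshake: a caller that reaches ml_get_max_cpus() before
 * ml_init_max_cpus() has published machine_info.max_cpus blocks on
 * &max_cpus_initialized (MAX_CPUS_WAIT) and is woken once the value is set
 * (MAX_CPUS_SET).
 */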
/*
 *	Routine:        ml_init_lock_timeout
 *	Function:
 */
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint64_t mtxspin;
	uint64_t default_timeout_ns = NSEC_PER_SEC >> 2;
	uint32_t slto;

	if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
		default_timeout_ns = slto * NSEC_PER_USEC;
	}

	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
	LockTimeOutUsec = (uint32_t) (default_timeout_ns / NSEC_PER_USEC);
	LockTimeOut = (uint32_t)abstime;

	if (PE_parse_boot_argn("tlto_us", &slto, sizeof(slto))) {
		nanoseconds_to_absolutetime(slto * NSEC_PER_USEC, &abstime);
		TLockTimeOut = abstime;
	} else {
		TLockTimeOut = LockTimeOut >> 1;
	}

	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
		if (mtxspin > USEC_PER_SEC >> 4) {
			mtxspin = USEC_PER_SEC >> 4;
		}
		nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
	}

	MutexSpin = abstime;
	low_MutexSpin = MutexSpin;
	/*
	 * high_MutexSpin should be initialized as low_MutexSpin * real_ncpus, but
	 * real_ncpus is not set at this time
	 *
	 * NOTE: active spinning is disabled in arm. It can be activated
	 * by setting high_MutexSpin through the sysctl.
	 */
	high_MutexSpin = low_MutexSpin;
}
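/*
 * Example: with no boot-args the spinlock timeout defaults to
 * NSEC_PER_SEC >> 2 (250 ms); booting with slto_us=100000 lowers it to 100 ms.
 * TLockTimeOut defaults to half of LockTimeOut, and the mutex spin window is
 * 10 us unless overridden (and capped) by mtxspin.
 */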
/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	os_atomic_inc(&machine_info.physical_cpu, relaxed);
	os_atomic_inc(&machine_info.logical_cpu, relaxed);
}
/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	cpu_data_t *cpu_data_ptr;

	os_atomic_dec(&machine_info.physical_cpu, relaxed);
	os_atomic_dec(&machine_info.logical_cpu, relaxed);

	/*
	 * If we want to deal with outstanding IPIs, we need to do so
	 * relatively early in the processor_doshutdown path, as we pend
	 * decrementer interrupts using the IPI mechanism if we cannot
	 * immediately service them (if IRQ is masked).  Do so now.
	 *
	 * We aren't on the interrupt stack here; would it make more sense
	 * to disable signaling and then enable interrupts?  It might be a
	 * bit cleaner.
	 */
	cpu_data_ptr = getCpuDatap();
	cpu_data_ptr->cpu_running = FALSE;

	if (cpu_data_ptr != &BootCpuData) {
		/*
		 * Move all of this cpu's timers to the master/boot cpu,
		 * and poke it in case there's a sooner deadline for it to schedule.
		 */
		timer_queue_shutdown(&cpu_data_ptr->rtclock_timer.queue);
		cpu_xcall(BootCpuData.cpu_number, &timer_queue_expire_local, NULL);
	}

	cpu_signal_handler_internal(TRUE);
}
/*
 *	Routine:        ml_cpu_get_info
 *	Function:
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
	cache_info_t *cpuid_cache_info;

	cpuid_cache_info = cache_info();
	ml_cpu_info->vector_unit = 0;
	ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
	ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
	ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 7)
	ml_cpu_info->l2_settings = 1;
	ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
	ml_cpu_info->l2_settings = 0;
	ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
#endif
	ml_cpu_info->l3_settings = 0;
	ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}
unsigned int
ml_get_machine_mem(void)
{
	return machine_info.memory_size;
}
__attribute__((noreturn))
void
halt_all_cpus(boolean_t reboot)
{
	if (reboot) {
		printf("MACH Reboot\n");
		PEHaltRestart(kPERestartCPU);
	} else {
		printf("CPU halted\n");
		PEHaltRestart(kPEHaltCPU);
	}
	while (1) {
		;
	}
}

__attribute__((noreturn))
void
halt_cpu(void)
{
	halt_all_cpus(FALSE);
}
/*
 *	Routine:        machine_signal_idle
 *	Function:
 */
void
machine_signal_idle(
	processor_t processor)
{
	cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_deferred(
	processor_t processor)
{
	cpu_signal_deferred(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_cancel(
	processor_t processor)
{
	cpu_signal_cancel(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}
/*
 *	Routine:        ml_install_interrupt_handler
 *	Function:	Initialize Interrupt Handler
 */
void
ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	cpu_data_t *cpu_data_ptr;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->interrupt_nub = nub;
	cpu_data_ptr->interrupt_source = source;
	cpu_data_ptr->interrupt_target = target;
	cpu_data_ptr->interrupt_handler = handler;
	cpu_data_ptr->interrupt_refCon = refCon;

	cpu_data_ptr->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}
/*
 *	Routine:        ml_init_interrupt
 *	Function:	Initialize Interrupts
 */
void
ml_init_interrupt(void)
{
#if defined(HAS_IPI)
	/*
	 * ml_init_interrupt will get called once for each CPU, but this is redundant
	 * because there is only one global copy of the register for skye. do it only
	 * on the bootstrap cpu
	 */
	if (getCpuDatap()->cluster_master) {
		ml_cpu_signal_deferred_adjust_timer(deferred_ipi_timer_ns);
	}
#endif
}
/*
 *	Routine:        ml_init_timebase
 *	Function:	register and setup Timebase, Decrementer services
 */
void
ml_init_timebase(
	void            *args,
	tbd_ops_t       tbd_funcs,
	vm_offset_t     int_address,
	vm_offset_t     int_value __unused)
{
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = (cpu_data_t *)args;

	if ((cpu_data_ptr == &BootCpuData)
	    && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
		rtclock_timebase_func = *tbd_funcs;
		rtclock_timebase_addr = int_address;
	}
}
void
ml_parse_cpu_topology(void)
{
	DTEntry entry, child __unused;
	OpaqueDTEntryIterator iter;
	uint32_t cpu_boot_arg;
	int err;

	cpu_boot_arg = MAX_CPUS;

	PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg));

	err = DTLookupEntry(NULL, "/cpus", &entry);
	assert(err == kSuccess);

	err = DTInitEntryIterator(entry, &iter);
	assert(err == kSuccess);

	while (kSuccess == DTIterateEntries(&iter, &child)) {
		unsigned int propSize;
		void *prop = NULL;
		int cpu_id = avail_cpus++;

		if (kSuccess == DTGetProperty(child, "cpu-id", &prop, &propSize)) {
			cpu_id = *((int32_t*)prop);
		}

		assert(cpu_id < MAX_CPUS);
		assert(cpu_phys_ids[cpu_id] == (uint32_t)-1);

		if (boot_cpu == -1) {
			if (kSuccess != DTGetProperty(child, "state", &prop, &propSize)) {
				panic("unable to retrieve state for cpu %d", cpu_id);
			}

			if (strncmp((char*)prop, "running", propSize) == 0) {
				boot_cpu = cpu_id;
			}
		}
		if (kSuccess != DTGetProperty(child, "reg", &prop, &propSize)) {
			panic("unable to retrieve physical ID for cpu %d", cpu_id);
		}

		cpu_phys_ids[cpu_id] = *((uint32_t*)prop);

		if ((cpu_id > max_cpu_number) && ((cpu_id == boot_cpu) || (avail_cpus <= cpu_boot_arg))) {
			max_cpu_number = cpu_id;
		}
	}

	if (avail_cpus > cpu_boot_arg) {
		avail_cpus = cpu_boot_arg;
	}

	if (avail_cpus == 0) {
		panic("No cpus found!");
	}

	if (boot_cpu == -1) {
		panic("unable to determine boot cpu!");
	}

	/*
	 * Set TPIDRRO_EL0 to indicate the correct cpu number, as we may
	 * not be booting from cpu 0. Userspace will consume the current
	 * CPU number through this register. For non-boot cores, this is
	 * done in start.s (start_cpu) using the cpu_number field of the
	 * per-cpu data object.
	 */
	assert(__builtin_arm_rsr64("TPIDRRO_EL0") == 0);
	__builtin_arm_wsr64("TPIDRRO_EL0", (uint64_t)boot_cpu);
}
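/*
 * cpu_phys_ids[] maps a logical cpu id (the DT "cpu-id" property, or discovery
 * order when that property is absent) to the physical id from the DT "reg"
 * property; ml_get_cpu_number() below performs the reverse lookup.
 */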
unsigned int
ml_get_cpu_count(void)
{
	return avail_cpus;
}

int
ml_get_boot_cpu_number(void)
{
	return boot_cpu;
}

cluster_type_t
ml_get_boot_cluster(void)
{
	return boot_cluster;
}

int
ml_get_cpu_number(uint32_t phys_id)
{
	for (int log_id = 0; log_id <= ml_get_max_cpu_number(); ++log_id) {
		if (cpu_phys_ids[log_id] == phys_id) {
			return log_id;
		}
	}
	return -1;
}

int
ml_get_max_cpu_number(void)
{
	return max_cpu_number;
}
void
ml_lockdown_init()
{
	lockdown_handler_grp = lck_grp_alloc_init("lockdown_handler", NULL);
	assert(lockdown_handler_grp != NULL);

	lck_mtx_init(&lockdown_handler_lck, lockdown_handler_grp, NULL);

#if defined(KERNEL_INTEGRITY_CTRR)
	init_ctrr_cpu_start_lock();
#endif
}
kern_return_t
ml_lockdown_handler_register(lockdown_handler_t f, void *this)
{
	if (lockdown_handler || !f) {
		return KERN_FAILURE;
	}

	lck_mtx_lock(&lockdown_handler_lck);
	lockdown_handler = f;
	lockdown_this = this;

#if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR))
	lockdown_done = 1;
	lockdown_handler(this);
#else
	if (lockdown_done) {
		lockdown_handler(this);
	}
#endif
	lck_mtx_unlock(&lockdown_handler_lck);

	return KERN_SUCCESS;
}
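/*
 * Only one lockdown handler may be registered system-wide.  If lockdown has
 * already completed (lockdown_done), the handler fires immediately from the
 * registration path.  Illustrative call (handler and context are hypothetical):
 *
 *	ml_lockdown_handler_register(my_lockdown_handler, my_refcon);
 */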
void
ml_lockdown_run_handler()
{
	lck_mtx_lock(&lockdown_handler_lck);
	assert(!lockdown_done);

	lockdown_done = 1;
	if (lockdown_handler) {
		lockdown_handler(lockdown_this);
	}
	lck_mtx_unlock(&lockdown_handler_lck);
}
kern_return_t
ml_processor_register(ml_processor_info_t *in_processor_info,
    processor_t *processor_out, ipi_handler_t *ipi_handler_out,
    perfmon_interrupt_handler_func *pmi_handler_out)
{
	cpu_data_t *this_cpu_datap;
	processor_set_t pset;
	boolean_t is_boot_cpu;
	static unsigned int reg_cpu_count = 0;

	if (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number()) {
		return KERN_FAILURE;
	}

	if ((unsigned int)OSIncrementAtomic((SInt32*)&reg_cpu_count) >= avail_cpus) {
		return KERN_FAILURE;
	}

	if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
		is_boot_cpu = FALSE;
		this_cpu_datap = cpu_data_alloc(FALSE);
		cpu_data_init(this_cpu_datap);
	} else {
		is_boot_cpu = TRUE;
		this_cpu_datap = &BootCpuData;
	}

	assert(in_processor_info->log_id < MAX_CPUS);

	this_cpu_datap->cpu_id = in_processor_info->cpu_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
	if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) {
		goto processor_register_error;
	}

	if (!is_boot_cpu) {
		this_cpu_datap->cpu_number = in_processor_info->log_id;

		if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) {
			goto processor_register_error;
		}
	}

	this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle;
	this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch;
	nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
	this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);

	this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer;
	this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;

	this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler;
	this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
	this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
	this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;

	this_cpu_datap->cpu_cluster_type = in_processor_info->cluster_type;
	this_cpu_datap->cpu_cluster_id = in_processor_info->cluster_id;
	this_cpu_datap->cpu_l2_id = in_processor_info->l2_cache_id;
	this_cpu_datap->cpu_l2_size = in_processor_info->l2_cache_size;
	this_cpu_datap->cpu_l3_id = in_processor_info->l3_cache_id;
	this_cpu_datap->cpu_l3_size = in_processor_info->l3_cache_size;

#if HAS_CLUSTER
	this_cpu_datap->cluster_master = !OSTestAndSet(this_cpu_datap->cpu_cluster_id, &cluster_initialized);
#else /* HAS_CLUSTER */
	this_cpu_datap->cluster_master = is_boot_cpu;
#endif /* HAS_CLUSTER */

	pset = pset_find(in_processor_info->cluster_id, processor_pset(master_processor));
	assert(pset != NULL);
	kprintf("%s>cpu_id %p cluster_id %d cpu_number %d is type %d\n", __FUNCTION__, in_processor_info->cpu_id, in_processor_info->cluster_id, this_cpu_datap->cpu_number, in_processor_info->cluster_type);

	if (!is_boot_cpu) {
		processor_init((struct processor *)this_cpu_datap->cpu_processor,
		    this_cpu_datap->cpu_number, pset);

		if (this_cpu_datap->cpu_l2_access_penalty) {
			/*
			 * Cores that have a non-zero L2 access penalty compared
			 * to the boot processor should be de-prioritized by the
			 * scheduler, so that threads use the cores with better L2
			 * preferentially.
			 */
			processor_set_primary(this_cpu_datap->cpu_processor,
			    master_processor);
		}
	}

	*processor_out = this_cpu_datap->cpu_processor;
	*ipi_handler_out = cpu_signal_handler;
#if CPMU_AIC_PMI && MONOTONIC
	*pmi_handler_out = mt_cpmu_aic_pmi;
#else
	*pmi_handler_out = NULL;
#endif /* CPMU_AIC_PMI && MONOTONIC */
	if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) {
		*in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;
	}

	if (kpc_register_cpu(this_cpu_datap) != TRUE) {
		goto processor_register_error;
	}

	if (!is_boot_cpu) {
		random_cpu_init(this_cpu_datap->cpu_number);
		// now let next CPU register itself
		OSIncrementAtomic((SInt32*)&real_ncpus);
	}

	return KERN_SUCCESS;

processor_register_error:
	kpc_unregister_cpu(this_cpu_datap);
	if (!is_boot_cpu) {
		cpu_data_free(this_cpu_datap);
	}

	return KERN_FAILURE;
}
void
ml_init_arm_debug_interface(
	void * in_cpu_datap,
	vm_offset_t virt_address)
{
	((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
}
/*
 *	Routine:        init_ast_check
 *	Function:
 */
void
init_ast_check(
	__unused processor_t processor)
{
}

/*
 *	Routine:        cause_ast_check
 *	Function:
 */
void
cause_ast_check(
	processor_t processor)
{
	if (current_processor() != processor) {
		cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
	}
}
extern uint32_t cpu_idle_count;

void
ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
{
	*icp = ml_at_interrupt_context();
	*pidlep = (cpu_idle_count == real_ncpus);
}

/*
 *	Routine:        ml_cause_interrupt
 *	Function:	Generate a fake interrupt
 */
void
ml_cause_interrupt(void)
{
	return;                 /* BS_XXX */
}
/* Map memory map IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return io_map(phys_addr, size, VM_WIMG_IO);
}

/* Map memory map IO space (with protections specified) */
vm_offset_t
ml_io_map_with_prot(
	vm_offset_t phys_addr,
	vm_size_t size,
	vm_prot_t prot)
{
	return io_map_with_prot(phys_addr, size, VM_WIMG_IO, prot);
}

vm_offset_t
ml_io_map_wcomb(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return io_map(phys_addr, size, VM_WIMG_WCOMB);
}

/* boot memory allocation */
vm_offset_t
ml_static_malloc(
	__unused vm_size_t size)
{
	return (vm_offset_t) NULL;
}
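/*
 * Example from this file: rorgn_stash_range() maps the AMCC register block
 * with amcc_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)), which
 * yields an uncached device (VM_WIMG_IO) mapping of that physical range.
 */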
vm_offset_t
ml_map_high_window(
	vm_offset_t phys_addr,
	vm_size_t len)
{
	return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
}

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return phystokv(paddr);
}

vm_offset_t
ml_static_slide(
	vm_offset_t vaddr)
{
	return phystokv(vaddr + vm_kernel_slide - gVirtBase + gPhysBase);
}

vm_offset_t
ml_static_unslide(
	vm_offset_t vaddr)
{
	return ml_static_vtop(vaddr) - gPhysBase + gVirtBase - vm_kernel_slide;
}
extern tt_entry_t *arm_kva_to_tte(vm_offset_t va);

kern_return_t
ml_static_protect(
	vm_offset_t vaddr, /* kernel virtual address */
	vm_size_t size,
	vm_prot_t new_prot)
{
	pt_entry_t arm_prot = 0;
	pt_entry_t arm_block_prot = 0;
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	kern_return_t result = KERN_SUCCESS;

	if (vaddr < VM_MIN_KERNEL_ADDRESS) {
		panic("ml_static_protect(): %p < %p", (void *) vaddr, (void *) VM_MIN_KERNEL_ADDRESS);
		return KERN_FAILURE;
	}

	assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

	if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
		panic("ml_static_protect(): WX request on %p", (void *) vaddr);
	}

	/* Set up the protection bits, and block bits so we can validate block mappings. */
	if (new_prot & VM_PROT_WRITE) {
		arm_prot |= ARM_PTE_AP(AP_RWNA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
	} else {
		arm_prot |= ARM_PTE_AP(AP_RONA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
	}

	arm_prot |= ARM_PTE_NX;
	arm_block_prot |= ARM_TTE_BLOCK_NX;

	if (!(new_prot & VM_PROT_EXECUTE)) {
		arm_prot |= ARM_PTE_PNX;
		arm_block_prot |= ARM_TTE_BLOCK_PNX;
	}

	for (vaddr_cur = vaddr;
	    vaddr_cur < trunc_page_64(vaddr + size);
	    vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			tt_entry_t *tte2;
			pt_entry_t *pte_p;
			pt_entry_t ptmp;

#if XNU_MONITOR
			assert(!TEST_PAGE_RATIO_4);
			assert(!pmap_is_monitor(ppn));
#endif

			tte2 = arm_kva_to_tte(vaddr_cur);

			if (((*tte2) & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
				if ((((*tte2) & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
				    ((*tte2 & (ARM_TTE_BLOCK_NXMASK | ARM_TTE_BLOCK_PNXMASK | ARM_TTE_BLOCK_APMASK)) == arm_block_prot)) {
					/*
					 * We can support ml_static_protect on a block mapping if the mapping already has
					 * the desired protections.  We still want to run checks on a per-page basis.
					 */
					continue;
				}

				result = KERN_FAILURE;
				break;
			}

			pte_p = (pt_entry_t *)&((tt_entry_t*)(phystokv((*tte2) & ARM_TTE_TABLE_MASK)))[(((vaddr_cur) & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT)];
			ptmp = *pte_p;

			if ((ptmp & ARM_PTE_HINT_MASK) && ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot)) {
				/*
				 * The contiguous hint is similar to a block mapping for ml_static_protect; if the existing
				 * protections do not match the desired protections, then we will fail (as we cannot update
				 * this mapping without updating other mappings as well).
				 */
				result = KERN_FAILURE;
				break;
			}

			__unreachable_ok_push
			if (TEST_PAGE_RATIO_4) {
				unsigned int i;
				pt_entry_t *ptep_iter;

				ptep_iter = pte_p;
				for (i = 0; i < 4; i++, ptep_iter++) {
					/* Note that there is a hole in the HINT sanity checking here. */
					ptmp = *ptep_iter;

					/* We only need to update the page tables if the protections do not match. */
					if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) {
						ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot;
						*ptep_iter = ptmp;
					}
				}
			} else {
				ptmp = *pte_p;

				/* We only need to update the page tables if the protections do not match. */
				if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) {
					ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot;
					*pte_p = ptmp;
				}
			}
			__unreachable_ok_pop
		}
	}

	if (vaddr_cur > vaddr) {
		assert(((vaddr_cur - vaddr) & 0xFFFFFFFF00000000ULL) == 0);
		flush_mmu_tlb_region(vaddr, (uint32_t)(vaddr_cur - vaddr));
	}

	return result;
}
/*
 *	Routine:        ml_static_mfree
 *	Function:
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	uint32_t freed_pages = 0;

	/* It is acceptable (if bad) to fail to free. */
	if (vaddr < VM_MIN_KERNEL_ADDRESS) {
		return;
	}

	assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

	for (vaddr_cur = vaddr;
	    vaddr_cur < trunc_page_64(vaddr + size);
	    vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			/*
			 * It is not acceptable to fail to update the protections on a page
			 * we will release to the VM.  We need to either panic or continue.
			 * For now, we'll panic (to help flag if there is memory we can
			 * reclaim).
			 */
			if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
				panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
			}

			/*
			 * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme
			 * relies on the persistence of these mappings for all time.
			 */
			// pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE));

			vm_page_create(ppn, (ppn + 1));
			freed_pages++;
		}
	}
	vm_page_lockspin_queues();
	vm_page_wire_count -= freed_pages;
	vm_page_wire_count_initial -= freed_pages;
	vm_page_unlock_queues();
	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
}
/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}
/*
 * Routine: ml_nofault_copy
 * Function: Perform a physical mode copy if the source and destination have
 * valid translations in the kernel pmap. If translations are present, they are
 * assumed to be wired; e.g., no attempt is made to guarantee that the
 * translations obtained remain valid for the duration of the copy process.
 */
vm_size_t
ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	vm_size_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc))) {
			break;
		}
		if (!(cur_phys_dst = kvtophys(virtdst))) {
			break;
		}
		if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
		    !pmap_valid_address(trunc_page_64(cur_phys_src))) {
			break;
		}
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		}
		if (count > size) {
			count = size;
		}

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}
/*
 * Routine: ml_validate_nofault
 * Function: Validate that this address range has valid translations
 * in the kernel pmap. If translations are present, they are
 * assumed to be wired; i.e. no attempt is made to guarantee
 * that the translations persist after the check.
 * Returns: TRUE if the range is mapped and will not cause a fault,
 *          FALSE otherwise.
 */
boolean_t
ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size)
{
	addr64_t cur_phys_src;
	uint32_t count;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc))) {
			return FALSE;
		}
		if (!pmap_valid_address(trunc_page_64(cur_phys_src))) {
			return FALSE;
		}
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > size) {
			count = (uint32_t)size;
		}

		virtsrc += count;
		size -= count;
	}

	return TRUE;
}
void
ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
{
	*phys_addr = 0;
	*size = 0;
}

void
active_rt_threads(__unused boolean_t active)
{
}
static void
cpu_qos_cb_default(__unused int urgency, __unused uint64_t qos_param1, __unused uint64_t qos_param2)
{
	return;
}

cpu_qos_update_t cpu_qos_update = cpu_qos_cb_default;

void
cpu_qos_update_register(cpu_qos_update_t cpu_qos_cb)
{
	if (cpu_qos_cb != NULL) {
		cpu_qos_update = cpu_qos_cb;
	} else {
		cpu_qos_update = cpu_qos_cb_default;
	}
}
void
thread_tell_urgency(thread_urgency_t urgency, uint64_t rt_period, uint64_t rt_deadline, uint64_t sched_latency __unused, __unused thread_t nthread)
{
	SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, rt_deadline, sched_latency, 0);

	cpu_qos_update((int)urgency, rt_period, rt_deadline);

	SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, rt_deadline, 0, 0);
}
void
machine_run_count(__unused uint32_t count)
{
}

processor_t
machine_choose_processor(__unused processor_set_t pset, processor_t processor)
{
	return processor;
}

vm_offset_t ml_stack_base(void);
vm_size_t ml_stack_size(void);
vm_offset_t
ml_stack_base(void)
{
	uintptr_t local = (uintptr_t) &local;
	vm_offset_t intstack_top_ptr;

	intstack_top_ptr = getCpuDatap()->intstack_top;
	if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
		return intstack_top_ptr - INTSTACK_SIZE;
	} else {
		return current_thread()->kernel_stack;
	}
}

vm_size_t
ml_stack_size(void)
{
	uintptr_t local = (uintptr_t) &local;
	vm_offset_t intstack_top_ptr;

	intstack_top_ptr = getCpuDatap()->intstack_top;
	if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
		return INTSTACK_SIZE;
	} else {
		return kernel_stack_size;
	}
}
boolean_t
machine_timeout_suspended(void)
{
	return FALSE;
}

kern_return_t
ml_interrupt_prewarm(__unused uint64_t deadline)
{
	return KERN_FAILURE;
}
/*
 * Assumes fiq, irq disabled.
 */
void
ml_set_decrementer(uint32_t dec_value)
{
	cpu_data_t *cdp = getCpuDatap();

	assert(ml_get_interrupts_enabled() == FALSE);
	cdp->cpu_decrementer = dec_value;

	if (cdp->cpu_set_decrementer_func) {
		((void (*)(uint32_t))cdp->cpu_set_decrementer_func)(dec_value);
	} else {
		__asm__ volatile ("msr CNTP_TVAL_EL0, %0" : : "r"((uint64_t)dec_value));
	}
}
uint64_t
ml_get_hwclock()
{
	uint64_t timebase;

	// ISB required by ARMV7C.b section B8.1.2 & ARMv8 section D6.1.2
	// "Reads of CNTPCT[_EL0] can occur speculatively and out of order relative
	// to other instructions executed on the same processor."
	__builtin_arm_isb(ISB_SY);
	timebase = __builtin_arm_rsr64("CNTPCT_EL0");

	return timebase;
}

uint64_t
ml_get_timebase()
{
	return ml_get_hwclock() + getCpuDatap()->cpu_base_timebase;
}
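/*
 * Note: ml_get_timebase() is the raw CNTPCT_EL0 value plus the per-cpu
 * cpu_base_timebase bias, so all cores report a consistent kernel timebase.
 */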
uint32_t
ml_get_decrementer()
{
	cpu_data_t *cdp = getCpuDatap();
	uint32_t dec;

	assert(ml_get_interrupts_enabled() == FALSE);

	if (cdp->cpu_get_decrementer_func) {
		dec = ((uint32_t (*)(void))cdp->cpu_get_decrementer_func)();
	} else {
		uint64_t wide_val;

		__asm__ volatile ("mrs %0, CNTP_TVAL_EL0" : "=r"(wide_val));
		dec = (uint32_t)wide_val;
		assert(wide_val == (uint64_t)dec);
	}

	return dec;
}
boolean_t
ml_get_timer_pending()
{
	uint64_t cntp_ctl;

	__asm__ volatile ("mrs %0, CNTP_CTL_EL0" : "=r"(cntp_ctl));
	return ((cntp_ctl & CNTP_CTL_EL0_ISTATUS) != 0) ? TRUE : FALSE;
}
boolean_t
ml_wants_panic_trap_to_debugger(void)
{
	boolean_t result = FALSE;
#if XNU_MONITOR
	/*
	 * This looks racy, but if we are in the PPL, preemption will be
	 * disabled.
	 */
	result = ((pmap_get_cpu_data()->ppl_state == PPL_STATE_DISPATCH) && pmap_ppl_locked_down);
#endif
	return result;
}
static void
cache_trap_error(thread_t thread, vm_map_address_t fault_addr)
{
	mach_exception_data_type_t exc_data[2];
	arm_saved_state_t *regs = get_user_regs(thread);

	set_saved_state_far(regs, fault_addr);

	exc_data[0] = KERN_INVALID_ADDRESS;
	exc_data[1] = fault_addr;

	exception_triage(EXC_BAD_ACCESS, exc_data, 2);
}
static void
cache_trap_recover()
{
	vm_map_address_t fault_addr;

	__asm__ volatile ("mrs %0, FAR_EL1" : "=r"(fault_addr));

	cache_trap_error(current_thread(), fault_addr);
}
static void
set_cache_trap_recover(thread_t thread)
{
#if defined(HAS_APPLE_PAC)
	thread->recover = (vm_address_t)ptrauth_auth_and_resign(&cache_trap_recover,
	    ptrauth_key_function_pointer, 0,
	    ptrauth_key_function_pointer, ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER));
#else /* defined(HAS_APPLE_PAC) */
	thread->recover = (vm_address_t)cache_trap_recover;
#endif /* defined(HAS_APPLE_PAC) */
}
static void
dcache_flush_trap(vm_map_address_t start, vm_map_size_t size)
{
	vm_map_address_t end = start + size;
	thread_t thread = current_thread();
	vm_offset_t old_recover = thread->recover;

	/* Check bounds */
	if (task_has_64Bit_addr(current_task())) {
		if (end > MACH_VM_MAX_ADDRESS) {
			cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
		}
	} else {
		if (end > VM_MAX_ADDRESS) {
			cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
		}
	}

	if (start > end) {
		cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1));
	}

	set_cache_trap_recover(thread);

	/*
	 * We're coherent on Apple ARM64 CPUs, so this could be a nop.  However,
	 * if the region given us is bad, it would be good to catch it and
	 * crash, ergo we still do the flush.
	 */
	FlushPoC_DcacheRegion(start, (uint32_t)size);

	/* Restore recovery function */
	thread->recover = old_recover;

	/* Return (caller does exception return) */
}
static void
icache_invalidate_trap(vm_map_address_t start, vm_map_size_t size)
{
	vm_map_address_t end = start + size;
	thread_t thread = current_thread();
	vm_offset_t old_recover = thread->recover;

	/* Check bounds */
	if (task_has_64Bit_addr(current_task())) {
		if (end > MACH_VM_MAX_ADDRESS) {
			cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
		}
	} else {
		if (end > VM_MAX_ADDRESS) {
			cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
		}
	}

	if (start > end) {
		cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1));
	}

	set_cache_trap_recover(thread);

	/* Invalidate iCache to point of unification */
	InvalidatePoU_IcacheRegion(start, (uint32_t)size);

	/* Restore recovery function */
	thread->recover = old_recover;

	/* Return (caller does exception return) */
}
__attribute__((noreturn))
void
platform_syscall(arm_saved_state_t *state)
{
	uint32_t code;

#define platform_syscall_kprintf(x...) /* kprintf("platform_syscall: " x) */

	code = (uint32_t)get_saved_state_reg(state, 3);
	switch (code) {
	case 0:
		/* I-Cache flush */
		platform_syscall_kprintf("icache flush requested.\n");
		icache_invalidate_trap(get_saved_state_reg(state, 0), get_saved_state_reg(state, 1));
		break;
	case 1:
		/* D-Cache flush */
		platform_syscall_kprintf("dcache flush requested.\n");
		dcache_flush_trap(get_saved_state_reg(state, 0), get_saved_state_reg(state, 1));
		break;
	case 2:
		/* set cthread */
		platform_syscall_kprintf("set cthread self.\n");
		thread_set_cthread_self(get_saved_state_reg(state, 0));
		break;
	case 3:
		/* get cthread */
		platform_syscall_kprintf("get cthread self.\n");
		set_saved_state_reg(state, 0, thread_get_cthread_self());
		break;
	default:
		platform_syscall_kprintf("unknown: %d\n", code);
		break;
	}

	thread_exception_return();
}
static void
_enable_timebase_event_stream(uint32_t bit_index)
{
	uint64_t cntkctl; /* One wants to use 32 bits, but "mrs" prefers it this way */

	if (bit_index >= 64) {
		panic("%s: invalid bit index (%u)", __FUNCTION__, bit_index);
	}

	__asm__ volatile ("mrs %0, CNTKCTL_EL1" : "=r"(cntkctl));

	cntkctl |= (bit_index << CNTKCTL_EL1_EVENTI_SHIFT);
	cntkctl |= CNTKCTL_EL1_EVNTEN;
	cntkctl |= CNTKCTL_EL1_EVENTDIR; /* 1->0; why not? */

	/*
	 * If the SOC supports it (and it isn't broken), enable
	 * EL0 access to the physical timebase register.
	 */
	if (user_timebase_type() != USER_TIMEBASE_NONE) {
		cntkctl |= CNTKCTL_EL1_PL0PCTEN;
	}

	__asm__ volatile ("msr CNTKCTL_EL1, %0" : : "r"(cntkctl));
}
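/*
 * Illustration (numbers are hypothetical): EVENTI selects which bit of the
 * counter drives the event stream; with bit_index = 2 and a 24 MHz counter,
 * a 1->0 transition of bit 2 occurs every 8 ticks, i.e. roughly every 333 ns.
 */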
/*
 * Turn timer on, unmask that interrupt.
 */
static void
_enable_virtual_timer(void)
{
	uint64_t cntvctl = CNTP_CTL_EL0_ENABLE; /* One wants to use 32 bits, but "mrs" prefers it this way */

	__asm__ volatile ("msr CNTP_CTL_EL0, %0" : : "r"(cntvctl));
}

uint64_t events_per_sec = 0;
)
2093 _enable_timebase_event_stream(fiq_eventi
);
2095 /* Interrupts still disabled. */
2096 assert(ml_get_interrupts_enabled() == FALSE
);
2097 _enable_virtual_timer();
void
fiq_context_bootstrap(boolean_t enable_fiq)
{
#if defined(APPLE_ARM64_ARCH_FAMILY) || defined(BCM2837)
	/* Could fill in our own ops here, if we needed them */
	uint64_t ticks_per_sec, ticks_per_event;
	uint32_t bit_index;

	ticks_per_sec = gPEClockFrequencyInfo.timebase_frequency_hz;
	ticks_per_event = ticks_per_sec / events_per_sec;
	bit_index = flsll(ticks_per_event) - 1; /* Highest bit set */

	/* Round up to power of two */
	if ((ticks_per_event & ((1 << bit_index) - 1)) != 0) {
		bit_index++;
	}

	/*
	 * The timer can only trigger on rising or falling edge,
	 * not both; we don't care which we trigger on, but we
	 * do need to adjust which bit we are interested in to
	 * account for this.
	 */
	if (bit_index != 0) {
		bit_index--;
	}

	fiq_eventi = bit_index;
#else
#error Need a board configuration.
#endif
	fiq_context_init(enable_fiq);
}
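/*
 * Worked example (hypothetical numbers): with ticks_per_sec = 24 MHz and
 * events_per_sec = 1000, ticks_per_event = 24000, so flsll() - 1 gives bit 14;
 * 24000 is not a power of two so the index rounds up to 15, then steps back to
 * 14 for the edge-trigger adjustment, i.e. an event every 16384 ticks (~683 us).
 */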
boolean_t
ml_delay_should_spin(uint64_t interval)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_idle_latency) {
		return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
	} else {
		/*
		 * Early boot, latency is unknown. Err on the side of blocking,
		 * which should always be safe, even if slow.
		 */
		return FALSE;
	}
}

boolean_t
ml_thread_is64bit(thread_t thread)
{
	return thread_is_64bit_addr(thread);
}
void
ml_delay_on_yield(void)
{
#if DEVELOPMENT || DEBUG
	if (yield_delay_us) {
		delay(yield_delay_us);
	}
#endif
}

void
ml_timer_evaluate(void)
{
}

boolean_t
ml_timer_forced_evaluation(void)
{
	return FALSE;
}
)
2180 return t
->machine
.energy_estimate_nj
;
2185 ml_gpu_stat_update(__unused
uint64_t gpu_ns_delta
)
2189 * For now: update the resource coalition stats of the
2190 * current thread's coalition
2192 task_coalition_update_gpu_stats(current_task(), gpu_ns_delta
);
2197 ml_gpu_stat(__unused thread_t t
)
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
static void
timer_state_event(boolean_t switch_to_kernel)
{
	thread_t thread = current_thread();
	if (!thread->precise_user_kernel_time) {
		return;
	}

	processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data;
	uint64_t now = ml_get_timebase();

	timer_stop(pd->current_state, now);
	pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
	timer_start(pd->current_state, now);

	timer_stop(pd->thread_timer, now);
	pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
	timer_start(pd->thread_timer, now);
}

void
timer_state_event_user_to_kernel(void)
{
	timer_state_event(TRUE);
}

void
timer_state_event_kernel_to_user(void)
{
	timer_state_event(FALSE);
}
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void) __attribute__((const));
thread_t
current_act(void)
{
	return current_thread_fast();
}

#undef current_thread
extern thread_t current_thread(void) __attribute__((const));
thread_t
current_thread(void)
{
	return current_thread_fast();
}
ex_cb_info_t ex_cb_info[EXCB_CLASS_MAX];

/*
 * Callback registration
 * Currently we support only one registered callback per class but
 * it should be possible to support more callbacks
 */
kern_return_t
ex_cb_register(
	ex_cb_class_t   cb_class,
	ex_cb_t         cb,
	void            *refcon)
{
	ex_cb_info_t *pInfo = &ex_cb_info[cb_class];

	if ((NULL == cb) || (cb_class >= EXCB_CLASS_MAX)) {
		return KERN_INVALID_VALUE;
	}

	if (NULL == pInfo->cb) {
		pInfo->cb = cb;
		pInfo->refcon = refcon;
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

/*
 * Called internally by platform kernel to invoke the registered callback for class
 */
ex_cb_action_t
ex_cb_invoke(
	ex_cb_class_t   cb_class,
	vm_offset_t     far)
{
	ex_cb_info_t *pInfo = &ex_cb_info[cb_class];
	ex_cb_state_t state = {far};

	if (cb_class >= EXCB_CLASS_MAX) {
		panic("Invalid exception callback class 0x%x\n", cb_class);
	}

	if (pInfo->cb) {
		return pInfo->cb(cb_class, pInfo->refcon, &state);
	}
	return EXCB_ACTION_NONE;
}
#if defined(HAS_APPLE_PAC)
void
ml_task_set_disable_user_jop(task_t task, boolean_t disable_user_jop)
{
	task->disable_user_jop = disable_user_jop;
}

void
ml_thread_set_disable_user_jop(thread_t thread, boolean_t disable_user_jop)
{
	thread->machine.disable_user_jop = disable_user_jop;
}

void
ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit)
{
	if (inherit) {
		task->rop_pid = parent_task->rop_pid;
	} else {
		task->rop_pid = early_random();
	}
}
#endif /* defined(HAS_APPLE_PAC) */
#if defined(HAS_APPLE_PAC)
/**
 * ml_auth_ptr_unchecked: call this instead of the ptrauth_auth_data
 * intrinsic when you don't want to trap on auth fail.
 */
void *
ml_auth_ptr_unchecked(void *ptr, ptrauth_key key, uint64_t modifier)
{
	switch (key & 0x3) {
	case ptrauth_key_asia:
		asm volatile ("autia %[ptr], %[modifier]" : [ptr] "+r"(ptr) : [modifier] "r"(modifier));
		break;
	case ptrauth_key_asib:
		asm volatile ("autib %[ptr], %[modifier]" : [ptr] "+r"(ptr) : [modifier] "r"(modifier));
		break;
	case ptrauth_key_asda:
		asm volatile ("autda %[ptr], %[modifier]" : [ptr] "+r"(ptr) : [modifier] "r"(modifier));
		break;
	case ptrauth_key_asdb:
		asm volatile ("autdb %[ptr], %[modifier]" : [ptr] "+r"(ptr) : [modifier] "r"(modifier));
		break;
	}

	return ptr;
}
#endif /* defined(HAS_APPLE_PAC) */
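/*
 * Usage note: unlike the ptrauth_auth_data() intrinsic, a failed
 * authentication here does not trap; on cores without FPAC the AUT*
 * instruction leaves a poisoned pointer that faults when dereferenced.
 * Illustrative call:
 *	void *p = ml_auth_ptr_unchecked(signed_ptr, ptrauth_key_asda, modifier);
 */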