/*
 * Copyright (c) 2007-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm64/proc_reg.h>
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/io_map_entries.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/rtclock.h>
#include <console/serial_protos.h>
#include <kern/machine.h>
#include <prng/random.h>
#include <kern/startup.h>
#include <kern/thread.h>
#include <mach/machine.h>
#include <machine/atomic.h>
#include <vm/vm_page.h>
#include <sys/kdebug.h>
#include <kern/coalition.h>
#include <pexpert/device_tree.h>

#include <IOKit/IOPlatformExpert.h>

#if defined(KERNEL_INTEGRITY_KTRR)
#include <libkern/kernel_mach_header.h>
#endif

static int max_cpus_initialized = 0;
#define MAX_CPUS_SET 0x1
#define MAX_CPUS_WAIT 0x2

uint32_t LockTimeOut;
uint32_t LockTimeOutUsec;
uint64_t MutexSpin;
boolean_t is_clock_configured = FALSE;

extern int mach_assert;
extern volatile uint32_t debug_enabled;

void machine_conf(void);

thread_t Idle_context(void);

static uint32_t cpu_phys_ids[MAX_CPUS] = {[0 ... MAX_CPUS - 1] = (uint32_t)-1};
static unsigned int avail_cpus = 0;
static int boot_cpu = -1;
static int max_cpu_number = 0;
cluster_type_t boot_cluster = CLUSTER_TYPE_SMP;

lockdown_handler_t lockdown_handler;
void *lockdown_this;
lck_mtx_t lockdown_handler_lck;
lck_grp_t *lockdown_handler_grp;
int lockdown_done;

void ml_lockdown_init(void);
void ml_lockdown_run_handler(void);
uint32_t get_arm_cpu_version(void);

void ml_cpu_signal(unsigned int cpu_id __unused)
{
    panic("Platform does not support ACC Fast IPI");
}

void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs)
{
    (void)nanosecs;
    panic("Platform does not support ACC Fast IPI");
}

uint64_t ml_cpu_signal_deferred_get_timer() {
    return 0;
}

void ml_cpu_signal_deferred(unsigned int cpu_id __unused)
{
    panic("Platform does not support ACC Fast IPI deferral");
}

void ml_cpu_signal_retract(unsigned int cpu_id __unused)
{
    panic("Platform does not support ACC Fast IPI retraction");
}

void machine_idle(void)
{
    __asm__ volatile ("msr DAIFSet, %[mask]" ::[mask] "i" (DAIFSC_IRQF | DAIFSC_FIQF));
    Idle_context();
    __asm__ volatile ("msr DAIFClr, %[mask]" ::[mask] "i" (DAIFSC_IRQF | DAIFSC_FIQF));
}

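/*
 * Idle entry masks both IRQ and FIQ around the hand-off into Idle_context()
 * so the transition cannot be interrupted part-way; once the core resumes
 * here, the DAIFClr above re-enables both sources and any interrupt that
 * became pending while idling is taken immediately.
 */
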
boolean_t get_vfp_enabled(void)
{
    return TRUE;
}

void OSSynchronizeIO(void)
{
    __builtin_arm_dsb(DSB_SY);
}

uint64_t get_aux_control(void)
{
    uint64_t value;

    MRS(value, "ACTLR_EL1");
    return value;
}

uint64_t get_mmu_control(void)
{
    uint64_t value;

    MRS(value, "SCTLR_EL1");
    return value;
}

uint64_t get_tcr(void)
{
    uint64_t value;

    MRS(value, "TCR_EL1");
    return value;
}

boolean_t ml_get_interrupts_enabled(void)
{
    uint64_t value;

    __asm__ volatile("mrs %0, DAIF" : "=r"(value));
    if (value & DAIF_IRQF)
        return FALSE;
    return TRUE;
}

pmap_paddr_t get_mmu_ttb(void)
{
    pmap_paddr_t value;

    MRS(value, "TTBR0_EL1");
    return value;
}

void set_mmu_ttb(pmap_paddr_t value)
{
#if __ARM_KERNEL_PROTECT__
    /* All EL1-mode ASIDs are odd. */
    value |= (1ULL << TTBR_ASID_SHIFT);
#endif /* __ARM_KERNEL_PROTECT__ */

    __builtin_arm_dsb(DSB_ISH);
    MSR("TTBR0_EL1", value);
    __builtin_arm_isb(ISB_SY);
}

static uint32_t get_midr_el1(void)
{
    uint64_t value;

    MRS(value, "MIDR_EL1");

    /* This is a 32-bit register. */
    return (uint32_t) value;
}

uint32_t get_arm_cpu_version(void)
{
    uint32_t value = get_midr_el1();

    /* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */
    return ((value & MIDR_EL1_REV_MASK) >> MIDR_EL1_REV_SHIFT) | ((value & MIDR_EL1_VAR_MASK) >> (MIDR_EL1_VAR_SHIFT - 4));
}

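/*
 * Worked example (illustrative value only): an MIDR_EL1 of 0x410FD071 carries
 * variant 0x0 in bits [23:20] and revision 0x1 in bits [3:0], so
 * get_arm_cpu_version() returns 0x01; variant 0x2 with revision 0x1 would
 * return 0x21.
 */
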
/*
 * user_cont_hwclock_allowed()
 *
 * Indicates whether we allow EL0 to read the physical timebase (CNTPCT_EL0)
 * as a continuous time source (e.g. from mach_continuous_time)
 */
boolean_t user_cont_hwclock_allowed(void)
{
    return FALSE;
}

/*
 * user_timebase_allowed()
 *
 * Indicates whether we allow EL0 to read the physical timebase (CNTPCT_EL0).
 */
boolean_t user_timebase_allowed(void)
{
    return TRUE;
}

boolean_t arm64_wfe_allowed(void)
{
    return TRUE;
}

#if defined(KERNEL_INTEGRITY_KTRR)

uint64_t rorgn_begin __attribute__((section("__DATA, __const"))) = 0;
uint64_t rorgn_end   __attribute__((section("__DATA, __const"))) = 0;
vm_offset_t amcc_base;

static void assert_unlocked(void);
static void assert_amcc_cache_disabled(void);
static void lock_amcc(void);
static void lock_mmu(uint64_t begin, uint64_t end);

void rorgn_stash_range(void)
{
#if DEVELOPMENT || DEBUG
    boolean_t rorgn_disable = FALSE;

    PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable));

    if (rorgn_disable) {
        /* take early out if boot arg present, don't query any machine registers to avoid
         * dependency on amcc DT entry
         */
        return;
    }
#endif

    /* Get the AMC values, and stash them into rorgn_begin, rorgn_end. */

#if defined(KERNEL_INTEGRITY_KTRR)
    uint64_t soc_base = 0;
    DTEntry entryP = NULL;
    uintptr_t *reg_prop = NULL;
    uint32_t prop_size = 0;
    int rc;

    soc_base = pe_arm_get_soc_base_phys();
    rc = DTFindEntry("name", "mcc", &entryP);
    assert(rc == kSuccess);
    rc = DTGetProperty(entryP, "reg", (void **)&reg_prop, &prop_size);
    assert(rc == kSuccess);
    amcc_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1));
#else
#error "KERNEL_INTEGRITY config error"
#endif

#if defined(KERNEL_INTEGRITY_KTRR)
    assert(rRORGNENDADDR > rRORGNBASEADDR);
    rorgn_begin = (rRORGNBASEADDR << ARM_PGSHIFT) + gPhysBase;
    rorgn_end   = (rRORGNENDADDR << ARM_PGSHIFT) + gPhysBase;
#else
#error KERNEL_INTEGRITY config error
#endif /* defined (KERNEL_INTEGRITY_KTRR) */
}

static void assert_unlocked() {
    uint64_t ktrr_lock = 0;
    uint32_t rorgn_lock = 0;

    assert(amcc_base);
#if defined(KERNEL_INTEGRITY_KTRR)
    rorgn_lock = rRORGNLOCK;
    ktrr_lock = __builtin_arm_rsr64(ARM64_REG_KTRR_LOCK_EL1);
#else
#error KERNEL_INTEGRITY config error
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

    assert(!ktrr_lock);
    assert(!rorgn_lock);
}

static void lock_amcc() {
#if defined(KERNEL_INTEGRITY_KTRR)
    rRORGNLOCK = 1;
    __builtin_arm_isb(ISB_SY);
#else
#error KERNEL_INTEGRITY config error
#endif
}

static void lock_mmu(uint64_t begin, uint64_t end) {
#if defined(KERNEL_INTEGRITY_KTRR)
    __builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin);
    __builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end);
    __builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL);
    __builtin_arm_isb(ISB_SY);
#else
#error KERNEL_INTEGRITY config error
#endif
}

static void assert_amcc_cache_disabled() {
#if defined(KERNEL_INTEGRITY_KTRR)
    assert((rMCCGEN & 1) == 0); /* assert M$ disabled or LLC clean will be unreliable */
#else
#error KERNEL_INTEGRITY config error
#endif
}

/*
 * void rorgn_lockdown(void)
 *
 * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked
 *
 * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in
 *       start.s:start_cpu() for subsequent wake/resume of all cores
 */
void rorgn_lockdown(void)
{
    vm_offset_t ktrr_begin, ktrr_end;
    unsigned long plt_segsz, last_segsz;

#if DEVELOPMENT || DEBUG
    boolean_t ktrr_disable = FALSE;

    PE_parse_boot_argn("-unsafe_kernel_text", &ktrr_disable, sizeof(ktrr_disable));

    if (ktrr_disable) {
        /*
         * take early out if boot arg present, since we may not have amcc DT entry present
         * we can't assert that iboot hasn't programmed the RO region lockdown registers
         */
        goto out;
    }
#endif /* DEVELOPMENT || DEBUG */

    assert_unlocked();

    /* [x] - Use final method of determining all kernel text range or expect crashes */

    ktrr_begin = (uint64_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &plt_segsz);
    assert(ktrr_begin && gVirtBase && gPhysBase);

    ktrr_begin = kvtophys(ktrr_begin);

    /* __LAST is not part of the MMU KTRR region (it is however part of the AMCC KTRR region) */
    ktrr_end = (uint64_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &last_segsz);
    ktrr_end = (kvtophys(ktrr_end) - 1) & ~PAGE_MASK;

    /* ensure that iboot and xnu agree on the ktrr range */
    assert(rorgn_begin == ktrr_begin && rorgn_end == (ktrr_end + last_segsz));
    /* assert that __LAST segment containing privileged insns is only a single page */
    assert(last_segsz == PAGE_SIZE);

    printf("KTRR Begin: %p End: %p, setting lockdown\n", (void *)ktrr_begin, (void *)ktrr_end);

    /* [x] - ensure all in flight writes are flushed to AMCC before enabling RO Region Lock */

    assert_amcc_cache_disabled();

    CleanPoC_DcacheRegion_Force(phystokv(ktrr_begin),
        (unsigned)((ktrr_end + last_segsz) - ktrr_begin + PAGE_MASK));

    lock_amcc();

    lock_mmu(ktrr_begin, ktrr_end);

#if DEVELOPMENT || DEBUG
out:
#endif

    /* now we can run lockdown handler */
    ml_lockdown_run_handler();
}

#endif /* defined(KERNEL_INTEGRITY_KTRR)*/

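/*
 * Net effect of rorgn_lockdown() above: verify neither the AMCC RO region nor
 * the MMU KTRR registers are already locked, confirm the AMCC cache is
 * disabled, force the protected range out to memory, then lock the AMCC RO
 * region and finally the MMU KTRR registers. From that point on the covered
 * kernel text range cannot be modified until reset.
 */
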
void
machine_startup(__unused boot_args * args)
{
    int boot_arg;

    PE_parse_boot_argn("assert", &mach_assert, sizeof (mach_assert));

    if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
        default_preemption_rate = boot_arg;
    }
    if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof (boot_arg))) {
        default_bg_preemption_rate = boot_arg;
    }

    machine_conf();

    /*
     * Kick off the kernel bootstrap.
     */
    kernel_bootstrap();
    /* NOTREACHED */
}

void machine_lockdown_preflight(void)
{
#if CONFIG_KERNEL_INTEGRITY

#if defined(KERNEL_INTEGRITY_KTRR)
    rorgn_stash_range();
#endif

#endif
}

void machine_lockdown(void)
{
#if CONFIG_KERNEL_INTEGRITY
#if KERNEL_INTEGRITY_WT
    /*
     * Notify the monitor about the completion of early kernel bootstrap.
     * From this point forward it will enforce the integrity of kernel text,
     * rodata and page tables.
     */
    monitor_call(MONITOR_LOCKDOWN, 0, 0, 0);
#endif /* KERNEL_INTEGRITY_WT */

#if defined(KERNEL_INTEGRITY_KTRR)
    /*
     * Lock physical KTRR region. KTRR region is read-only. Memory outside
     * the region is not executable at EL1.
     */
    rorgn_lockdown();
#endif /* defined(KERNEL_INTEGRITY_KTRR)*/

#endif /* CONFIG_KERNEL_INTEGRITY */
}

char *
machine_boot_info(
                  __unused char *buf,
                  __unused vm_size_t size)
{
    return (PE_boot_args());
}

void
machine_conf(void)
{
    /*
     * This is known to be inaccurate. mem_size should always be capped at 2 GB
     */
    machine_info.memory_size = (uint32_t)mem_size;
}

void
machine_init(void)
{
    debug_log_init();
    clock_config();
    is_clock_configured = TRUE;
    if (debug_enabled)
        pmap_map_globals();
}

void
slave_machine_init(__unused void *param)
{
    cpu_machine_init();     /* Initialize the processor */
    clock_init();           /* Init the clock */
}

/*
 *      Routine:        machine_processor_shutdown
 *      Function:
 */
thread_t
machine_processor_shutdown(
                           __unused thread_t thread,
                           void (*doshutdown) (processor_t),
                           processor_t processor)
{
    return (Shutdown_context(doshutdown, processor));
}

/*
 *      Routine:        ml_init_max_cpus
 *      Function:
 */
void
ml_init_max_cpus(unsigned int max_cpus)
{
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    if (max_cpus_initialized != MAX_CPUS_SET) {
        machine_info.max_cpus = max_cpus;
        machine_info.physical_cpu_max = max_cpus;
        machine_info.logical_cpu_max = max_cpus;
        if (max_cpus_initialized == MAX_CPUS_WAIT)
            thread_wakeup((event_t) &max_cpus_initialized);
        max_cpus_initialized = MAX_CPUS_SET;
    }
    (void) ml_set_interrupts_enabled(current_state);
}

/*
 *      Routine:        ml_get_max_cpus
 *      Function:
 */
unsigned int
ml_get_max_cpus(void)
{
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    if (max_cpus_initialized != MAX_CPUS_SET) {
        max_cpus_initialized = MAX_CPUS_WAIT;
        assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT);
        (void) thread_block(THREAD_CONTINUE_NULL);
    }
    (void) ml_set_interrupts_enabled(current_state);
    return (machine_info.max_cpus);
}

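/*
 * ml_init_max_cpus() and ml_get_max_cpus() form a small handshake: a caller
 * asking for the count before the platform expert has published it parks on
 * &max_cpus_initialized (MAX_CPUS_WAIT) and is woken once MAX_CPUS_SET is
 * recorded, so machine_info.max_cpus is never read before it is valid.
 */
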
/*
 *      Routine:        ml_init_lock_timeout
 *      Function:
 */
void
ml_init_lock_timeout(void)
{
    uint64_t abstime;
    uint64_t mtxspin;
    uint64_t default_timeout_ns = NSEC_PER_SEC>>2;
    uint32_t slto;

    if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto)))
        default_timeout_ns = slto * NSEC_PER_USEC;

    nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
    LockTimeOutUsec = (uint32_t)(abstime / NSEC_PER_USEC);
    LockTimeOut = (uint32_t)abstime;

    if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
        if (mtxspin > USEC_PER_SEC>>4)
            mtxspin = USEC_PER_SEC>>4;
        nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
    } else {
        nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
    }
    MutexSpin = abstime;
}

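/*
 * Worked example of the defaults above: with no "slto_us" boot-arg,
 * default_timeout_ns starts at NSEC_PER_SEC >> 2, i.e. 250ms; "slto_us=500"
 * would give 500 * NSEC_PER_USEC = 500us instead. An "mtxspin" boot-arg is
 * clamped to USEC_PER_SEC >> 4 (62500us) before conversion to absolute time,
 * and the default mutex spin window is 10us.
 */
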
/*
 *      This is called from the machine-independent routine cpu_up()
 *      to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
    hw_atomic_add(&machine_info.physical_cpu, 1);
    hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 *      This is called from the machine-independent routine cpu_down()
 *      to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
    cpu_data_t *cpu_data_ptr;

    hw_atomic_sub(&machine_info.physical_cpu, 1);
    hw_atomic_sub(&machine_info.logical_cpu, 1);

    /*
     * If we want to deal with outstanding IPIs, we need to do so relatively
     * early in the processor_doshutdown path, as we pend decrementer
     * interrupts using the IPI mechanism if we cannot immediately service
     * them (if IRQ is masked).  Do so now.
     *
     * We aren't on the interrupt stack here; would it make
     * more sense to disable signaling and then enable
     * interrupts?  It might be a bit cleaner.
     */
    cpu_data_ptr = getCpuDatap();
    cpu_data_ptr->cpu_running = FALSE;

    cpu_signal_handler_internal(TRUE);
}

/*
 *      Routine:        ml_cpu_get_info
 *      Function:
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
    cache_info_t *cpuid_cache_info;

    cpuid_cache_info = cache_info();
    ml_cpu_info->vector_unit = 0;
    ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
    ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
    ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 7)
    ml_cpu_info->l2_settings = 1;
    ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
    ml_cpu_info->l2_settings = 0;
    ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
#endif
    ml_cpu_info->l3_settings = 0;
    ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}

unsigned int
ml_get_machine_mem(void)
{
    return (machine_info.memory_size);
}

__attribute__((noreturn))
void
halt_all_cpus(boolean_t reboot)
{
    if (reboot) {
        printf("MACH Reboot\n");
        PEHaltRestart(kPERestartCPU);
    } else {
        printf("CPU halted\n");
        PEHaltRestart(kPEHaltCPU);
    }
    while (1);
}

__attribute__((noreturn))
void
halt_cpu(void)
{
    halt_all_cpus(FALSE);
}

/*
 *      Routine:        machine_signal_idle
 *      Function:
 */
void
machine_signal_idle(
                    processor_t processor)
{
    cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_deferred(
                             processor_t processor)
{
    cpu_signal_deferred(processor_to_cpu_datap(processor));
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_cancel(
                           processor_t processor)
{
    cpu_signal_cancel(processor_to_cpu_datap(processor));
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

/*
 *      Routine:        ml_install_interrupt_handler
 *      Function:       Initialize Interrupt Handler
 */
void
ml_install_interrupt_handler(
                             void *nub,
                             int source,
                             void *target,
                             IOInterruptHandler handler,
                             void *refCon)
{
    cpu_data_t *cpu_data_ptr;
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    cpu_data_ptr->interrupt_nub = nub;
    cpu_data_ptr->interrupt_source = source;
    cpu_data_ptr->interrupt_target = target;
    cpu_data_ptr->interrupt_handler = handler;
    cpu_data_ptr->interrupt_refCon = refCon;

    cpu_data_ptr->interrupts_enabled = TRUE;
    (void) ml_set_interrupts_enabled(current_state);

    initialize_screen(NULL, kPEAcquireScreen);
}

/*
 *      Routine:        ml_init_interrupt
 *      Function:       Initialize Interrupts
 */
void
ml_init_interrupt(void)
{
}

/*
 *      Routine:        ml_init_timebase
 *      Function:       register and setup Timebase, Decrementer services
 */
void ml_init_timebase(
        void            *args,
        tbd_ops_t       tbd_funcs,
        vm_offset_t     int_address,
        vm_offset_t     int_value __unused)
{
    cpu_data_t *cpu_data_ptr;

    cpu_data_ptr = (cpu_data_t *)args;

    if ((cpu_data_ptr == &BootCpuData)
        && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
        rtclock_timebase_func = *tbd_funcs;
        rtclock_timebase_addr = int_address;
    }
}

void
ml_parse_cpu_topology(void)
{
    DTEntry entry, child __unused;
    OpaqueDTEntryIterator iter;
    uint32_t cpu_boot_arg;
    int err;

    cpu_boot_arg = MAX_CPUS;

    PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg));

    err = DTLookupEntry(NULL, "/cpus", &entry);
    assert(err == kSuccess);

    err = DTInitEntryIterator(entry, &iter);
    assert(err == kSuccess);

    while (kSuccess == DTIterateEntries(&iter, &child)) {
        unsigned int propSize;
        void *prop = NULL;
        int cpu_id = avail_cpus++;

        if (kSuccess == DTGetProperty(child, "cpu-id", &prop, &propSize))
            cpu_id = *((int32_t*)prop);

        assert(cpu_id < MAX_CPUS);
        assert(cpu_phys_ids[cpu_id] == (uint32_t)-1);

        if (boot_cpu == -1) {
            if (kSuccess != DTGetProperty(child, "state", &prop, &propSize))
                panic("unable to retrieve state for cpu %d", cpu_id);

            if (strncmp((char*)prop, "running", propSize) == 0) {
                boot_cpu = cpu_id;
            }
        }
        if (kSuccess != DTGetProperty(child, "reg", &prop, &propSize))
            panic("unable to retrieve physical ID for cpu %d", cpu_id);

        cpu_phys_ids[cpu_id] = *((uint32_t*)prop);

        if ((cpu_id > max_cpu_number) && ((cpu_id == boot_cpu) || (avail_cpus <= cpu_boot_arg)))
            max_cpu_number = cpu_id;
    }

    if (avail_cpus > cpu_boot_arg)
        avail_cpus = cpu_boot_arg;

    if (avail_cpus == 0)
        panic("No cpus found!");

    if (boot_cpu == -1)
        panic("unable to determine boot cpu!");
}

unsigned int
ml_get_cpu_count(void)
{
    return avail_cpus;
}

int
ml_get_boot_cpu_number(void)
{
    return boot_cpu;
}

cluster_type_t
ml_get_boot_cluster(void)
{
    return boot_cluster;
}

int
ml_get_cpu_number(uint32_t phys_id)
{
    for (int log_id = 0; log_id <= ml_get_max_cpu_number(); ++log_id) {
        if (cpu_phys_ids[log_id] == phys_id)
            return log_id;
    }
    return -1;
}

int
ml_get_max_cpu_number(void)
{
    return max_cpu_number;
}

void ml_lockdown_init() {
    lockdown_handler_grp = lck_grp_alloc_init("lockdown_handler", NULL);
    assert(lockdown_handler_grp != NULL);

    lck_mtx_init(&lockdown_handler_lck, lockdown_handler_grp, NULL);
}

kern_return_t
ml_lockdown_handler_register(lockdown_handler_t f, void *this)
{
    if (lockdown_handler || !f) {
        return KERN_FAILURE;
    }

    lck_mtx_lock(&lockdown_handler_lck);
    lockdown_handler = f;
    lockdown_this = this;

#if !(defined(KERNEL_INTEGRITY_KTRR))
    lockdown_done = 1;
    lockdown_handler(this);
#else
    if (lockdown_done) {
        lockdown_handler(this);
    }
#endif
    lck_mtx_unlock(&lockdown_handler_lck);

    return KERN_SUCCESS;
}

void ml_lockdown_run_handler() {
    lck_mtx_lock(&lockdown_handler_lck);
    assert(!lockdown_done);

    lockdown_done = 1;
    if (lockdown_handler) {
        lockdown_handler(lockdown_this);
    }
    lck_mtx_unlock(&lockdown_handler_lck);
}

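/*
 * Sketch of intended use (handler and context names are hypothetical):
 *
 *     static void my_lockdown_cb(void *ctx);
 *     ...
 *     ml_lockdown_handler_register(my_lockdown_cb, my_ctx);
 *
 * On KTRR systems the callback is deferred until rorgn_lockdown() runs
 * ml_lockdown_run_handler(); without KTRR it fires immediately at
 * registration, since no later lockdown event will arrive.
 */
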
kern_return_t
ml_processor_register(
                      ml_processor_info_t * in_processor_info,
                      processor_t * processor_out,
                      ipi_handler_t * ipi_handler)
{
    cpu_data_t *this_cpu_datap;
    processor_set_t pset;
    boolean_t is_boot_cpu;
    static unsigned int reg_cpu_count = 0;

    if (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())
        return KERN_FAILURE;

    if ((unsigned int)OSIncrementAtomic((SInt32*)&reg_cpu_count) >= avail_cpus)
        return KERN_FAILURE;

    if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
        is_boot_cpu = FALSE;
        this_cpu_datap = cpu_data_alloc(FALSE);
        cpu_data_init(this_cpu_datap);
    } else {
        this_cpu_datap = &BootCpuData;
        is_boot_cpu = TRUE;
    }

    assert(in_processor_info->log_id < MAX_CPUS);

    this_cpu_datap->cpu_id = in_processor_info->cpu_id;

    this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
    if (this_cpu_datap->cpu_console_buf == (void *)(NULL))
        goto processor_register_error;

    this_cpu_datap->cpu_number = in_processor_info->log_id;

    if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS)
        goto processor_register_error;

    this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle;
    this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch;
    nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
    this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);

    this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer;
    this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;

    this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler;
    this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
    this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
    this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;

    this_cpu_datap->cpu_cluster_type = in_processor_info->cluster_type;
    this_cpu_datap->cpu_cluster_id = in_processor_info->cluster_id;
    this_cpu_datap->cpu_l2_id = in_processor_info->l2_cache_id;
    this_cpu_datap->cpu_l2_size = in_processor_info->l2_cache_size;
    this_cpu_datap->cpu_l3_id = in_processor_info->l3_cache_id;
    this_cpu_datap->cpu_l3_size = in_processor_info->l3_cache_size;

    this_cpu_datap->cluster_master = is_boot_cpu;

    pset = pset_find(in_processor_info->cluster_id, processor_pset(master_processor));
    assert(pset != NULL);
    kprintf("%s>cpu_id %p cluster_id %d cpu_number %d is type %d\n", __FUNCTION__, in_processor_info->cpu_id, in_processor_info->cluster_id, this_cpu_datap->cpu_number, in_processor_info->cluster_type);

    if (!is_boot_cpu) {
        processor_init((struct processor *)this_cpu_datap->cpu_processor,
                       this_cpu_datap->cpu_number, pset);

        if (this_cpu_datap->cpu_l2_access_penalty) {
            /*
             * Cores that have a non-zero L2 access penalty compared
             * to the boot processor should be de-prioritized by the
             * scheduler, so that threads use the cores with better L2
             * preferentially.
             */
            processor_set_primary(this_cpu_datap->cpu_processor,
                                  master_processor);
        }
    }

    *processor_out = this_cpu_datap->cpu_processor;
    *ipi_handler = cpu_signal_handler;
    if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL)
        *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;

#if KPC
    if (kpc_register_cpu(this_cpu_datap) != TRUE)
        goto processor_register_error;
#endif

    if (!is_boot_cpu)
        prng_cpu_init(this_cpu_datap->cpu_number);

    // now let next CPU register itself
    OSIncrementAtomic((SInt32*)&real_ncpus);

    return KERN_SUCCESS;

processor_register_error:
#if KPC
    kpc_unregister_cpu(this_cpu_datap);
#endif
    if (!is_boot_cpu)
        cpu_data_free(this_cpu_datap);

    return KERN_FAILURE;
}

void
ml_init_arm_debug_interface(
                            void * in_cpu_datap,
                            vm_offset_t virt_address)
{
    ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
}

/*
 *      Routine:        init_ast_check
 *      Function:
 */
void
init_ast_check(
               __unused processor_t processor)
{
}

/*
 *      Routine:        cause_ast_check
 *      Function:
 */
void
cause_ast_check(
                processor_t processor)
{
    if (current_processor() != processor) {
        cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
    }
}

/*
 *      Routine:        ml_at_interrupt_context
 *      Function:       Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
    unsigned int local;
    vm_offset_t intstack_top_ptr;

    intstack_top_ptr = getCpuDatap()->intstack_top;
    return (((vm_offset_t)(&local) < intstack_top_ptr) && ((vm_offset_t)(&local) > (intstack_top_ptr - INTSTACK_SIZE)));
}

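/*
 * The address of the on-stack local above is effectively the current stack
 * pointer; if it falls within [intstack_top - INTSTACK_SIZE, intstack_top)
 * we are running on this CPU's interrupt stack rather than a thread's kernel
 * stack.
 */
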
extern uint32_t cpu_idle_count;

void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
    *icp = ml_at_interrupt_context();
    *pidlep = (cpu_idle_count == real_ncpus);
}

/*
 *      Routine:        ml_cause_interrupt
 *      Function:       Generate a fake interrupt
 */
void
ml_cause_interrupt(void)
{
    return;                 /* BS_XXX */
}

/* Map memory map IO space */
vm_offset_t
ml_io_map(
          vm_offset_t phys_addr,
          vm_size_t size)
{
    return (io_map(phys_addr, size, VM_WIMG_IO));
}

vm_offset_t
ml_io_map_wcomb(
          vm_offset_t phys_addr,
          vm_size_t size)
{
    return (io_map(phys_addr, size, VM_WIMG_WCOMB));
}

/* boot memory allocation */
vm_offset_t
ml_static_malloc(
                 __unused vm_size_t size)
{
    return ((vm_offset_t) NULL);
}

vm_map_address_t
ml_map_high_window(
        vm_offset_t     phys_addr,
        vm_size_t       len)
{
    return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
}

vm_offset_t
ml_static_ptovirt(
                  vm_offset_t paddr)
{
    return phystokv(paddr);
}

vm_offset_t
ml_static_vtop(
               vm_offset_t vaddr)
{
    if (((vm_address_t)(vaddr) - gVirtBase) >= gPhysSize)
        panic("ml_static_vtop(): illegal vaddr: %p\n", (void*)vaddr);
    return ((vm_address_t)(vaddr) - gVirtBase + gPhysBase);
}

kern_return_t
ml_static_protect(
    vm_offset_t vaddr, /* kernel virtual address */
    vm_size_t size,
    vm_prot_t new_prot)
{
    pt_entry_t arm_prot = 0;
    pt_entry_t arm_block_prot = 0;
    vm_offset_t vaddr_cur;
    ppnum_t ppn;
    kern_return_t result = KERN_SUCCESS;

    if (vaddr < VM_MIN_KERNEL_ADDRESS) {
        panic("ml_static_protect(): %p < %p", (void *) vaddr, (void *) VM_MIN_KERNEL_ADDRESS);
        return KERN_FAILURE;
    }

    assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

    if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
        panic("ml_static_protect(): WX request on %p", (void *) vaddr);
    }

    /* Set up the protection bits, and block bits so we can validate block mappings. */
    if (new_prot & VM_PROT_WRITE) {
        arm_prot |= ARM_PTE_AP(AP_RWNA);
        arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
    } else {
        arm_prot |= ARM_PTE_AP(AP_RONA);
        arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
    }

    arm_prot |= ARM_PTE_NX;
    arm_block_prot |= ARM_TTE_BLOCK_NX;

    if (!(new_prot & VM_PROT_EXECUTE)) {
        arm_prot |= ARM_PTE_PNX;
        arm_block_prot |= ARM_TTE_BLOCK_PNX;
    }

    for (vaddr_cur = vaddr;
         vaddr_cur < trunc_page_64(vaddr + size);
         vaddr_cur += PAGE_SIZE) {
        ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
        if (ppn != (vm_offset_t) NULL) {
#if __ARM64_TWO_LEVEL_PMAP__
            tt_entry_t *tte2;
#else
            tt_entry_t *tte1, *tte2;
#endif
            pt_entry_t *pte_p;
            pt_entry_t ptmp;

#if __ARM64_TWO_LEVEL_PMAP__
            tte2 = &kernel_pmap->tte[(((vaddr_cur) & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT)];
#else
            tte1 = &kernel_pmap->tte[(((vaddr_cur) & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT)];
            tte2 = &((tt_entry_t*) phystokv((*tte1) & ARM_TTE_TABLE_MASK))[(((vaddr_cur) & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT)];
#endif

            if (((*tte2) & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
                if ((((*tte2) & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
                    ((*tte2 & (ARM_TTE_BLOCK_NXMASK | ARM_TTE_BLOCK_PNXMASK | ARM_TTE_BLOCK_APMASK)) == arm_block_prot)) {
                    /*
                     * We can support ml_static_protect on a block mapping if the mapping already has
                     * the desired protections.  We still want to run checks on a per-page basis.
                     */
                    continue;
                }

                result = KERN_FAILURE;
                break;
            }

            pte_p = (pt_entry_t *)&((tt_entry_t*)(phystokv((*tte2) & ARM_TTE_TABLE_MASK)))[(((vaddr_cur) & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT)];
            ptmp = *pte_p;

            if ((ptmp & ARM_PTE_HINT_MASK) && ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot)) {
                /*
                 * The contiguous hint is similar to a block mapping for ml_static_protect; if the existing
                 * protections do not match the desired protections, then we will fail (as we cannot update
                 * this mapping without updating other mappings as well).
                 */
                result = KERN_FAILURE;
                break;
            }

            __unreachable_ok_push
            if (TEST_PAGE_RATIO_4) {
                unsigned int i;
                pt_entry_t *ptep_iter;

                ptep_iter = pte_p;
                for (i = 0; i < 4; i++, ptep_iter++) {
                    /* Note that there is a hole in the HINT sanity checking here. */
                    ptmp = *ptep_iter;

                    /* We only need to update the page tables if the protections do not match. */
                    if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) {
                        ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot;
                        *ptep_iter = ptmp;
                    }
                }
#ifndef __ARM_L1_PTW__
                FlushPoC_DcacheRegion( trunc_page_32(pte_p), 4*sizeof(*pte_p));
#endif
            } else {
                ptmp = *pte_p;

                /* We only need to update the page tables if the protections do not match. */
                if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) {
                    ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot;
                    *pte_p = ptmp;
                }

#ifndef __ARM_L1_PTW__
                FlushPoC_DcacheRegion( trunc_page_32(pte_p), sizeof(*pte_p));
#endif
            }
            __unreachable_ok_pop
        }
    }

    if (vaddr_cur > vaddr) {
        assert(((vaddr_cur - vaddr) & 0xFFFFFFFF00000000ULL) == 0);
        flush_mmu_tlb_region(vaddr, (uint32_t)(vaddr_cur - vaddr));
    }

    return result;
}

/*
 *      Routine:        ml_static_mfree
 *      Function:
 */
void
ml_static_mfree(
                vm_offset_t vaddr,
                vm_size_t size)
{
    vm_offset_t vaddr_cur;
    ppnum_t ppn;
    uint32_t freed_pages = 0;

    /* It is acceptable (if bad) to fail to free. */
    if (vaddr < VM_MIN_KERNEL_ADDRESS)
        return;

    assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

    for (vaddr_cur = vaddr;
         vaddr_cur < trunc_page_64(vaddr + size);
         vaddr_cur += PAGE_SIZE) {

        ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
        if (ppn != (vm_offset_t) NULL) {
            /*
             * It is not acceptable to fail to update the protections on a page
             * we will release to the VM.  We need to either panic or continue.
             * For now, we'll panic (to help flag if there is memory we can
             * reclaim).
             */
            if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
                panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
            }

            /*
             * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme
             * relies on the persistence of these mappings for all time.
             */
            // pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE));

            vm_page_create(ppn, (ppn + 1));
            freed_pages++;
        }
    }
    vm_page_lockspin_queues();
    vm_page_wire_count -= freed_pages;
    vm_page_wire_count_initial -= freed_pages;
    vm_page_unlock_queues();
#if DEBUG
    kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
#endif
}

/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(vm_offset_t vaddr)
{
    return kvtophys(vaddr);
}

/*
 * Routine: ml_nofault_copy
 * Function: Perform a physical mode copy if the source and destination have
 * valid translations in the kernel pmap. If translations are present, they are
 * assumed to be wired; e.g., no attempt is made to guarantee that the
 * translations obtained remain valid for the duration of the copy process.
 */
vm_size_t
ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
    addr64_t cur_phys_dst, cur_phys_src;
    vm_size_t count, nbytes = 0;

    while (size > 0) {
        if (!(cur_phys_src = kvtophys(virtsrc)))
            break;
        if (!(cur_phys_dst = kvtophys(virtdst)))
            break;
        if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
            !pmap_valid_address(trunc_page_64(cur_phys_src)))
            break;
        count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
        if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
            count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
        if (count > size)
            count = size;

        bcopy_phys(cur_phys_src, cur_phys_dst, count);

        nbytes += count;
        virtsrc += count;
        virtdst += count;
        size -= count;
    }

    return nbytes;
}

/*
 *      Routine:        ml_validate_nofault
 *      Function: Validate that this address range has valid translations
 *      in the kernel pmap.  If translations are present, they are
 *      assumed to be wired; i.e. no attempt is made to guarantee
 *      that the translation persist after the check.
 *      Returns: TRUE if the range is mapped and will not cause a fault,
 *      FALSE otherwise.
 */
boolean_t ml_validate_nofault(
        vm_offset_t virtsrc, vm_size_t size)
{
    addr64_t cur_phys_src;
    uint32_t count;

    while (size > 0) {
        if (!(cur_phys_src = kvtophys(virtsrc)))
            return FALSE;
        if (!pmap_valid_address(trunc_page_64(cur_phys_src)))
            return FALSE;
        count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
        if (count > size)
            count = (uint32_t)size;

        virtsrc += count;
        size -= count;
    }

    return TRUE;
}

void
ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
{
    *phys_addr = 0;
    *size = 0;
}

void
active_rt_threads(__unused boolean_t active)
{
}

static void cpu_qos_cb_default(__unused int urgency, __unused uint64_t qos_param1, __unused uint64_t qos_param2) {
    return;
}

cpu_qos_update_t cpu_qos_update = cpu_qos_cb_default;

void cpu_qos_update_register(cpu_qos_update_t cpu_qos_cb) {
    if (cpu_qos_cb != NULL) {
        cpu_qos_update = cpu_qos_cb;
    } else {
        cpu_qos_update = cpu_qos_cb_default;
    }
}

void
thread_tell_urgency(int urgency, uint64_t rt_period, uint64_t rt_deadline, uint64_t sched_latency __unused, __unused thread_t nthread)
{
    SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, rt_deadline, sched_latency, 0);

    cpu_qos_update(urgency, rt_period, rt_deadline);

    SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, rt_deadline, 0, 0);
}

void
machine_run_count(__unused uint32_t count)
{
}

processor_t
machine_choose_processor(__unused processor_set_t pset, processor_t processor)
{
    return (processor);
}

vm_offset_t
ml_stack_remaining(void)
{
    uintptr_t local = (uintptr_t) &local;

    if (ml_at_interrupt_context()) {
        return (local - (getCpuDatap()->intstack_top - INTSTACK_SIZE));
    } else {
        return (local - current_thread()->kernel_stack);
    }
}

vm_offset_t ml_stack_base(void);
vm_size_t ml_stack_size(void);

vm_offset_t
ml_stack_base(void)
{
    if (ml_at_interrupt_context()) {
        return getCpuDatap()->intstack_top - INTSTACK_SIZE;
    } else {
        return current_thread()->kernel_stack;
    }
}

vm_size_t
ml_stack_size(void)
{
    if (ml_at_interrupt_context()) {
        return INTSTACK_SIZE;
    } else {
        return kernel_stack_size;
    }
}

boolean_t machine_timeout_suspended(void) {
    return FALSE;
}

kern_return_t
ml_interrupt_prewarm(__unused uint64_t deadline)
{
    return KERN_FAILURE;
}

/*
 * Assumes fiq, irq disabled.
 */
void
ml_set_decrementer(uint32_t dec_value)
{
    cpu_data_t *cdp = getCpuDatap();

    assert(ml_get_interrupts_enabled() == FALSE);
    cdp->cpu_decrementer = dec_value;

    if (cdp->cpu_set_decrementer_func) {
        ((void (*)(uint32_t))cdp->cpu_set_decrementer_func)(dec_value);
    } else {
        __asm__ volatile("msr CNTP_TVAL_EL0, %0" : : "r"((uint64_t)dec_value));
    }
}

uint64_t ml_get_hwclock()
{
    uint64_t timebase;

    // ISB required by ARMV7C.b section B8.1.2 & ARMv8 section D6.1.2
    // "Reads of CNTPCT[_EL0] can occur speculatively and out of order relative
    // to other instructions executed on the same processor."
    __asm__ volatile("isb\n"
                     "mrs %0, CNTPCT_EL0"
                     : "=r"(timebase));

    return timebase;
}

uint64_t
ml_get_timebase()
{
    return (ml_get_hwclock() + getCpuDatap()->cpu_base_timebase);
}

1542 cpu_data_t
*cdp
= getCpuDatap();
1545 assert(ml_get_interrupts_enabled() == FALSE
);
1547 if (cdp
->cpu_get_decrementer_func
) {
1548 dec
= ((uint32_t (*)(void))cdp
->cpu_get_decrementer_func
)();
1552 __asm__
volatile("mrs %0, CNTP_TVAL_EL0" : "=r"(wide_val
));
1553 dec
= (uint32_t)wide_val
;
1554 assert(wide_val
== (uint64_t)dec
);
boolean_t
ml_get_timer_pending()
{
    uint64_t cntp_ctl;

    __asm__ volatile("mrs %0, CNTP_CTL_EL0" : "=r"(cntp_ctl));
    return ((cntp_ctl & CNTP_CTL_EL0_ISTATUS) != 0) ? TRUE : FALSE;
}

boolean_t
ml_wants_panic_trap_to_debugger(void)
{
    boolean_t result = FALSE;

    return result;
}

static void
cache_trap_error(thread_t thread, vm_map_address_t fault_addr)
{
    mach_exception_data_type_t exc_data[2];
    arm_saved_state_t *regs = get_user_regs(thread);

    set_saved_state_far(regs, fault_addr);

    exc_data[0] = KERN_INVALID_ADDRESS;
    exc_data[1] = fault_addr;

    exception_triage(EXC_BAD_ACCESS, exc_data, 2);
}

static void
cache_trap_recover()
{
    vm_map_address_t fault_addr;

    __asm__ volatile("mrs %0, FAR_EL1" : "=r"(fault_addr));

    cache_trap_error(current_thread(), fault_addr);
}

static void
dcache_flush_trap(vm_map_address_t start, vm_map_size_t size)
{
    vm_map_address_t end = start + size;
    thread_t thread = current_thread();
    vm_offset_t old_recover = thread->recover;

    /* Check bounds */
    if (task_has_64BitAddr(current_task())) {
        if (end > MACH_VM_MAX_ADDRESS) {
            cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
        }
    } else {
        if (end > VM_MAX_ADDRESS) {
            cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
        }
    }

    if (start > end) {
        cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1));
    }

    /* Set recovery function */
    thread->recover = (vm_address_t)cache_trap_recover;

#if defined(APPLE_ARM64_ARCH_FAMILY)
    /*
     * We're coherent on Apple ARM64 CPUs, so this could be a nop.  However,
     * if the region given us is bad, it would be good to catch it and
     * crash, ergo we still do the flush.
     */
    assert((size & 0xFFFFFFFF00000000ULL) == 0);
    FlushPoC_DcacheRegion(start, (uint32_t)size);
#else
#error "Make sure you don't need to xcall."
#endif

    /* Restore recovery function */
    thread->recover = old_recover;

    /* Return (caller does exception return) */
}

static void
icache_invalidate_trap(vm_map_address_t start, vm_map_size_t size)
{
    vm_map_address_t end = start + size;
    thread_t thread = current_thread();
    vm_offset_t old_recover = thread->recover;

    /* Check bounds */
    if (task_has_64BitAddr(current_task())) {
        if (end > MACH_VM_MAX_ADDRESS) {
            cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
        }
    } else {
        if (end > VM_MAX_ADDRESS) {
            cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
        }
    }

    if (start > end) {
        cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1));
    }

    /* Set recovery function */
    thread->recover = (vm_address_t)cache_trap_recover;

#if defined(APPLE_ARM64_ARCH_FAMILY)
    /* Clean dcache to unification, except we're coherent on Apple ARM64 CPUs */
#else
#error Make sure not cleaning is right for this platform!
#endif

    /* Invalidate iCache to point of unification */
    assert((size & 0xFFFFFFFF00000000ULL) == 0);
    InvalidatePoU_IcacheRegion(start, (uint32_t)size);

    /* Restore recovery function */
    thread->recover = old_recover;

    /* Return (caller does exception return) */
}

__attribute__((noreturn))
static void
platform_syscall(arm_saved_state_t *state)
{
    uint32_t code;

#define platform_syscall_kprintf(x...) /* kprintf("platform_syscall: " x) */

    code = (uint32_t)get_saved_state_reg(state, 3);
    switch (code) {
    case 0:
        /* I-Cache flush */
        platform_syscall_kprintf("icache flush requested.\n");
        icache_invalidate_trap(get_saved_state_reg(state, 0), get_saved_state_reg(state, 1));
        break;
    case 1:
        /* D-Cache flush */
        platform_syscall_kprintf("dcache flush requested.\n");
        dcache_flush_trap(get_saved_state_reg(state, 0), get_saved_state_reg(state, 1));
        break;
    case 2:
        /* set cthread */
        platform_syscall_kprintf("set cthread self.\n");
        thread_set_cthread_self(get_saved_state_reg(state, 0));
        break;
    case 3:
        /* get cthread */
        platform_syscall_kprintf("get cthread self.\n");
        set_saved_state_reg(state, 0, thread_get_cthread_self());
        break;
    default:
        platform_syscall_kprintf("unknown: %d\n", code);
        break;
    }

    thread_exception_return();
}

static void
_enable_timebase_event_stream(uint32_t bit_index)
{
    uint64_t cntkctl; /* One wants to use 32 bits, but "mrs" prefers it this way */

    if (bit_index >= 64) {
        panic("%s: invalid bit index (%u)", __FUNCTION__, bit_index);
    }

    __asm__ volatile ("mrs %0, CNTKCTL_EL1" : "=r"(cntkctl));

    cntkctl |= (bit_index << CNTKCTL_EL1_EVENTI_SHIFT);
    cntkctl |= CNTKCTL_EL1_EVNTEN;
    cntkctl |= CNTKCTL_EL1_EVENTDIR; /* 1->0; why not? */

    /*
     * If the SOC supports it (and it isn't broken), enable
     * EL0 access to the physical timebase register.
     */
    if (user_timebase_allowed()) {
        cntkctl |= CNTKCTL_EL1_PL0PCTEN;
    }

    __asm__ volatile ("msr CNTKCTL_EL1, %0" : : "r"(cntkctl));
}

/*
 * Turn timer on, unmask that interrupt.
 */
static void
_enable_virtual_timer(void)
{
    uint64_t cntvctl = CNTP_CTL_EL0_ENABLE; /* One wants to use 32 bits, but "mrs" prefers it this way */

    __asm__ volatile ("msr CNTP_CTL_EL0, %0" : : "r"(cntvctl));
}

void
fiq_context_init(boolean_t enable_fiq __unused)
{
#if defined(APPLE_ARM64_ARCH_FAMILY)
    /* Could fill in our own ops here, if we needed them */
    uint64_t ticks_per_sec, ticks_per_event, events_per_sec;
    uint32_t bit_index;

    ticks_per_sec = gPEClockFrequencyInfo.timebase_frequency_hz;
#if defined(ARM_BOARD_WFE_TIMEOUT_NS)
    events_per_sec = 1000000000 / ARM_BOARD_WFE_TIMEOUT_NS;
#else
    /* Default to 1usec (or as close as we can get) */
    events_per_sec = 1000000;
#endif
    ticks_per_event = ticks_per_sec / events_per_sec;
    bit_index = flsll(ticks_per_event) - 1; /* Highest bit set */

    /* Round up to power of two */
    if ((ticks_per_event & ((1 << bit_index) - 1)) != 0) {
        bit_index++;
    }

    /*
     * The timer can only trigger on rising or falling edge,
     * not both; we don't care which we trigger on, but we
     * do need to adjust which bit we are interested in to
     * account for this.
     */
    if (bit_index != 0)
        bit_index--;

    _enable_timebase_event_stream(bit_index);
#else
#error Need a board configuration.
#endif

    /* Interrupts still disabled. */
    assert(ml_get_interrupts_enabled() == FALSE);
    _enable_virtual_timer();
}

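/*
 * Worked example of the math above, assuming a 24MHz timebase: one event per
 * microsecond gives ticks_per_event = 24; flsll(24) - 1 = 4, and since 24 is
 * not a power of two the index is bumped to 5 (32 ticks). Because the event
 * stream fires on only one edge of the selected counter bit, the index is
 * then dropped back to 4, so WFE wakeups arrive roughly every 32 ticks
 * (about 1.3us) rather than every 16.
 */
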
/*
 * ARM64_TODO: remove me (just a convenience while we don't have crashreporter)
 */
extern int copyinframe(vm_address_t, char *, boolean_t);
size_t _OSUserBacktrace(char *buffer, size_t bufsize);

size_t _OSUserBacktrace(char *buffer, size_t bufsize)
{
    thread_t thread = current_thread();
    boolean_t is64bit = thread_is_64bit(thread);
    size_t trace_size_bytes = 0, lr_size;
    vm_address_t frame_addr; // Should really be mach_vm_offset_t...

    if (get_threadtask(thread) == kernel_task) {
        panic("%s: Should never be called from a kernel thread.", __FUNCTION__);
    }

    frame_addr = get_saved_state_fp(thread->machine.upcb);
    if (is64bit) {
        uint64_t frame[2];
        lr_size = sizeof(frame[1]);

        *((uint64_t*)buffer) = get_saved_state_pc(thread->machine.upcb);
        trace_size_bytes = lr_size;

        while (trace_size_bytes + lr_size < bufsize) {
            if (!(frame_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) {
                break;
            }

            if (0 != copyinframe(frame_addr, (char*)frame, TRUE)) {
                break;
            }

            *((uint64_t*)(buffer + trace_size_bytes)) = frame[1]; /* lr */
            frame_addr = frame[0];
            trace_size_bytes += lr_size;

            if (frame[0] == 0x0ULL) {
                break;
            }
        }
    } else {
        uint32_t frame[2];
        lr_size = sizeof(frame[1]);

        *((uint32_t*)buffer) = (uint32_t)get_saved_state_pc(thread->machine.upcb);
        trace_size_bytes = lr_size;

        while (trace_size_bytes + lr_size < bufsize) {
            if (!(frame_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) {
                break;
            }

            if (0 != copyinframe(frame_addr, (char*)frame, FALSE)) {
                break;
            }

            *((uint32_t*)(buffer + trace_size_bytes)) = frame[1]; /* lr */
            frame_addr = frame[0];
            trace_size_bytes += lr_size;

            if (frame[0] == 0x0ULL) {
                break;
            }
        }
    }

    return trace_size_bytes;
}

boolean_t
ml_delay_should_spin(uint64_t interval)
{
    cpu_data_t *cdp = getCpuDatap();

    if (cdp->cpu_idle_latency) {
        return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
    } else {
        /*
         * Early boot, latency is unknown. Err on the side of blocking,
         * which should always be safe, even if slow
         */
        return FALSE;
    }
}

ml_thread_is64bit(thread_t thread
) {
1893 return (thread_is_64bit(thread
));
1896 void ml_timer_evaluate(void) {
1900 ml_timer_forced_evaluation(void) {
1905 ml_energy_stat(thread_t t
) {
1906 return t
->machine
.energy_estimate_nj
;
1911 ml_gpu_stat_update(__unused
uint64_t gpu_ns_delta
) {
1914 * For now: update the resource coalition stats of the
1915 * current thread's coalition
1917 task_coalition_update_gpu_stats(current_task(), gpu_ns_delta
);
1922 ml_gpu_stat(__unused thread_t t
) {
1926 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
1928 timer_state_event(boolean_t switch_to_kernel
)
1930 thread_t thread
= current_thread();
1931 if (!thread
->precise_user_kernel_time
) return;
1933 processor_data_t
*pd
= &getCpuDatap()->cpu_processor
->processor_data
;
1934 uint64_t now
= ml_get_timebase();
1936 timer_stop(pd
->current_state
, now
);
1937 pd
->current_state
= (switch_to_kernel
) ? &pd
->system_state
: &pd
->user_state
;
1938 timer_start(pd
->current_state
, now
);
1940 timer_stop(pd
->thread_timer
, now
);
1941 pd
->thread_timer
= (switch_to_kernel
) ? &thread
->system_timer
: &thread
->user_timer
;
1942 timer_start(pd
->thread_timer
, now
);
1946 timer_state_event_user_to_kernel(void)
1948 timer_state_event(TRUE
);
1952 timer_state_event_kernel_to_user(void)
1954 timer_state_event(FALSE
);
1956 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
    return current_thread_fast();
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
    return current_thread_fast();
}

typedef struct
{
    ex_cb_t     cb;
    void        *refcon;
}
ex_cb_info_t;

ex_cb_info_t ex_cb_info[EXCB_CLASS_MAX];

/*
 * Callback registration
 * Currently we support only one registered callback per class but
 * it should be possible to support more callbacks
 */
kern_return_t ex_cb_register(
    ex_cb_class_t   cb_class,
    ex_cb_t         cb,
    void            *refcon)
{
    ex_cb_info_t *pInfo = &ex_cb_info[cb_class];

    if ((NULL == cb) || (cb_class >= EXCB_CLASS_MAX)) {
        return KERN_INVALID_VALUE;
    }

    if (NULL == pInfo->cb) {
        pInfo->cb = cb;
        pInfo->refcon = refcon;
        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}

/*
 * Called internally by platform kernel to invoke the registered callback for class
 */
ex_cb_action_t ex_cb_invoke(
    ex_cb_class_t   cb_class,
    vm_offset_t     far)
{
    ex_cb_info_t *pInfo = &ex_cb_info[cb_class];
    ex_cb_state_t state = {far};

    if (cb_class >= EXCB_CLASS_MAX) {
        panic("Invalid exception callback class 0x%x\n", cb_class);
    }

    if (pInfo->cb) {
        return pInfo->cb(cb_class, pInfo->refcon, &state);
    }
    return EXCB_ACTION_NONE;
}