/*
 * Copyright (c) 2007-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm64/proc_reg.h>
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/io_map_entries.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/rtclock.h>
#include <console/serial_protos.h>
#include <kern/machine.h>
#include <prng/random.h>
#include <kern/startup.h>
#include <kern/thread.h>
#include <mach/machine.h>
#include <machine/atomic.h>
#include <vm/vm_page.h>
#include <sys/kdebug.h>
#include <kern/coalition.h>
#include <pexpert/device_tree.h>

#include <IOKit/IOPlatformExpert.h>
#include <libkern/section_keywords.h>

#if defined(KERNEL_INTEGRITY_KTRR)
#include <libkern/kernel_mach_header.h>
#endif
static int max_cpus_initialized = 0;
#define MAX_CPUS_SET    0x1
#define MAX_CPUS_WAIT   0x2

uint32_t LockTimeOutUsec;

boolean_t is_clock_configured = FALSE;

extern int mach_assert;
extern volatile uint32_t debug_enabled;
SECURITY_READ_ONLY_LATE(unsigned int) debug_boot_arg;

void machine_conf(void);

thread_t Idle_context(void);

static uint32_t cpu_phys_ids[MAX_CPUS] = {[0 ... MAX_CPUS - 1] = (uint32_t)-1};
static unsigned int avail_cpus = 0;
static int boot_cpu = -1;
static int max_cpu_number = 0;
cluster_type_t boot_cluster = CLUSTER_TYPE_SMP;

lockdown_handler_t lockdown_handler;
lck_mtx_t lockdown_handler_lck;
lck_grp_t *lockdown_handler_grp;

void ml_lockdown_init(void);
void ml_lockdown_run_handler(void);
uint32_t get_arm_cpu_version(void);
void ml_cpu_signal(unsigned int cpu_id __unused)
{
    panic("Platform does not support ACC Fast IPI");
}

void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs) {
    (void)nanosecs;
    panic("Platform does not support ACC Fast IPI");
}

uint64_t ml_cpu_signal_deferred_get_timer() {
    return 0;
}

void ml_cpu_signal_deferred(unsigned int cpu_id __unused)
{
    panic("Platform does not support ACC Fast IPI deferral");
}

void ml_cpu_signal_retract(unsigned int cpu_id __unused)
{
    panic("Platform does not support ACC Fast IPI retraction");
}
void machine_idle(void)
{
    __asm__ volatile ("msr DAIFSet, %[mask]" ::[mask] "i" (DAIFSC_IRQF | DAIFSC_FIQF));
    Idle_context();
    __asm__ volatile ("msr DAIFClr, %[mask]" ::[mask] "i" (DAIFSC_IRQF | DAIFSC_FIQF));
}
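/*
 * Note on the asm above: DAIFSet/DAIFClr take an immediate mask, so setting
 * DAIFSC_IRQF | DAIFSC_FIQF masks both IRQ and FIQ around the idle entry, and
 * clearing the same bits re-enables them once the core resumes from idle.
 */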
boolean_t get_vfp_enabled(void)
{
    return TRUE;
}

void OSSynchronizeIO(void)
{
    __builtin_arm_dsb(DSB_SY);
}

uint64_t get_aux_control(void)
{
    uint64_t value;

    MRS(value, "ACTLR_EL1");
    return value;
}

uint64_t get_mmu_control(void)
{
    uint64_t value;

    MRS(value, "SCTLR_EL1");
    return value;
}

uint64_t get_tcr(void)
{
    uint64_t value;

    MRS(value, "TCR_EL1");
    return value;
}

boolean_t ml_get_interrupts_enabled(void)
{
    uint64_t value;

    __asm__ volatile("mrs %0, DAIF" : "=r"(value));
    if (value & DAIF_IRQF)
        return FALSE;
    return TRUE;
}

pmap_paddr_t get_mmu_ttb(void)
{
    pmap_paddr_t value;

    MRS(value, "TTBR0_EL1");
    return value;
}

void set_mmu_ttb(pmap_paddr_t value)
{
    __builtin_arm_dsb(DSB_ISH);
    MSR("TTBR0_EL1", value);
    __builtin_arm_isb(ISB_SY);
}

static uint32_t get_midr_el1(void)
{
    uint64_t value;

    MRS(value, "MIDR_EL1");

    /* This is a 32-bit register. */
    return (uint32_t) value;
}
uint32_t get_arm_cpu_version(void)
{
    uint32_t value = get_midr_el1();

    /* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */
    return ((value & MIDR_EL1_REV_MASK) >> MIDR_EL1_REV_SHIFT) | ((value & MIDR_EL1_VAR_MASK) >> (MIDR_EL1_VAR_SHIFT - 4));
}
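/*
 * Example (illustrative): a MIDR_EL1 value whose Variant field is 2 and whose
 * Revision field is 1 composes to 0x21 -- the variant lands in bits [7:4] and
 * the revision in bits [3:0] of the returned byte.
 */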
/*
 * user_cont_hwclock_allowed()
 *
 * Indicates whether we allow EL0 to read the physical timebase (CNTPCT_EL0)
 * as a continuous time source (e.g. from mach_continuous_time)
 */
boolean_t user_cont_hwclock_allowed(void)
{
    return FALSE;
}

/*
 * user_timebase_allowed()
 *
 * Indicates whether we allow EL0 to read the physical timebase (CNTPCT_EL0).
 */
boolean_t user_timebase_allowed(void)
{
    return TRUE;
}

boolean_t arm64_wfe_allowed(void)
{
    return TRUE;
}
#if defined(KERNEL_INTEGRITY_KTRR)

uint64_t rorgn_begin __attribute__((section("__DATA, __const"))) = 0;
uint64_t rorgn_end __attribute__((section("__DATA, __const"))) = 0;
vm_offset_t amcc_base;

static void assert_unlocked(void);
static void assert_amcc_cache_disabled(void);
static void lock_amcc(void);
static void lock_mmu(uint64_t begin, uint64_t end);
void rorgn_stash_range(void)
{

#if DEVELOPMENT || DEBUG
    boolean_t rorgn_disable = FALSE;

    PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable));

    if (rorgn_disable) {
        /* take early out if boot arg present, don't query any machine registers to avoid
         * dependency on amcc DT entry
         */
        return;
    }
#endif

    /* Get the AMC values, and stash them into rorgn_begin, rorgn_end. */

#if defined(KERNEL_INTEGRITY_KTRR)
    uint64_t soc_base = 0;
    DTEntry entryP = NULL;
    uintptr_t *reg_prop = NULL;
    uint32_t prop_size = 0;
    int rc;

    soc_base = pe_arm_get_soc_base_phys();
    rc = DTFindEntry("name", "mcc", &entryP);
    assert(rc == kSuccess);
    rc = DTGetProperty(entryP, "reg", (void **)&reg_prop, &prop_size);
    assert(rc == kSuccess);
    amcc_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1));
#else
#error "KERNEL_INTEGRITY config error"
#endif

#if defined(KERNEL_INTEGRITY_KTRR)
    assert(rRORGNENDADDR > rRORGNBASEADDR);
    rorgn_begin = (rRORGNBASEADDR << ARM_PGSHIFT) + gPhysBase;
    rorgn_end = (rRORGNENDADDR << ARM_PGSHIFT) + gPhysBase;
#else
#error KERNEL_INTEGRITY config error
#endif /* defined (KERNEL_INTEGRITY_KTRR) */
}
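/*
 * Worked example (illustrative): the AMCC appears to report the RO region
 * bounds as page indices relative to the start of DRAM, so the stashed values
 * are converted with (index << ARM_PGSHIFT) + gPhysBase. Assuming 16 KB pages
 * (ARM_PGSHIFT == 14), an rRORGNBASEADDR of 0x100 would stash
 * gPhysBase + 0x400000 into rorgn_begin.
 */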
static void assert_unlocked() {
    uint64_t ktrr_lock = 0;
    uint32_t rorgn_lock = 0;

    assert(amcc_base);
#if defined(KERNEL_INTEGRITY_KTRR)
    rorgn_lock = rRORGNLOCK;
    ktrr_lock = __builtin_arm_rsr64(ARM64_REG_KTRR_LOCK_EL1);
#else
#error KERNEL_INTEGRITY config error
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

    assert(!ktrr_lock);
    assert(!rorgn_lock);
}

static void lock_amcc() {
#if defined(KERNEL_INTEGRITY_KTRR)
    rRORGNLOCK = 1;
    __builtin_arm_isb(ISB_SY);
#else
#error KERNEL_INTEGRITY config error
#endif
}

static void lock_mmu(uint64_t begin, uint64_t end) {

#if defined(KERNEL_INTEGRITY_KTRR)

    __builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin);
    __builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end);
    __builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL);

    /* flush TLB */

    __builtin_arm_isb(ISB_SY);
    flush_mmu_tlb();

#else
#error KERNEL_INTEGRITY config error
#endif

}

static void assert_amcc_cache_disabled() {
#if defined(KERNEL_INTEGRITY_KTRR)
    assert((rMCCGEN & 1) == 0); /* assert M$ disabled or LLC clean will be unreliable */
#else
#error KERNEL_INTEGRITY config error
#endif
}
/*
 * void rorgn_lockdown(void)
 *
 * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked
 *
 * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in
 *       start.s:start_cpu() for subsequent wake/resume of all cores
 */
void rorgn_lockdown(void)
{
    vm_offset_t ktrr_begin, ktrr_end;
    unsigned long plt_segsz, last_segsz;

#if DEVELOPMENT || DEBUG
    boolean_t ktrr_disable = FALSE;

    PE_parse_boot_argn("-unsafe_kernel_text", &ktrr_disable, sizeof(ktrr_disable));

    if (ktrr_disable) {
        /*
         * take early out if boot arg present, since we may not have amcc DT entry present
         * we can't assert that iboot hasn't programmed the RO region lockdown registers
         */
        goto out;
    }
#endif /* DEVELOPMENT || DEBUG */

    assert_unlocked();

    /* [x] - Use final method of determining all kernel text range or expect crashes */

    ktrr_begin = (uint64_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &plt_segsz);
    assert(ktrr_begin && gVirtBase && gPhysBase);

    ktrr_begin = kvtophys(ktrr_begin);

    /* __LAST is not part of the MMU KTRR region (it is however part of the AMCC KTRR region) */
    ktrr_end = (uint64_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &last_segsz);
    ktrr_end = (kvtophys(ktrr_end) - 1) & ~PAGE_MASK;

    /* ensure that iboot and xnu agree on the ktrr range */
    assert(rorgn_begin == ktrr_begin && rorgn_end == (ktrr_end + last_segsz));
    /* assert that __LAST segment containing privileged insns is only a single page */
    assert(last_segsz == PAGE_SIZE);

    printf("KTRR Begin: %p End: %p, setting lockdown\n", (void *)ktrr_begin, (void *)ktrr_end);

    /* [x] - ensure all in flight writes are flushed to AMCC before enabling RO Region Lock */

    assert_amcc_cache_disabled();

    CleanPoC_DcacheRegion_Force(phystokv(ktrr_begin),
        (unsigned)((ktrr_end + last_segsz) - ktrr_begin + PAGE_MASK));

    lock_amcc();

    lock_mmu(ktrr_begin, ktrr_end);

#if DEVELOPMENT || DEBUG
out:
#endif

    /* now we can run lockdown handler */
    ml_lockdown_run_handler();
}

#endif /* defined(KERNEL_INTEGRITY_KTRR) */
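/*
 * Lockdown ordering sketch (as implemented above): the RO region must be clean
 * in the data cache before the locks are taken, so rorgn_lockdown() asserts
 * that the memory cache is disabled, force-cleans the KTRR range to the point
 * of coherency, locks the AMCC RO region, and only then programs and locks the
 * MMU KTRR registers via lock_mmu(), before finally running any registered
 * lockdown handler.
 */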
void
machine_startup(__unused boot_args * args)
{
    int boot_arg;

    if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof (debug_boot_arg)) &&
        debug_enabled) {
        if (debug_boot_arg & DB_HALT)
            halt_in_debugger = 1;
        if (debug_boot_arg & DB_NMI)
            panicDebugging = TRUE;
    } else {
        debug_boot_arg = 0;
    }

    PE_parse_boot_argn("assert", &mach_assert, sizeof (mach_assert));

    if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
        default_preemption_rate = boot_arg;
    }
    if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof (boot_arg))) {
        default_bg_preemption_rate = boot_arg;
    }

    machine_conf();

    /*
     * Kick off the kernel bootstrap.
     */
    kernel_bootstrap();
    /* NOTREACHED */
}
void machine_lockdown_preflight(void)
{
#if CONFIG_KERNEL_INTEGRITY

#if defined(KERNEL_INTEGRITY_KTRR)
    rorgn_stash_range();
#endif

#endif
}

void machine_lockdown(void)
{
#if CONFIG_KERNEL_INTEGRITY
#if KERNEL_INTEGRITY_WT
    /* Watchtower
     *
     * Notify the monitor about the completion of early kernel bootstrap.
     * From this point forward it will enforce the integrity of kernel text,
     * rodata and page tables.
     */
    monitor_call(MONITOR_LOCKDOWN, 0, 0, 0);
#endif /* KERNEL_INTEGRITY_WT */

#if defined(KERNEL_INTEGRITY_KTRR)
    /* KTRR
     *
     * Lock physical KTRR region. KTRR region is read-only. Memory outside
     * the region is not executable at EL1.
     */
    rorgn_lockdown();
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

#endif /* CONFIG_KERNEL_INTEGRITY */
}
char *
machine_boot_info(
                  __unused char *buf,
                  __unused vm_size_t size)
{
    return (PE_boot_args());
}
void
machine_conf(void)
{
    /*
     * This is known to be inaccurate. mem_size should always be capped at 2 GB
     */
    machine_info.memory_size = (uint32_t)mem_size;
}

void
machine_init(void)
{
    debug_log_init();
    clock_config();
    is_clock_configured = TRUE;
    if (debug_enabled)
        pmap_map_globals();
}
void
slave_machine_init(__unused void *param)
{
    cpu_machine_init();     /* Initialize the processor */
    clock_init();           /* Init the clock */
}

/*
 *      Routine:        machine_processor_shutdown
 *      Function:
 */
thread_t
machine_processor_shutdown(
                           __unused thread_t thread,
                           void (*doshutdown) (processor_t),
                           processor_t processor)
{
    return (Shutdown_context(doshutdown, processor));
}
/*
 *      Routine:        ml_init_max_cpus
 *      Function:
 */
void
ml_init_max_cpus(unsigned int max_cpus)
{
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    if (max_cpus_initialized != MAX_CPUS_SET) {
        machine_info.max_cpus = max_cpus;
        machine_info.physical_cpu_max = max_cpus;
        machine_info.logical_cpu_max = max_cpus;
        if (max_cpus_initialized == MAX_CPUS_WAIT)
            thread_wakeup((event_t) &max_cpus_initialized);
        max_cpus_initialized = MAX_CPUS_SET;
    }
    (void) ml_set_interrupts_enabled(current_state);
}
/*
 *      Routine:        ml_get_max_cpus
 *      Function:
 */
unsigned int
ml_get_max_cpus(void)
{
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    if (max_cpus_initialized != MAX_CPUS_SET) {
        max_cpus_initialized = MAX_CPUS_WAIT;
        assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT);
        (void) thread_block(THREAD_CONTINUE_NULL);
    }
    (void) ml_set_interrupts_enabled(current_state);
    return (machine_info.max_cpus);
}
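/*
 * ml_init_max_cpus() and ml_get_max_cpus() form a simple handshake: a caller of
 * ml_get_max_cpus() that arrives before the platform expert has published the
 * CPU count parks on &max_cpus_initialized with assert_wait()/thread_block(),
 * and ml_init_max_cpus() wakes it with thread_wakeup() once machine_info has
 * been filled in.
 */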
/*
 *      Routine:        ml_init_lock_timeout
 *      Function:
 */
void
ml_init_lock_timeout(void)
{
    uint64_t abstime;
    uint64_t mtxspin;
    uint64_t default_timeout_ns = NSEC_PER_SEC>>2;
    uint32_t slto;

    if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto)))
        default_timeout_ns = slto * NSEC_PER_USEC;

    nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
    LockTimeOutUsec = (uint32_t)(abstime / NSEC_PER_USEC);
    LockTimeOut = (uint32_t)abstime;

    if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
        if (mtxspin > USEC_PER_SEC>>4)
            mtxspin = USEC_PER_SEC>>4;
        nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
    } else {
        nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
    }
    MutexSpin = abstime;
}
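/*
 * Worked example: with no "slto_us" boot-arg the default spinlock timeout is
 * NSEC_PER_SEC >> 2 = 250 ms; booting with slto_us=100000 yields
 * 100000 * NSEC_PER_USEC = 100 ms. The mutex spin window is clamped to
 * USEC_PER_SEC >> 4 (62500 us) and defaults to 10 us when "mtxspin" is absent.
 */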
/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
    hw_atomic_add(&machine_info.physical_cpu, 1);
    hw_atomic_add(&machine_info.logical_cpu, 1);
}
/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
    cpu_data_t *cpu_data_ptr;

    hw_atomic_sub(&machine_info.physical_cpu, 1);
    hw_atomic_sub(&machine_info.logical_cpu, 1);

    /*
     * If we want to deal with outstanding IPIs, we need to do so
     * relatively early in the processor_doshutdown path, as we pend
     * decrementer interrupts using the IPI mechanism if we cannot
     * immediately service them (if IRQ is masked). Do so now.
     *
     * We aren't on the interrupt stack here; would it make
     * more sense to disable signaling and then enable
     * interrupts? It might be a bit cleaner.
     */
    cpu_data_ptr = getCpuDatap();
    cpu_data_ptr->cpu_running = FALSE;

    cpu_signal_handler_internal(TRUE);
}
/*
 *      Routine:        ml_cpu_get_info
 *      Function:
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
    cache_info_t *cpuid_cache_info;

    cpuid_cache_info = cache_info();
    ml_cpu_info->vector_unit = 0;
    ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
    ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
    ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 7)
    ml_cpu_info->l2_settings = 1;
    ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
    ml_cpu_info->l2_settings = 0;
    ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
#endif
    ml_cpu_info->l3_settings = 0;
    ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}
unsigned int
ml_get_machine_mem(void)
{
    return (machine_info.memory_size);
}

__attribute__((noreturn))
void
halt_all_cpus(boolean_t reboot)
{
    if (reboot) {
        printf("MACH Reboot\n");
        PEHaltRestart(kPERestartCPU);
    } else {
        printf("CPU halted\n");
        PEHaltRestart(kPEHaltCPU);
    }
    while (1);
}

__attribute__((noreturn))
void
halt_cpu(void)
{
    halt_all_cpus(FALSE);
}
/*
 *      Routine:        machine_signal_idle
 *      Function:
 */
void
machine_signal_idle(
                    processor_t processor)
{
    cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_deferred(
                             processor_t processor)
{
    cpu_signal_deferred(processor_to_cpu_datap(processor));
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_cancel(
                           processor_t processor)
{
    cpu_signal_cancel(processor_to_cpu_datap(processor));
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}
/*
 *      Routine:        ml_install_interrupt_handler
 *      Function:       Initialize Interrupt Handler
 */
void
ml_install_interrupt_handler(
                             void *nub,
                             int source,
                             void *target,
                             IOInterruptHandler handler,
                             void *refCon)
{
    cpu_data_t *cpu_data_ptr;
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    cpu_data_ptr->interrupt_nub = nub;
    cpu_data_ptr->interrupt_source = source;
    cpu_data_ptr->interrupt_target = target;
    cpu_data_ptr->interrupt_handler = handler;
    cpu_data_ptr->interrupt_refCon = refCon;

    cpu_data_ptr->interrupts_enabled = TRUE;
    (void) ml_set_interrupts_enabled(current_state);

    initialize_screen(NULL, kPEAcquireScreen);
}
/*
 *      Routine:        ml_init_interrupt
 *      Function:       Initialize Interrupts
 */
void
ml_init_interrupt(void)
{
}

/*
 *      Routine:        ml_init_timebase
 *      Function:       register and setup Timebase, Decrementer services
 */
void ml_init_timebase(
        void *args,
        tbd_ops_t tbd_funcs,
        vm_offset_t int_address,
        vm_offset_t int_value __unused)
{
    cpu_data_t *cpu_data_ptr;

    cpu_data_ptr = (cpu_data_t *)args;

    if ((cpu_data_ptr == &BootCpuData)
        && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
        rtclock_timebase_func = *tbd_funcs;
        rtclock_timebase_addr = int_address;
    }
}
void
ml_parse_cpu_topology(void)
{
    DTEntry entry, child __unused;
    OpaqueDTEntryIterator iter;
    uint32_t cpu_boot_arg;
    int err;

    cpu_boot_arg = MAX_CPUS;

    PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg));

    err = DTLookupEntry(NULL, "/cpus", &entry);
    assert(err == kSuccess);

    err = DTInitEntryIterator(entry, &iter);
    assert(err == kSuccess);

    while (kSuccess == DTIterateEntries(&iter, &child)) {
        unsigned int propSize;
        void *prop = NULL;
        int cpu_id = avail_cpus++;

        if (kSuccess == DTGetProperty(child, "cpu-id", &prop, &propSize))
            cpu_id = *((int32_t*)prop);

        assert(cpu_id < MAX_CPUS);
        assert(cpu_phys_ids[cpu_id] == (uint32_t)-1);

        if (boot_cpu == -1) {
            if (kSuccess != DTGetProperty(child, "state", &prop, &propSize))
                panic("unable to retrieve state for cpu %d", cpu_id);

            if (strncmp((char*)prop, "running", propSize) == 0) {
                boot_cpu = cpu_id;
            }
        }
        if (kSuccess != DTGetProperty(child, "reg", &prop, &propSize))
            panic("unable to retrieve physical ID for cpu %d", cpu_id);

        cpu_phys_ids[cpu_id] = *((uint32_t*)prop);

        if ((cpu_id > max_cpu_number) && ((cpu_id == boot_cpu) || (avail_cpus <= cpu_boot_arg)))
            max_cpu_number = cpu_id;
    }

    if (avail_cpus > cpu_boot_arg)
        avail_cpus = cpu_boot_arg;

    if (avail_cpus == 0)
        panic("No cpus found!");

    if (boot_cpu == -1)
        panic("unable to determine boot cpu!");
}
unsigned int
ml_get_cpu_count(void)
{
    return avail_cpus;
}

int
ml_get_boot_cpu_number(void)
{
    return boot_cpu;
}

cluster_type_t
ml_get_boot_cluster(void)
{
    return boot_cluster;
}

int
ml_get_cpu_number(uint32_t phys_id)
{
    for (int log_id = 0; log_id <= ml_get_max_cpu_number(); ++log_id) {
        if (cpu_phys_ids[log_id] == phys_id)
            return log_id;
    }
    return -1;
}

int
ml_get_max_cpu_number(void)
{
    return max_cpu_number;
}
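/*
 * Usage sketch (illustrative): interrupt or wake paths that only know a
 * physical CPU ID (as stashed from the device tree "reg" property above) can
 * recover the logical index with ml_get_cpu_number(), e.g.
 *
 *	int cpu = ml_get_cpu_number(phys_id);	// -1 when no logical CPU matches
 */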
void ml_lockdown_init() {
    lockdown_handler_grp = lck_grp_alloc_init("lockdown_handler", NULL);
    assert(lockdown_handler_grp != NULL);

    lck_mtx_init(&lockdown_handler_lck, lockdown_handler_grp, NULL);
}
kern_return_t
ml_lockdown_handler_register(lockdown_handler_t f, void *this)
{
    if (lockdown_handler || !f) {
        return KERN_FAILURE;
    }

    lck_mtx_lock(&lockdown_handler_lck);
    lockdown_handler = f;
    lockdown_this = this;

#if !(defined(KERNEL_INTEGRITY_KTRR))
    lockdown_done = 1;
    lockdown_handler(this);
#else
    if (lockdown_done) {
        lockdown_handler(this);
    }
#endif
    lck_mtx_unlock(&lockdown_handler_lck);

    return KERN_SUCCESS;
}
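/*
 * Usage sketch (illustrative, with a hypothetical handler and context): a
 * single callback and context pointer may be registered; on KTRR systems the
 * callback is deferred until rorgn_lockdown() runs, otherwise it fires
 * immediately:
 *
 *	static void my_lockdown_cb(void *ctx);		// hypothetical
 *	...
 *	ml_lockdown_handler_register(my_lockdown_cb, my_ctx);
 */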
void ml_lockdown_run_handler() {
    lck_mtx_lock(&lockdown_handler_lck);
    assert(!lockdown_done);

    lockdown_done = 1;
    if (lockdown_handler) {
        lockdown_handler(lockdown_this);
    }
    lck_mtx_unlock(&lockdown_handler_lck);
}
kern_return_t
ml_processor_register(
                      ml_processor_info_t * in_processor_info,
                      processor_t * processor_out,
                      ipi_handler_t * ipi_handler)
{
    cpu_data_t *this_cpu_datap;
    processor_set_t pset;
    boolean_t is_boot_cpu;
    static unsigned int reg_cpu_count = 0;

    if (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())
        return KERN_FAILURE;

    if ((unsigned int)OSIncrementAtomic((SInt32*)&reg_cpu_count) >= avail_cpus)
        return KERN_FAILURE;

    if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
        is_boot_cpu = FALSE;
        this_cpu_datap = cpu_data_alloc(FALSE);
        cpu_data_init(this_cpu_datap);
    } else {
        this_cpu_datap = &BootCpuData;
        is_boot_cpu = TRUE;
    }

    assert(in_processor_info->log_id < MAX_CPUS);

    this_cpu_datap->cpu_id = in_processor_info->cpu_id;

    this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(is_boot_cpu);
    if (this_cpu_datap->cpu_chud == (void *)NULL)
        goto processor_register_error;
    this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
    if (this_cpu_datap->cpu_console_buf == (void *)(NULL))
        goto processor_register_error;

    if (!is_boot_cpu) {
        this_cpu_datap->cpu_number = in_processor_info->log_id;

        if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS)
            goto processor_register_error;
    }

    this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle;
    this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch;
    nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
    this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);

    this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer;
    this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;

    this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler;
    this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
    this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
    this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;

    this_cpu_datap->cpu_cluster_type = in_processor_info->cluster_type;
    this_cpu_datap->cpu_cluster_id = in_processor_info->cluster_id;
    this_cpu_datap->cpu_l2_id = in_processor_info->l2_cache_id;
    this_cpu_datap->cpu_l2_size = in_processor_info->l2_cache_size;
    this_cpu_datap->cpu_l3_id = in_processor_info->l3_cache_id;
    this_cpu_datap->cpu_l3_size = in_processor_info->l3_cache_size;

    this_cpu_datap->cluster_master = is_boot_cpu;

    pset = pset_find(in_processor_info->cluster_id, processor_pset(master_processor));
    assert(pset != NULL);
    kprintf("%s>cpu_id %p cluster_id %d cpu_number %d is type %d\n", __FUNCTION__, in_processor_info->cpu_id, in_processor_info->cluster_id, this_cpu_datap->cpu_number, in_processor_info->cluster_type);

    if (!is_boot_cpu) {
        processor_init((struct processor *)this_cpu_datap->cpu_processor,
                       this_cpu_datap->cpu_number, pset);

        if (this_cpu_datap->cpu_l2_access_penalty) {
            /*
             * Cores that have a non-zero L2 access penalty compared
             * to the boot processor should be de-prioritized by the
             * scheduler, so that threads use the cores with better L2
             * preferentially.
             */
            processor_set_primary(this_cpu_datap->cpu_processor,
                                  master_processor);
        }
    }

    *processor_out = this_cpu_datap->cpu_processor;
    *ipi_handler = cpu_signal_handler;
    if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL)
        *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;

    if (kpc_register_cpu(this_cpu_datap) != TRUE)
        goto processor_register_error;

    if (!is_boot_cpu)
        prng_cpu_init(this_cpu_datap->cpu_number);

    // now let next CPU register itself
    OSIncrementAtomic((SInt32*)&real_ncpus);

    return KERN_SUCCESS;

processor_register_error:
    kpc_unregister_cpu(this_cpu_datap);
    if (this_cpu_datap->cpu_chud != (void *)NULL)
        chudxnu_cpu_free(this_cpu_datap->cpu_chud);
    if (!is_boot_cpu)
        cpu_data_free(this_cpu_datap);

    return KERN_FAILURE;
}
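/*
 * Note on the registration flow above: reg_cpu_count caps registrations at
 * avail_cpus, each CPU copies its ml_processor_info_t into per-cpu data and
 * joins the processor set for its cluster_id via pset_find(), and real_ncpus
 * is only incremented at the very end so that the next CPU may register
 * itself (per the comment above).
 */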
void
ml_init_arm_debug_interface(
                            void * in_cpu_datap,
                            vm_offset_t virt_address)
{
    ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
}

/*
 *      Routine:        init_ast_check
 *      Function:
 */
void
init_ast_check(
               __unused processor_t processor)
{
}

/*
 *      Routine:        cause_ast_check
 *      Function:
 */
void
cause_ast_check(
                processor_t processor)
{
    if (current_processor() != processor) {
        cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
    }
}

/*
 *      Routine:        ml_at_interrupt_context
 *      Function:       Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
    unsigned int local;
    vm_offset_t intstack_top_ptr;

    intstack_top_ptr = getCpuDatap()->intstack_top;
    return (((vm_offset_t)(&local) < intstack_top_ptr) && ((vm_offset_t)(&local) > (intstack_top_ptr - INTSTACK_SIZE)));
}

extern uint32_t cpu_idle_count;

void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
    *icp = ml_at_interrupt_context();
    *pidlep = (cpu_idle_count == real_ncpus);
}

/*
 *      Routine:        ml_cause_interrupt
 *      Function:       Generate a fake interrupt
 */
void
ml_cause_interrupt(void)
{
    return;                 /* BS_XXX */
}
/* Map memory map IO space */
vm_offset_t
ml_io_map(
          vm_offset_t phys_addr,
          vm_size_t size)
{
    return (io_map(phys_addr, size, VM_WIMG_IO));
}

vm_offset_t
ml_io_map_wcomb(
          vm_offset_t phys_addr,
          vm_size_t size)
{
    return (io_map(phys_addr, size, VM_WIMG_WCOMB));
}

/* boot memory allocation */
vm_offset_t
ml_static_malloc(
                 __unused vm_size_t size)
{
    return ((vm_offset_t) NULL);
}

vm_map_address_t
ml_map_high_window(
        vm_offset_t phys_addr,
        vm_size_t len)
{
    return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
}

vm_offset_t
ml_static_ptovirt(
        vm_offset_t paddr)
{
    return phystokv(paddr);
}
vm_offset_t
ml_static_vtop(
        vm_offset_t vaddr)
{
    if (((vm_address_t)(vaddr) - gVirtBase) >= gPhysSize)
        panic("ml_static_vtop(): illegal vaddr: %p\n", (void*)vaddr);
    return ((vm_address_t)(vaddr) - gVirtBase + gPhysBase);
}
kern_return_t
ml_static_protect(
        vm_offset_t vaddr, /* kernel virtual address */
        vm_size_t size,
        vm_prot_t new_prot)
{
    pt_entry_t arm_prot = 0;
    pt_entry_t arm_block_prot = 0;
    vm_offset_t vaddr_cur;
    ppnum_t ppn;
    kern_return_t result = KERN_SUCCESS;

    if (vaddr < VM_MIN_KERNEL_ADDRESS) {
        panic("ml_static_protect(): %p < %p", (void *) vaddr, (void *) VM_MIN_KERNEL_ADDRESS);
        return KERN_FAILURE;
    }

    assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

    if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
        panic("ml_static_protect(): WX request on %p", (void *) vaddr);
    }

    /* Set up the protection bits, and block bits so we can validate block mappings. */
    if (new_prot & VM_PROT_WRITE) {
        arm_prot |= ARM_PTE_AP(AP_RWNA);
        arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
    } else {
        arm_prot |= ARM_PTE_AP(AP_RONA);
        arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
    }

    arm_prot |= ARM_PTE_NX;
    arm_block_prot |= ARM_TTE_BLOCK_NX;

    if (!(new_prot & VM_PROT_EXECUTE)) {
        arm_prot |= ARM_PTE_PNX;
        arm_block_prot |= ARM_TTE_BLOCK_PNX;
    }

    for (vaddr_cur = vaddr;
         vaddr_cur < trunc_page_64(vaddr + size);
         vaddr_cur += PAGE_SIZE) {
        ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
        if (ppn != (vm_offset_t) NULL) {
#if __ARM64_TWO_LEVEL_PMAP__
            tt_entry_t *tte2;
#else
            tt_entry_t *tte1, *tte2;
#endif
            pt_entry_t *pte_p;
            pt_entry_t ptmp;

#if __ARM64_TWO_LEVEL_PMAP__
            tte2 = &kernel_pmap->tte[(((vaddr_cur) & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT)];
#else
            tte1 = &kernel_pmap->tte[(((vaddr_cur) & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT)];
            tte2 = &((tt_entry_t*) phystokv((*tte1) & ARM_TTE_TABLE_MASK))[(((vaddr_cur) & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT)];
#endif

            if (((*tte2) & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
                if ((((*tte2) & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
                    ((*tte2 & (ARM_TTE_BLOCK_NXMASK | ARM_TTE_BLOCK_PNXMASK | ARM_TTE_BLOCK_APMASK)) == arm_block_prot)) {
                    /*
                     * We can support ml_static_protect on a block mapping if the mapping already has
                     * the desired protections. We still want to run checks on a per-page basis.
                     */
                    continue;
                }

                result = KERN_FAILURE;
                break;
            }

            pte_p = (pt_entry_t *)&((tt_entry_t*)(phystokv((*tte2) & ARM_TTE_TABLE_MASK)))[(((vaddr_cur) & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT)];
            ptmp = *pte_p;

            if ((ptmp & ARM_PTE_HINT_MASK) && ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot)) {
                /*
                 * The contiguous hint is similar to a block mapping for ml_static_protect; if the existing
                 * protections do not match the desired protections, then we will fail (as we cannot update
                 * this mapping without updating other mappings as well).
                 */
                result = KERN_FAILURE;
                break;
            }

            __unreachable_ok_push
            if (TEST_PAGE_RATIO_4) {
                unsigned int i;
                pt_entry_t *ptep_iter;

                ptep_iter = pte_p;
                for (i=0; i<4; i++, ptep_iter++) {
                    /* Note that there is a hole in the HINT sanity checking here. */
                    ptmp = *ptep_iter;

                    /* We only need to update the page tables if the protections do not match. */
                    if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) {
                        ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot;
                        *ptep_iter = ptmp;
                    }
                }
#ifndef __ARM_L1_PTW__
                FlushPoC_DcacheRegion( trunc_page_32(pte_p), 4*sizeof(*pte_p));
#endif
            } else {
                ptmp = *pte_p;

                /* We only need to update the page tables if the protections do not match. */
                if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) {
                    ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot;
                    *pte_p = ptmp;
                }

#ifndef __ARM_L1_PTW__
                FlushPoC_DcacheRegion( trunc_page_32(pte_p), sizeof(*pte_p));
#endif
            }
            __unreachable_ok_pop
        }
    }

    if (vaddr_cur > vaddr) {
        assert(((vaddr_cur - vaddr) & 0xFFFFFFFF00000000ULL) == 0);
        flush_mmu_tlb_region(vaddr, (uint32_t)(vaddr_cur - vaddr));
    }

    return result;
}
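/*
 * Usage sketch (illustrative, hypothetical address): after populating data that
 * must become immutable, a caller might do
 *
 *	kern_return_t kr = ml_static_protect(page_aligned_va, PAGE_SIZE, VM_PROT_READ);
 *
 * which rewrites the L3 PTEs (or validates an existing block/contiguous-hint
 * mapping) and is followed above by a TLB flush for the touched range.
 */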
/*
 *      Routine:        ml_static_mfree
 *      Function:
 */
void
ml_static_mfree(
                vm_offset_t vaddr,
                vm_size_t size)
{
    vm_offset_t vaddr_cur;
    ppnum_t ppn;
    uint32_t freed_pages = 0;

    /* It is acceptable (if bad) to fail to free. */
    if (vaddr < VM_MIN_KERNEL_ADDRESS)
        return;

    assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

    for (vaddr_cur = vaddr;
         vaddr_cur < trunc_page_64(vaddr + size);
         vaddr_cur += PAGE_SIZE) {

        ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
        if (ppn != (vm_offset_t) NULL) {
            /*
             * It is not acceptable to fail to update the protections on a page
             * we will release to the VM. We need to either panic or continue.
             * For now, we'll panic (to help flag if there is memory we can
             * reclaim).
             */
            if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
                panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
            }

            /*
             * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme
             * relies on the persistence of these mappings for all time.
             */
            // pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE));

            vm_page_create(ppn, (ppn + 1));
            freed_pages++;
        }
    }
    vm_page_lockspin_queues();
    vm_page_wire_count -= freed_pages;
    vm_page_wire_count_initial -= freed_pages;
    vm_page_unlock_queues();

    kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
}
/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(vm_offset_t vaddr)
{
    return kvtophys(vaddr);
}

/*
 * Routine: ml_nofault_copy
 * Function: Perform a physical mode copy if the source and destination have
 * valid translations in the kernel pmap. If translations are present, they are
 * assumed to be wired; e.g., no attempt is made to guarantee that the
 * translations obtained remain valid for the duration of the copy process.
 */
vm_size_t
ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
    addr64_t cur_phys_dst, cur_phys_src;
    vm_size_t count, nbytes = 0;

    while (size > 0) {
        if (!(cur_phys_src = kvtophys(virtsrc)))
            break;
        if (!(cur_phys_dst = kvtophys(virtdst)))
            break;
        if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
            !pmap_valid_address(trunc_page_64(cur_phys_src)))
            break;
        count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
        if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
            count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
        if (count > size)
            count = size;

        bcopy_phys(cur_phys_src, cur_phys_dst, count);

        nbytes += count;
        virtsrc += count;
        virtdst += count;
        size -= count;
    }

    return nbytes;
}
/*
 * Routine: ml_validate_nofault
 * Function: Validate that this address range has valid translations
 * in the kernel pmap. If translations are present, they are
 * assumed to be wired; i.e. no attempt is made to guarantee
 * that the translations persist after the check.
 * Returns: TRUE if the range is mapped and will not cause a fault,
 * FALSE otherwise.
 */

boolean_t ml_validate_nofault(
        vm_offset_t virtsrc, vm_size_t size)
{
    addr64_t cur_phys_src;
    uint32_t count;

    while (size > 0) {
        if (!(cur_phys_src = kvtophys(virtsrc)))
            return FALSE;
        if (!pmap_valid_address(trunc_page_64(cur_phys_src)))
            return FALSE;
        count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
        if (count > size)
            count = (uint32_t)size;

        virtsrc += count;
        size -= count;
    }

    return TRUE;
}
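/*
 * Usage sketch (illustrative): debugger-style readers pair these helpers, e.g.
 *
 *	if (ml_validate_nofault(src, len))
 *		copied = ml_nofault_copy(src, dst, len);
 *
 * Both walk the range a page at a time with kvtophys()/pmap_valid_address(),
 * so a hole in the kernel pmap stops the operation early rather than faulting.
 */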
void
ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
{
    *phys_addr = 0;
    *size = 0;
}

void
active_rt_threads(__unused boolean_t active)
{
}

static void cpu_qos_cb_default(__unused int urgency, __unused uint64_t qos_param1, __unused uint64_t qos_param2) {
    return;
}

cpu_qos_update_t cpu_qos_update = cpu_qos_cb_default;

void cpu_qos_update_register(cpu_qos_update_t cpu_qos_cb) {
    if (cpu_qos_cb != NULL) {
        cpu_qos_update = cpu_qos_cb;
    } else {
        cpu_qos_update = cpu_qos_cb_default;
    }
}
void
thread_tell_urgency(int urgency, uint64_t rt_period, uint64_t rt_deadline, uint64_t sched_latency __unused, __unused thread_t nthread)
{
    SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, rt_deadline, sched_latency, 0);

    cpu_qos_update(urgency, rt_period, rt_deadline);

    SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, rt_deadline, 0, 0);
}
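/*
 * Usage sketch (illustrative, hypothetical callback): a platform power manager
 * can observe urgency changes by registering a callback that matches
 * cpu_qos_update_t; passing NULL restores the default no-op:
 *
 *	static void my_qos_cb(int urgency, uint64_t rt_period, uint64_t rt_deadline);	// hypothetical
 *	...
 *	cpu_qos_update_register(my_qos_cb);
 *	...
 *	cpu_qos_update_register(NULL);	// back to cpu_qos_cb_default
 */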
void
machine_run_count(__unused uint32_t count)
{
}

processor_t
machine_choose_processor(__unused processor_set_t pset, processor_t processor)
{
    return (processor);
}

vm_offset_t
ml_stack_remaining(void)
{
    uintptr_t local = (uintptr_t) &local;

    if (ml_at_interrupt_context()) {
        return (local - (getCpuDatap()->intstack_top - INTSTACK_SIZE));
    } else {
        return (local - current_thread()->kernel_stack);
    }
}

vm_offset_t ml_stack_base(void);
vm_size_t ml_stack_size(void);

vm_offset_t
ml_stack_base(void)
{
    if (ml_at_interrupt_context()) {
        return getCpuDatap()->intstack_top - INTSTACK_SIZE;
    } else {
        return current_thread()->kernel_stack;
    }
}

vm_size_t
ml_stack_size(void)
{
    if (ml_at_interrupt_context()) {
        return INTSTACK_SIZE;
    } else {
        return kernel_stack_size;
    }
}

boolean_t machine_timeout_suspended(void) {
    return FALSE;
}

kern_return_t
ml_interrupt_prewarm(__unused uint64_t deadline)
{
    return KERN_FAILURE;
}
/*
 * Assumes fiq, irq disabled.
 */
void
ml_set_decrementer(uint32_t dec_value)
{
    cpu_data_t *cdp = getCpuDatap();

    assert(ml_get_interrupts_enabled() == FALSE);
    cdp->cpu_decrementer = dec_value;

    if (cdp->cpu_set_decrementer_func) {
        ((void (*)(uint32_t))cdp->cpu_set_decrementer_func)(dec_value);
    } else {
        __asm__ volatile("msr CNTP_TVAL_EL0, %0" : : "r"((uint64_t)dec_value));
    }
}
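/*
 * When no platform-specific hook is installed, the decrementer is backed
 * directly by the EL0 physical timer: writing CNTP_TVAL_EL0 arms the timer to
 * fire dec_value timebase ticks from now, since architecturally
 * CNTP_TVAL_EL0 = CNTP_CVAL_EL0 - CNTPCT_EL0.
 */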
uint64_t ml_get_hwclock()
{
    uint64_t timebase;

    // ISB required by ARMV7C.b section B8.1.2 & ARMv8 section D6.1.2
    // "Reads of CNTPCT[_EL0] can occur speculatively and out of order relative
    // to other instructions executed on the same processor."
    __asm__ volatile("isb\n"
                     "mrs %0, CNTPCT_EL0"
                     : "=r"(timebase));

    return timebase;
}

uint64_t
ml_get_timebase()
{
    return (ml_get_hwclock() + getCpuDatap()->cpu_base_timebase);
}

uint32_t
ml_get_decrementer()
{
    cpu_data_t *cdp = getCpuDatap();
    uint32_t dec;

    assert(ml_get_interrupts_enabled() == FALSE);

    if (cdp->cpu_get_decrementer_func) {
        dec = ((uint32_t (*)(void))cdp->cpu_get_decrementer_func)();
    } else {
        uint64_t wide_val;

        __asm__ volatile("mrs %0, CNTP_TVAL_EL0" : "=r"(wide_val));
        dec = (uint32_t)wide_val;
        assert(wide_val == (uint64_t)dec);
    }

    return dec;
}

boolean_t
ml_get_timer_pending()
{
    uint64_t cntp_ctl;

    __asm__ volatile("mrs %0, CNTP_CTL_EL0" : "=r"(cntp_ctl));
    return ((cntp_ctl & CNTP_CTL_EL0_ISTATUS) != 0) ? TRUE : FALSE;
}

boolean_t
ml_wants_panic_trap_to_debugger(void)
{
    boolean_t result = FALSE;
    return result;
}
static void
cache_trap_error(thread_t thread, vm_map_address_t fault_addr)
{
    mach_exception_data_type_t exc_data[2];
    arm_saved_state_t *regs = get_user_regs(thread);

    set_saved_state_far(regs, fault_addr);

    exc_data[0] = KERN_INVALID_ADDRESS;
    exc_data[1] = fault_addr;

    exception_triage(EXC_BAD_ACCESS, exc_data, 2);
}

static void
cache_trap_recover()
{
    vm_map_address_t fault_addr;

    __asm__ volatile("mrs %0, FAR_EL1" : "=r"(fault_addr));

    cache_trap_error(current_thread(), fault_addr);
}

static void
dcache_flush_trap(vm_map_address_t start, vm_map_size_t size)
{
    vm_map_address_t end = start + size;
    thread_t thread = current_thread();
    vm_offset_t old_recover = thread->recover;

    /* Check bounds */
    if (task_has_64BitAddr(current_task())) {
        if (end > MACH_VM_MAX_ADDRESS) {
            cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
        }
    } else {
        if (end > VM_MAX_ADDRESS) {
            cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
        }
    }

    if (start > end) {
        cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1));
    }

    /* Set recovery function */
    thread->recover = (vm_address_t)cache_trap_recover;

#if defined(APPLE_ARM64_ARCH_FAMILY)
    /*
     * We're coherent on Apple ARM64 CPUs, so this could be a nop. However,
     * if the region given us is bad, it would be good to catch it and
     * crash, ergo we still do the flush.
     */
    assert((size & 0xFFFFFFFF00000000ULL) == 0);
    FlushPoC_DcacheRegion(start, (uint32_t)size);
#else
#error "Make sure you don't need to xcall."
#endif

    /* Restore recovery function */
    thread->recover = old_recover;

    /* Return (caller does exception return) */
}

static void
icache_invalidate_trap(vm_map_address_t start, vm_map_size_t size)
{
    vm_map_address_t end = start + size;
    thread_t thread = current_thread();
    vm_offset_t old_recover = thread->recover;

    /* Check bounds */
    if (task_has_64BitAddr(current_task())) {
        if (end > MACH_VM_MAX_ADDRESS) {
            cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
        }
    } else {
        if (end > VM_MAX_ADDRESS) {
            cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1));
        }
    }

    if (start > end) {
        cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1));
    }

    /* Set recovery function */
    thread->recover = (vm_address_t)cache_trap_recover;

#if defined(APPLE_ARM64_ARCH_FAMILY)
    /* Clean dcache to unification, except we're coherent on Apple ARM64 CPUs */
#else
#error Make sure not cleaning is right for this platform!
#endif

    /* Invalidate iCache to point of unification */
    assert((size & 0xFFFFFFFF00000000ULL) == 0);
    InvalidatePoU_IcacheRegion(start, (uint32_t)size);

    /* Restore recovery function */
    thread->recover = old_recover;

    /* Return (caller does exception return) */
}
__attribute__((noreturn))
void
platform_syscall(arm_saved_state_t *state)
{
    uint32_t code;

#define platform_syscall_kprintf(x...) /* kprintf("platform_syscall: " x) */

    code = (uint32_t)get_saved_state_reg(state, 3);
    switch (code) {
    case 0:
        /* I-cache flush */
        platform_syscall_kprintf("icache flush requested.\n");
        icache_invalidate_trap(get_saved_state_reg(state, 0), get_saved_state_reg(state, 1));
        break;
    case 1:
        /* D-cache flush */
        platform_syscall_kprintf("dcache flush requested.\n");
        dcache_flush_trap(get_saved_state_reg(state, 0), get_saved_state_reg(state, 1));
        break;
    case 2:
        /* set cthread */
        platform_syscall_kprintf("set cthread self.\n");
        thread_set_cthread_self(get_saved_state_reg(state, 0));
        break;
    case 3:
        /* get cthread */
        platform_syscall_kprintf("get cthread self.\n");
        set_saved_state_reg(state, 0, thread_get_cthread_self());
        break;
    default:
        platform_syscall_kprintf("unknown: %d\n", code);
        break;
    }

    thread_exception_return();
}
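/*
 * Calling convention (as decoded above): x3 selects the operation, x0/x1 carry
 * the address and length for the cache-maintenance traps, and x0 carries or
 * returns the cthread value for the cthread operations.
 */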
static void
_enable_timebase_event_stream(uint32_t bit_index)
{
    uint64_t cntkctl; /* One wants to use 32 bits, but "mrs" prefers it this way */

    if (bit_index >= 64) {
        panic("%s: invalid bit index (%u)", __FUNCTION__, bit_index);
    }

    __asm__ volatile ("mrs %0, CNTKCTL_EL1" : "=r"(cntkctl));

    cntkctl |= (bit_index << CNTKCTL_EL1_EVENTI_SHIFT);
    cntkctl |= CNTKCTL_EL1_EVNTEN;
    cntkctl |= CNTKCTL_EL1_EVENTDIR; /* 1->0; why not? */

    /*
     * If the SOC supports it (and it isn't broken), enable
     * EL0 access to the physical timebase register.
     */
    if (user_timebase_allowed()) {
        cntkctl |= CNTKCTL_EL1_PL0PCTEN;
    }

    __asm__ volatile ("msr CNTKCTL_EL1, %0" : : "r"(cntkctl));
}

/*
 * Turn timer on, unmask that interrupt.
 */
static void
_enable_virtual_timer(void)
{
    uint64_t cntvctl = CNTP_CTL_EL0_ENABLE; /* One wants to use 32 bits, but "mrs" prefers it this way */

    __asm__ volatile ("msr CNTP_CTL_EL0, %0" : : "r"(cntvctl));
}
void
fiq_context_init(boolean_t enable_fiq __unused)
{
#if defined(APPLE_ARM64_ARCH_FAMILY)
    /* Could fill in our own ops here, if we needed them */
    uint64_t ticks_per_sec, ticks_per_event, events_per_sec;
    uint32_t bit_index;

    ticks_per_sec = gPEClockFrequencyInfo.timebase_frequency_hz;
#if defined(ARM_BOARD_WFE_TIMEOUT_NS)
    events_per_sec = 1000000000 / ARM_BOARD_WFE_TIMEOUT_NS;
#else
    /* Default to 1usec (or as close as we can get) */
    events_per_sec = 1000000;
#endif
    ticks_per_event = ticks_per_sec / events_per_sec;
    bit_index = flsll(ticks_per_event) - 1; /* Highest bit set */

    /* Round up to power of two */
    if ((ticks_per_event & ((1 << bit_index) - 1)) != 0) {
        bit_index++;
    }

    /*
     * The timer can only trigger on rising or falling edge,
     * not both; we don't care which we trigger on, but we
     * do need to adjust which bit we are interested in to
     * account for this.
     */
    if (bit_index != 0)
        bit_index--;

    _enable_timebase_event_stream(bit_index);
#else
#error Need a board configuration.
#endif

    /* Interrupts still disabled. */
    assert(ml_get_interrupts_enabled() == FALSE);
    _enable_virtual_timer();
}
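/*
 * Worked example (assuming a 24 MHz timebase and the default 1,000,000
 * events/sec): ticks_per_event = 24, flsll(24) - 1 = 4, and since 24 is not a
 * power of two the index rounds up to 5; the final decrement accounts for a
 * given timebase bit producing a same-direction edge only once every
 * 2^(bit + 1) ticks, so the event stream ends up keyed off bit 4.
 */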
/*
 * ARM64_TODO: remove me (just a convenience while we don't have crashreporter)
 */
extern int copyinframe(vm_address_t, char *, boolean_t);
size_t _OSUserBacktrace(char *buffer, size_t bufsize);

size_t _OSUserBacktrace(char *buffer, size_t bufsize)
{
    thread_t thread = current_thread();
    boolean_t is64bit = thread_is_64bit(thread);
    size_t trace_size_bytes = 0, lr_size;
    vm_address_t frame_addr; // Should really be mach_vm_offset_t...

    if (bufsize < 8) {
        return 0;
    }

    if (get_threadtask(thread) == kernel_task) {
        panic("%s: Should never be called from a kernel thread.", __FUNCTION__);
    }

    frame_addr = get_saved_state_fp(thread->machine.upcb);
    if (is64bit) {
        uint64_t frame[2];
        lr_size = sizeof(frame[1]);

        *((uint64_t*)buffer) = get_saved_state_pc(thread->machine.upcb);
        trace_size_bytes = lr_size;

        while (trace_size_bytes + lr_size < bufsize) {
            if (!(frame_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) {
                break;
            }

            if (0 != copyinframe(frame_addr, (char*)frame, TRUE)) {
                break;
            }

            *((uint64_t*)(buffer + trace_size_bytes)) = frame[1]; /* lr */
            frame_addr = frame[0];
            trace_size_bytes += lr_size;

            if (frame[0] == 0x0ULL) {
                break;
            }
        }
    } else {
        uint32_t frame[2];
        lr_size = sizeof(frame[1]);

        *((uint32_t*)buffer) = (uint32_t)get_saved_state_pc(thread->machine.upcb);
        trace_size_bytes = lr_size;

        while (trace_size_bytes + lr_size < bufsize) {
            if (!(frame_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) {
                break;
            }

            if (0 != copyinframe(frame_addr, (char*)frame, FALSE)) {
                break;
            }

            *((uint32_t*)(buffer + trace_size_bytes)) = frame[1]; /* lr */
            frame_addr = frame[0];
            trace_size_bytes += lr_size;

            if (frame[0] == 0x0ULL) {
                break;
            }
        }
    }

    return trace_size_bytes;
}
boolean_t
ml_delay_should_spin(uint64_t interval)
{
    cpu_data_t *cdp = getCpuDatap();

    if (cdp->cpu_idle_latency) {
        return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
    } else {
        /*
         * Early boot, latency is unknown. Err on the side of blocking,
         * which should always be safe, even if slow
         */
        return FALSE;
    }
}

boolean_t ml_thread_is64bit(thread_t thread) {
    return (thread_is_64bit(thread));
}

void ml_timer_evaluate(void) {
}

boolean_t
ml_timer_forced_evaluation(void) {
    return FALSE;
}

uint64_t
ml_energy_stat(thread_t t) {
    return t->machine.energy_estimate_nj;
}

void
ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) {
    /*
     * For now: update the resource coalition stats of the
     * current thread's coalition
     */
    task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
}

uint64_t
ml_gpu_stat(__unused thread_t t) {
    return 0;
}
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
static void
timer_state_event(boolean_t switch_to_kernel)
{
    thread_t thread = current_thread();
    if (!thread->precise_user_kernel_time) return;

    processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data;
    uint64_t now = ml_get_timebase();

    timer_stop(pd->current_state, now);
    pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
    timer_start(pd->current_state, now);

    timer_stop(pd->thread_timer, now);
    pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
    timer_start(pd->thread_timer, now);
}

void
timer_state_event_user_to_kernel(void)
{
    timer_state_event(TRUE);
}

void
timer_state_event_kernel_to_user(void)
{
    timer_state_event(FALSE);
}
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
    return current_thread_fast();
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
    return current_thread_fast();
}
ex_cb_info_t ex_cb_info[EXCB_CLASS_MAX];

/*
 * Callback registration
 * Currently we support only one registered callback per class but
 * it should be possible to support more callbacks
 */
kern_return_t ex_cb_register(
    ex_cb_class_t cb_class,
    ex_cb_t cb,
    void *refcon)
{
    ex_cb_info_t *pInfo = &ex_cb_info[cb_class];

    if ((NULL == cb) || (cb_class >= EXCB_CLASS_MAX))
    {
        return KERN_INVALID_VALUE;
    }

    if (NULL == pInfo->cb)
    {
        pInfo->cb = cb;
        pInfo->refcon = refcon;
        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}

/*
 * Called internally by platform kernel to invoke the registered callback for class
 */
ex_cb_action_t ex_cb_invoke(
    ex_cb_class_t cb_class,
    vm_offset_t far)
{
    ex_cb_info_t *pInfo = &ex_cb_info[cb_class];
    ex_cb_state_t state = {far};

    if (cb_class >= EXCB_CLASS_MAX)
    {
        panic("Invalid exception callback class 0x%x\n", cb_class);
    }

    if (pInfo->cb)
    {
        return pInfo->cb(cb_class, pInfo->refcon, &state);
    }
    return EXCB_ACTION_NONE;
}
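/*
 * Usage sketch (illustrative, hypothetical callback): a single handler may be
 * registered per class; ex_cb_invoke() later hands it the class, the
 * registered refcon, and an ex_cb_state_t holding the fault address:
 *
 *	static ex_cb_action_t my_ex_cb(ex_cb_class_t cls, void *refcon, ex_cb_state_t *state);	// hypothetical
 *	...
 *	kern_return_t kr = ex_cb_register(cb_class, my_ex_cb, my_refcon);
 */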