/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/io_map_entries.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/machine_routines.h>
#include <arm/misc_protos.h>
#include <arm/rtclock.h>
#include <arm/caches_internal.h>
#include <console/serial_protos.h>
#include <kern/machine.h>
#include <prng/random.h>
#include <kern/startup.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <mach/machine.h>
#include <machine/atomic.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <sys/kdebug.h>
#include <kern/coalition.h>
#include <pexpert/device_tree.h>
#include <arm/cpuid_internal.h>
#include <arm/cpu_capabilities.h>
#include <IOKit/IOPlatformExpert.h>
/* arm32 only supports a highly simplified topology, fixed at 1 cluster */
static ml_topology_cpu_t topology_cpu_array[MAX_CPUS];
static ml_topology_cluster_t topology_cluster = {
	.cluster_type = CLUSTER_TYPE_SMP,
};
static ml_topology_info_t topology_info = {
	.version = CPU_TOPOLOGY_VERSION,
	.cpus = topology_cpu_array,
	.clusters = &topology_cluster,
	.boot_cpu = &topology_cpu_array[0],
	.boot_cluster = &topology_cluster,
};
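/*
 * The per-CPU entries, num_cpus, and max_cpu_id of topology_info are
 * populated at boot by ml_parse_cpu_topology() below; the single SMP
 * cluster above also serves as the boot cluster.
 */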
uint32_t LockTimeOut;
uint32_t LockTimeOutUsec;
uint64_t TLockTimeOut;
uint64_t MutexSpin;
extern uint32_t lockdown_done;
uint64_t low_MutexSpin;
int64_t high_MutexSpin;
void
machine_startup(__unused boot_args * args)
{
	/*
	 * Kick off the kernel bootstrap.
	 */
	kernel_bootstrap();
}
char *
machine_boot_info(
	__unused char *buf,
	__unused vm_size_t size)
{
	return PE_boot_args();
}
void
slave_machine_init(__unused void *param)
{
	cpu_machine_init();     /* Initialize the processor */
	clock_init();           /* Init the clock */
}
/*
 *	Routine:        machine_processor_shutdown
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	void (*doshutdown)(processor_t),
	processor_t processor)
{
	return Shutdown_context(doshutdown, processor);
}
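/*
 * Lock timeout tuning: the spinlock timeout defaults to NSEC_PER_SEC >> 2
 * (250 ms) and can be overridden with the "slto_us" boot-arg; the mutex
 * spin window comes from the "mtxspin" boot-arg, capped at USEC_PER_SEC >> 4,
 * and otherwise defaults to 10 us.
 */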
/*
 *	Routine:        ml_init_lock_timeout
 */
void
ml_init_lock_timeout(void)
{
	uint64_t        abstime;
	uint64_t        mtxspin;
	uint64_t        default_timeout_ns = NSEC_PER_SEC >> 2;
	uint32_t        slto;

	if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
		default_timeout_ns = slto * NSEC_PER_USEC;
	}

	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
	LockTimeOutUsec = (uint32_t)(default_timeout_ns / NSEC_PER_USEC);
	LockTimeOut = (uint32_t)abstime;
	TLockTimeOut = LockTimeOut;

	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
		if (mtxspin > USEC_PER_SEC >> 4) {
			mtxspin = USEC_PER_SEC >> 4;
		}
		nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
	}
	MutexSpin = abstime;
	low_MutexSpin = MutexSpin;
	/*
	 * high_MutexSpin should be initialized as low_MutexSpin * real_ncpus, but
	 * real_ncpus is not set at this time.
	 *
	 * NOTE: active spinning is disabled on arm. It can be activated
	 * by setting high_MutexSpin through the sysctl.
	 */
	high_MutexSpin = low_MutexSpin;
}
/*
 * This is called when all of the ml_processor_info_t structures have been
 * initialized and all the processors have been started through processor_start().
 *
 * Required by the scheduler subsystem.
 */
void
ml_cpu_init_completed(void)
{
}
/*
 *	This is called from the machine-independent routine cpu_up()
 *	to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	os_atomic_inc(&machine_info.physical_cpu, relaxed);
	os_atomic_inc(&machine_info.logical_cpu, relaxed);
}
/*
 *	This is called from the machine-independent routine cpu_down()
 *	to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	cpu_data_t *cpu_data_ptr;

	os_atomic_dec(&machine_info.physical_cpu, relaxed);
	os_atomic_dec(&machine_info.logical_cpu, relaxed);

	/*
	 * If we want to deal with outstanding IPIs, we need to
	 * do so relatively early in the processor_doshutdown path,
	 * as we pend decrementer interrupts using the IPI
	 * mechanism if we cannot immediately service them (if
	 * IRQ is masked).  Do so now.
	 *
	 * We aren't on the interrupt stack here; would it make
	 * more sense to disable signaling and then enable
	 * interrupts?  It might be a bit cleaner.
	 */
	cpu_data_ptr = getCpuDatap();
	cpu_data_ptr->cpu_running = FALSE;

	cpu_signal_handler_internal(TRUE);
}
/*
 *	Routine:        ml_cpu_get_info
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
	cache_info_t *cpuid_cache_info;

	cpuid_cache_info = cache_info();
	ml_cpu_info->vector_unit = 0;
	ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
	ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
	ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 7)
	ml_cpu_info->l2_settings = 1;
	ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
	ml_cpu_info->l2_settings = 0;
	ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
#endif
	ml_cpu_info->l3_settings = 0;
	ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}
uint64_t
ml_get_machine_mem(void)
{
	return machine_info.memory_size;
}
/* Return max offset */
vm_map_offset_t
ml_get_max_offset(
	boolean_t is64,
	unsigned int option)
{
	unsigned int pmap_max_offset_option = 0;

	switch (option) {
	case MACHINE_MAX_OFFSET_DEFAULT:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT;
		break;
	case MACHINE_MAX_OFFSET_MIN:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN;
		break;
	case MACHINE_MAX_OFFSET_MAX:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX;
		break;
	case MACHINE_MAX_OFFSET_DEVICE:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE;
		break;
	default:
		panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
		break;
	}
	return pmap_max_offset(is64, pmap_max_offset_option);
}
void
ml_panic_trap_to_debugger(__unused const char *panic_format_str,
    __unused va_list *panic_args,
    __unused unsigned int reason,
    __unused uint64_t panic_options_mask,
    __unused unsigned long panic_caller)
{
	return;
}
__attribute__((noreturn))
void
halt_all_cpus(boolean_t reboot)
{
	if (reboot) {
		printf("MACH Reboot\n");
		PEHaltRestart(kPERestartCPU);
	} else {
		printf("CPU halted\n");
		PEHaltRestart(kPEHaltCPU);
	}
	while (1) {
		;
	}
}

__attribute__((noreturn))
void
halt_cpu(void)
{
	halt_all_cpus(FALSE);
}
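/*
 * The three machine_signal_idle* entry points below let the scheduler poke a
 * remote processor: an immediate SIGPnop IPI, a deferred (coalescable)
 * signal, and cancellation of a pending deferred signal.  Each one logs a
 * DBG_MACH_SCHED tracepoint tagged with the target processor's cpu_id.
 */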
/*
 *	Routine:        machine_signal_idle
 */
void
machine_signal_idle(
	processor_t processor)
{
	cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_deferred(
	processor_t processor)
{
	cpu_signal_deferred(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_cancel(
	processor_t processor)
{
	cpu_signal_cancel(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}
/*
 *	Routine:        ml_install_interrupt_handler
 *	Function:       Initialize Interrupt Handler
 */
void
ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	cpu_data_t *cpu_data_ptr;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->interrupt_nub = nub;
	cpu_data_ptr->interrupt_source = source;
	cpu_data_ptr->interrupt_target = target;
	cpu_data_ptr->interrupt_handler = handler;
	cpu_data_ptr->interrupt_refCon = refCon;

	(void) ml_set_interrupts_enabled(current_state);
}
/*
 *	Routine:        ml_init_interrupt
 *	Function:       Initialize Interrupts
 */
void
ml_init_interrupt(void)
{
}
/*
 *	Routine:        ml_init_timebase
 *	Function:       register and setup Timebase, Decrementer services
 */
void
ml_init_timebase(
	void *args,
	tbd_ops_t tbd_funcs,
	vm_offset_t int_address,
	vm_offset_t int_value)
{
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = (cpu_data_t *)args;

	if ((cpu_data_ptr == &BootCpuData)
	    && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
		rtclock_timebase_func = *tbd_funcs;
		rtclock_timebase_addr = int_address;
		rtclock_timebase_val = int_value;
	}
}
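/*
 * ml_parse_cpu_topology() walks the "/cpus" device tree node and fills in
 * topology_info.  The "cpus" boot-arg caps how many entries are recorded;
 * CPUs beyond that cap are rejected later in ml_processor_register().
 */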
void
ml_parse_cpu_topology(void)
{
	DTEntry entry, child;
	OpaqueDTEntryIterator iter;
	uint32_t cpu_boot_arg;
	int err;

	err = SecureDTLookupEntry(NULL, "/cpus", &entry);
	assert(err == kSuccess);

	err = SecureDTInitEntryIterator(entry, &iter);
	assert(err == kSuccess);

	cpu_boot_arg = MAX_CPUS;
	PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg));

	ml_topology_cluster_t *cluster = &topology_info.clusters[0];
	unsigned int cpu_id = 0;
	while (kSuccess == SecureDTIterateEntries(&iter, &child)) {
		unsigned int propSize;
		void const *prop = NULL;

		if (cpu_id == 0) {
			if (kSuccess != SecureDTGetProperty(child, "state", &prop, &propSize)) {
				panic("unable to retrieve state for cpu %u", cpu_id);
			}
			if (strncmp((char const *)prop, "running", propSize) != 0) {
				panic("cpu 0 has not been marked as running!");
			}
		}

		assert(kSuccess == SecureDTGetProperty(child, "reg", &prop, &propSize));
		assert(cpu_id == *((uint32_t const *)prop));

		if (cpu_id >= cpu_boot_arg) {
			break;
		}

		ml_topology_cpu_t *cpu = &topology_info.cpus[cpu_id];

		cpu->cpu_id = cpu_id;
		cpu->phys_id = cpu_id;
		cpu->cluster_type = cluster->cluster_type;

		cluster->cpu_mask |= 1ULL << cpu_id;

		topology_info.num_cpus++;
		topology_info.max_cpu_id = cpu_id;

		cpu_id++;
	}

	if (topology_info.num_cpus == 0) {
		panic("No cpus found!");
	}
}
const ml_topology_info_t *
ml_get_topology_info(void)
{
	return &topology_info;
}
unsigned int
ml_get_cpu_count(void)
{
	return topology_info.num_cpus;
}

unsigned int
ml_get_cluster_count(void)
{
	return topology_info.num_clusters;
}

int
ml_get_boot_cpu_number(void)
{
	return 0;
}

cluster_type_t
ml_get_boot_cluster(void)
{
	return CLUSTER_TYPE_SMP;
}

int
ml_get_cpu_number(uint32_t phys_id)
{
	if (phys_id > (uint32_t)ml_get_max_cpu_number()) {
		return -1;
	}

	return (int)phys_id;
}

unsigned int
ml_get_cluster_number(__unused uint32_t phys_id)
{
	return 0;
}

unsigned int
ml_get_max_cpu_number(void)
{
	return topology_info.num_cpus - 1;
}

unsigned int
ml_get_max_cluster_number(void)
{
	return topology_info.max_cluster_id;
}

unsigned int
ml_get_first_cpu_id(unsigned int cluster_id)
{
	return topology_info.clusters[cluster_id].first_cpu_id;
}
kern_return_t
ml_processor_register(ml_processor_info_t *in_processor_info,
    processor_t * processor_out, ipi_handler_t *ipi_handler_out,
    perfmon_interrupt_handler_func *pmi_handler_out)
{
	cpu_data_t *this_cpu_datap;
	boolean_t is_boot_cpu;

	const unsigned int max_cpu_id = ml_get_max_cpu_number();
	if (in_processor_info->phys_id > max_cpu_id) {
		/*
		 * The physical CPU ID indicates that we have more CPUs than
		 * this xnu build supports.  This probably means we have an
		 * incorrect board configuration.
		 *
		 * TODO: Should this just return a failure instead?  A panic
		 * is simply a convenient way to catch bugs in the pexpert
		 * headers.
		 */
		panic("phys_id %u is too large for max_cpu_id (%u)", in_processor_info->phys_id, max_cpu_id);
	}

	/* Fail the registration if the number of CPUs has been limited by boot-arg. */
	if ((in_processor_info->phys_id >= topology_info.num_cpus) ||
	    (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) {
		return KERN_FAILURE;
	}

	if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
		is_boot_cpu = FALSE;
		this_cpu_datap = cpu_data_alloc(FALSE);
		cpu_data_init(this_cpu_datap);
	} else {
		this_cpu_datap = &BootCpuData;
		is_boot_cpu = TRUE;
	}

	this_cpu_datap->cpu_id = in_processor_info->cpu_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
	if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) {
		goto processor_register_error;
	}

	if (!is_boot_cpu) {
		if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) {
			goto processor_register_error;
		}
	}

	this_cpu_datap->cpu_idle_notify = in_processor_info->processor_idle;
	this_cpu_datap->cpu_cache_dispatch = (cache_dispatch_t) in_processor_info->platform_cache_dispatch;
	nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
	this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);

	this_cpu_datap->idle_timer_notify = in_processor_info->idle_timer;
	this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;

	this_cpu_datap->platform_error_handler = in_processor_info->platform_error_handler;
	this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
	this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
	this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;

	processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, this_cpu_datap);
	if (!is_boot_cpu) {
		processor_init(processor, this_cpu_datap->cpu_number,
		    processor_pset(master_processor));

		if (this_cpu_datap->cpu_l2_access_penalty) {
			/*
			 * Cores that have a non-zero L2 access penalty compared
			 * to the boot processor should be de-prioritized by the
			 * scheduler, so that threads use the cores with better L2
			 * preferentially.
			 */
			processor_set_primary(processor, master_processor);
		}
	}

	*processor_out = processor;
	*ipi_handler_out = cpu_signal_handler;
	*pmi_handler_out = NULL;
	if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) {
		*in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;
	}

	if (kpc_register_cpu(this_cpu_datap) != TRUE) {
		goto processor_register_error;
	}

	if (!is_boot_cpu) {
		random_cpu_init(this_cpu_datap->cpu_number);
	}

	return KERN_SUCCESS;

processor_register_error:
	kpc_unregister_cpu(this_cpu_datap);
	if (!is_boot_cpu) {
		cpu_data_free(this_cpu_datap);
	}

	return KERN_FAILURE;
}
void
ml_init_arm_debug_interface(
	void *in_cpu_datap,
	vm_offset_t virt_address)
{
	((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
}
/*
 *	Routine:        init_ast_check
 */
void
init_ast_check(
	__unused processor_t processor)
{
}

/*
 *	Routine:        cause_ast_check
 */
void
cause_ast_check(
	processor_t processor)
{
	if (current_processor() != processor) {
		cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
	}
}
extern uint32_t cpu_idle_count;

void
ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
{
	*icp = ml_at_interrupt_context();
	*pidlep = (cpu_idle_count == real_ncpus);
}
/*
 *	Routine:        ml_cause_interrupt
 *	Function:       Generate a fake interrupt
 */
void
ml_cause_interrupt(void)
{
	return;
}
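/*
 * The I/O mapping helpers below wrap io_map()/io_map_with_prot() with fixed
 * cache attributes: VM_WIMG_IO for device mappings and VM_WIMG_WCOMB for
 * write-combined mappings; ml_io_unmap() tears the mapping down and frees
 * the kernel VA.
 */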
/* Map memory map IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return io_map(phys_addr, size, VM_WIMG_IO);
}

/* Map memory map IO space (with protections specified) */
vm_offset_t
ml_io_map_with_prot(
	vm_offset_t phys_addr,
	vm_size_t size,
	vm_prot_t prot)
{
	return io_map_with_prot(phys_addr, size, VM_WIMG_IO, prot);
}

vm_offset_t
ml_io_map_wcomb(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return io_map(phys_addr, size, VM_WIMG_WCOMB);
}

void
ml_io_unmap(vm_offset_t addr, vm_size_t sz)
{
	pmap_remove(kernel_pmap, addr, addr + sz);
	kmem_free(kernel_map, addr, sz);
}
/* boot memory allocation */
vm_offset_t
ml_static_malloc(
	__unused vm_size_t size)
{
	return (vm_offset_t) NULL;
}
vm_map_address_t
ml_map_high_window(
	vm_offset_t phys_addr,
	vm_size_t len)
{
	return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
}
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return phystokv(paddr);
}
vm_offset_t
ml_static_vtop(
	vm_offset_t vaddr)
{
	assertf(((vm_address_t)(vaddr) - gVirtBase) < gPhysSize, "%s: illegal vaddr: %p", __func__, (void*)vaddr);
	return (vm_address_t)(vaddr) - gVirtBase + gPhysBase;
}
/*
 * Return the maximum contiguous KVA range that can be accessed from this
 * physical address.  For arm64, we employ a segmented physical aperture
 * relocation table which can limit the available range for a given PA to
 * something less than the extent of physical memory.  But here, we still
 * have a flat physical aperture, so no such requirement exists.
 */
vm_map_address_t
phystokv_range(pmap_paddr_t pa, vm_size_t *max_len)
{
	vm_size_t len = gPhysSize - (pa - gPhysBase);
	if (*max_len > len) {
		*max_len = len;
	}
	assertf((pa - gPhysBase) < gPhysSize, "%s: illegal PA: 0x%lx", __func__, (unsigned long)pa);
	return pa - gPhysBase + gVirtBase;
}
vm_offset_t
ml_static_slide(
	vm_offset_t vaddr)
{
	return VM_KERNEL_SLIDE(vaddr);
}
kern_return_t
ml_static_verify_page_protections(
	uint64_t base, uint64_t size, vm_prot_t prot)
{
	/* XXX Implement Me */
	(void)base;
	(void)size;
	(void)prot;
	return KERN_FAILURE;
}
vm_offset_t
ml_static_unslide(
	vm_offset_t vaddr)
{
	return VM_KERNEL_UNSLIDE(vaddr);
}
kern_return_t
ml_static_protect(
	vm_offset_t vaddr, /* kernel virtual address */
	vm_size_t size,
	vm_prot_t new_prot)
{
	pt_entry_t arm_prot = 0;
	pt_entry_t arm_block_prot = 0;
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	kern_return_t result = KERN_SUCCESS;

	if (vaddr < VM_MIN_KERNEL_ADDRESS) {
		return KERN_FAILURE;
	}

	assert((vaddr & (ARM_PGBYTES - 1)) == 0); /* must be page aligned */

	if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
		panic("ml_static_protect(): WX request on %p", (void *) vaddr);
	}
	if (lockdown_done && (new_prot & VM_PROT_EXECUTE)) {
		panic("ml_static_protect(): attempt to inject executable mapping on %p", (void *) vaddr);
	}

	/* Set up the protection bits, and block bits so we can validate block mappings. */
	if (new_prot & VM_PROT_WRITE) {
		arm_prot |= ARM_PTE_AP(AP_RWNA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
	} else {
		arm_prot |= ARM_PTE_AP(AP_RONA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
	}

	if (!(new_prot & VM_PROT_EXECUTE)) {
		arm_prot |= ARM_PTE_NX;
		arm_block_prot |= ARM_TTE_BLOCK_NX;
	}

	for (vaddr_cur = vaddr;
	    vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
	    vaddr_cur += ARM_PGBYTES) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
			tt_entry_t tte = *ttp;

			if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
				if (((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
				    ((tte & (ARM_TTE_BLOCK_APMASK | ARM_TTE_BLOCK_NX_MASK)) == arm_block_prot)) {
					/*
					 * We can support ml_static_protect on a block mapping if the mapping already has
					 * the desired protections.  We still want to run checks on a per-page basis.
					 */
					continue;
				}

				result = KERN_FAILURE;
				break;
			}

			pt_entry_t *pte_p = (pt_entry_t *) ttetokv(tte) + ptenum(vaddr_cur);
			pt_entry_t ptmp = *pte_p;

			ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
			*pte_p = ptmp;
		}
	}

	if (vaddr_cur > vaddr) {
		flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));
	}

	return result;
}
/*
 *	Routine:        ml_static_mfree
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	uint32_t freed_pages = 0;
	uint32_t freed_kernelcache_pages = 0;

	/* It is acceptable (if bad) to fail to free. */
	if (vaddr < VM_MIN_KERNEL_ADDRESS) {
		return;
	}

	assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

	for (vaddr_cur = vaddr;
	    vaddr_cur < trunc_page_32(vaddr + size);
	    vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			/*
			 * It is not acceptable to fail to update the protections on a page
			 * we will release to the VM.  We need to either panic or continue.
			 * For now, we'll panic (to help flag if there is memory we can
			 * reclaim).
			 */
			if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
				panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
			}
			vm_page_create(ppn, (ppn + 1));
			freed_pages++;
			if (vaddr_cur >= segLOWEST && vaddr_cur < end_kern) {
				freed_kernelcache_pages++;
			}
		}
	}
	vm_page_lockspin_queues();
	vm_page_wire_count -= freed_pages;
	vm_page_wire_count_initial -= freed_pages;
	vm_page_kernelcache_count -= freed_kernelcache_pages;
	vm_page_unlock_queues();
	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
}
/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}
/*
 *	Routine:        ml_nofault_copy
 *	Function:       Perform a physical mode copy if the source and destination have
 *	valid translations in the kernel pmap.  If translations are present, they are
 *	assumed to be wired; i.e., no attempt is made to guarantee that the
 *	translations obtained remain valid for the duration of the copy process.
 */
vm_size_t
ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc))) {
			break;
		}
		if (!(cur_phys_dst = kvtophys(virtdst))) {
			break;
		}
		if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
		    !pmap_valid_address(trunc_page_64(cur_phys_src))) {
			break;
		}
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		}
		if (count > size) {
			count = (uint32_t)size;
		}

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}
/*
 *	Routine:        ml_validate_nofault
 *	Function:       Validate that this address range has valid translations
 *	in the kernel pmap.  If translations are present, they are
 *	assumed to be wired; i.e., no attempt is made to guarantee
 *	that the translations persist after the check.
 *	Returns: TRUE if the range is mapped and will not cause a fault,
 *		FALSE otherwise.
 */
boolean_t
ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size)
{
	addr64_t cur_phys_src;
	uint32_t count;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc))) {
			return FALSE;
		}
		if (!pmap_valid_address(trunc_page_64(cur_phys_src))) {
			return FALSE;
		}
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > size) {
			count = (uint32_t)size;
		}

		virtsrc += count;
		size -= count;
	}

	return TRUE;
}
void
ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
{
	*phys_addr = 0;
	*size = 0;
}
/*
 * Stubs for CPU Stepper
 */
void
active_rt_threads(__unused boolean_t active)
{
}
void
thread_tell_urgency(__unused thread_urgency_t urgency,
    __unused uint64_t rt_period,
    __unused uint64_t rt_deadline,
    __unused uint64_t sched_latency,
    __unused thread_t nthread)
{
}
void
machine_run_count(__unused uint32_t count)
{
}
processor_t
machine_choose_processor(__unused processor_set_t pset, processor_t processor)
{
	return processor;
}
boolean_t
machine_timeout_suspended(void)
{
	return FALSE;
}
kern_return_t
ml_interrupt_prewarm(__unused uint64_t deadline)
{
	return KERN_FAILURE;
}
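/*
 * ml_get_hwclock() reads the 64-bit physical counter (CNTPCT) through the
 * CP15 MRRC accessor.  Because the two 32-bit halves cannot be read
 * atomically here, the high word is sampled before and after the low word
 * and the read is retried until both samples match, which guards against
 * the low word wrapping between reads.
 */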
uint64_t
ml_get_hwclock(void)
{
	uint64_t high_first = 0;
	uint64_t high_second = 0;
	uint64_t low = 0;

	__builtin_arm_isb(ISB_SY);

	do {
		high_first = __builtin_arm_mrrc(15, 0, 14) >> 32;
		low = __builtin_arm_mrrc(15, 0, 14) & 0xFFFFFFFFULL;
		high_second = __builtin_arm_mrrc(15, 0, 14) >> 32;
	} while (high_first != high_second);

	return (high_first << 32) | (low);
}
boolean_t
ml_delay_should_spin(uint64_t interval)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_idle_latency) {
		return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
	} else {
		/*
		 * Early boot, latency is unknown.  Err on the side of blocking,
		 * which should always be safe, even if slow.
		 */
		return FALSE;
	}
}

void
ml_delay_on_yield(void)
{
}
boolean_t
ml_thread_is64bit(thread_t thread)
{
	return thread_is_64bit_addr(thread);
}
void
ml_timer_evaluate(void)
{
}

boolean_t
ml_timer_forced_evaluation(void)
{
	return FALSE;
}
uint64_t
ml_energy_stat(__unused thread_t t)
{
	return 0;
}
void
ml_gpu_stat_update(__unused uint64_t gpu_ns_delta)
{
	/*
	 * For now: update the resource coalition stats of the
	 * current thread's coalition
	 */
	task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
}
uint64_t
ml_gpu_stat(__unused thread_t t)
{
	return 0;
}
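/*
 * When precise user/kernel time accounting is compiled in, timer_state_event()
 * below moves both the processor state timer (user_state/system_state) and the
 * per-thread timer (user_timer/system_timer) across the user/kernel boundary;
 * threads without precise_user_kernel_time set skip the switch entirely.
 */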
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
static void
timer_state_event(boolean_t switch_to_kernel)
{
	thread_t thread = current_thread();
	if (!thread->precise_user_kernel_time) {
		return;
	}

	processor_t pd = current_processor();
	uint64_t now = ml_get_timebase();

	timer_stop(pd->current_state, now);
	pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
	timer_start(pd->current_state, now);

	timer_stop(pd->thread_timer, now);
	pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
	timer_start(pd->thread_timer, now);
}

void
timer_state_event_user_to_kernel(void)
{
	timer_state_event(TRUE);
}

void
timer_state_event_kernel_to_user(void)
{
	timer_state_event(FALSE);
}
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
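/*
 * get_arm_cpu_version() packs the MIDR variant field into bits [7:4] and the
 * revision field into bits [3:0] of the returned value; for example, a core
 * reporting variant 0x2 and revision 0x1 yields 0x21.
 */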
uint32_t
get_arm_cpu_version(void)
{
	uint32_t value = machine_read_midr();

	/* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */
	return ((value & MIDR_REV_MASK) >> MIDR_REV_SHIFT) | ((value & MIDR_VAR_MASK) >> (MIDR_VAR_SHIFT - 4));
}
boolean_t
user_cont_hwclock_allowed(void)
{
	return FALSE;
}
uint8_t
user_timebase_type(void)
{
#if __ARM_TIME__
	return USER_TIMEBASE_SPEC;
#else
	return USER_TIMEBASE_NONE;
#endif
}
/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void) __attribute__((const));
thread_t
current_act(void)
{
	return current_thread_fast();
}

#undef current_thread
extern thread_t current_thread(void) __attribute__((const));
thread_t
current_thread(void)
{
	return current_thread_fast();
}
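/*
 * With __ARM_USER_PROTECT__, user mappings are made unreachable while running
 * in the kernel: arm_user_protect_begin() switches TTBR0 to the thread's
 * kernel page tables (with the kernel ASID in CONTEXTIDR) and returns the
 * previous TTBR0, and arm_user_protect_end() restores the thread's user
 * TTBR0/ASID on the way back out.
 */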
#if __ARM_USER_PROTECT__
uintptr_t
arm_user_protect_begin(thread_t thread)
{
	uintptr_t ttbr0, asid = 0;              // kernel asid

	ttbr0 = __builtin_arm_mrc(15, 0, 2, 0, 0);   // Get TTBR0
	if (ttbr0 != thread->machine.kptw_ttb) {
		__builtin_arm_mcr(15, 0, thread->machine.kptw_ttb, 2, 0, 0); // Set TTBR0
		__builtin_arm_mcr(15, 0, asid, 13, 0, 1);                    // Set CONTEXTIDR
		__builtin_arm_isb(ISB_SY);
	}
	return ttbr0;
}

void
arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
{
	if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
		if (disable_interrupts) {
			__asm__ volatile ("cpsid if" ::: "memory");          // Disable FIQ/IRQ
		}
		__builtin_arm_mcr(15, 0, thread->machine.uptw_ttb, 2, 0, 0); // Set TTBR0
		__builtin_arm_mcr(15, 0, thread->machine.asid, 13, 0, 1);    // Set CONTEXTIDR with thread asid
		__builtin_arm_dsb(DSB_ISH);
		__builtin_arm_isb(ISB_SY);
	}
}
#endif // __ARM_USER_PROTECT__
void
machine_lockdown(void)
{
	arm_vm_prot_finalize(PE_state.bootArgs);
	lockdown_done = 1;
}

void
ml_lockdown_init(void)
{
}

void
ml_hibernate_active_pre(void)
{
}

void
ml_hibernate_active_post(void)
{
}
size_t
ml_get_vm_reserved_regions(bool vm_is64bit, struct vm_reserved_region **regions)
{
#pragma unused(vm_is64bit)
	assert(regions != NULL);

	*regions = NULL;
	return 0;
}