/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/io_map_entries.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/machine_routines.h>
#include <arm/misc_protos.h>
#include <arm/rtclock.h>
#include <arm/caches_internal.h>
#include <console/serial_protos.h>
#include <kern/machine.h>
#include <prng/random.h>
#include <kern/startup.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <mach/machine.h>
#include <machine/atomic.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <sys/kdebug.h>
#include <kern/coalition.h>
#include <pexpert/device_tree.h>
#include <arm/cpuid_internal.h>
#include <arm/cpu_capabilities.h>

#include <IOKit/IOPlatformExpert.h>
/* arm32 only supports a highly simplified topology, fixed at 1 cluster */
static ml_topology_cpu_t topology_cpu_array[MAX_CPUS];
static ml_topology_cluster_t topology_cluster = {
    .cluster_type = CLUSTER_TYPE_SMP,
};

static ml_topology_info_t topology_info = {
    .version = CPU_TOPOLOGY_VERSION,
    .cpus = topology_cpu_array,
    .clusters = &topology_cluster,
    .boot_cpu = &topology_cpu_array[0],
    .boot_cluster = &topology_cluster,
};
uint32_t LockTimeOutUsec;
uint64_t TLockTimeOut;

extern uint32_t lockdown_done;
uint64_t low_MutexSpin;
int64_t high_MutexSpin;
void
machine_startup(__unused boot_args * args)
{
    /*
     * Kick off the kernel bootstrap.
     */
    kernel_bootstrap();
    /* NOTREACHED */
}

char *
machine_boot_info(
    __unused char *buf,
    __unused vm_size_t size)
{
    return PE_boot_args();
}

void
slave_machine_init(__unused void *param)
{
    cpu_machine_init();     /* Initialize the processor */
    clock_init();           /* Init the clock */
}
/*
 * Routine: machine_processor_shutdown
 */
thread_t
machine_processor_shutdown(
    __unused thread_t thread,
    void (*doshutdown)(processor_t),
    processor_t processor)
{
    return Shutdown_context(doshutdown, processor);
}
/*
 * Routine: ml_init_lock_timeout
 */
void
ml_init_lock_timeout(void)
{
    uint64_t abstime;
    uint64_t mtxspin;
    uint64_t default_timeout_ns = NSEC_PER_SEC >> 2;
    uint32_t slto;

    if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
        default_timeout_ns = slto * NSEC_PER_USEC;
    }

    nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
    LockTimeOutUsec = (uint32_t)(default_timeout_ns / NSEC_PER_USEC);
    LockTimeOut = (uint32_t)abstime;
    TLockTimeOut = LockTimeOut;

    if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
        if (mtxspin > USEC_PER_SEC >> 4) {
            mtxspin = USEC_PER_SEC >> 4;
        }
        nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
    } else {
        nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
    }

    low_MutexSpin = MutexSpin;
    /*
     * high_MutexSpin should be initialized as low_MutexSpin * real_ncpus, but
     * real_ncpus is not set at this time.
     *
     * NOTE: active spinning is disabled in arm. It can be activated
     * by setting high_MutexSpin through the sysctl.
     */
    high_MutexSpin = low_MutexSpin;
}
/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
    os_atomic_inc(&machine_info.physical_cpu, relaxed);
    os_atomic_inc(&machine_info.logical_cpu, relaxed);
}
/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
    cpu_data_t *cpu_data_ptr;

    os_atomic_dec(&machine_info.physical_cpu, relaxed);
    os_atomic_dec(&machine_info.logical_cpu, relaxed);

    /*
     * If we want to deal with outstanding IPIs, we need to do so
     * relatively early in the processor_doshutdown path, as we pend
     * decrementer interrupts using the IPI mechanism if we cannot
     * immediately service them (if IRQ is masked). Do so now.
     *
     * We aren't on the interrupt stack here; would it make more sense
     * to disable signaling and then enable interrupts? It might be a
     * bit cleaner.
     */
    cpu_data_ptr = getCpuDatap();
    cpu_data_ptr->cpu_running = FALSE;

    cpu_signal_handler_internal(TRUE);
}
/*
 * Routine: ml_cpu_get_info
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
    cache_info_t *cpuid_cache_info;

    cpuid_cache_info = cache_info();
    ml_cpu_info->vector_unit = 0;
    ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
    ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
    ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 7)
    ml_cpu_info->l2_settings = 1;
    ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
    ml_cpu_info->l2_settings = 0;
    ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
#endif
    ml_cpu_info->l3_settings = 0;
    ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}
uint64_t
ml_get_machine_mem(void)
{
    return machine_info.memory_size;
}
/* Return max offset */
vm_map_offset_t
ml_get_max_offset(
    boolean_t is64,
    unsigned int option)
{
    unsigned int pmap_max_offset_option = 0;

    switch (option) {
    case MACHINE_MAX_OFFSET_DEFAULT:
        pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT;
        break;
    case MACHINE_MAX_OFFSET_MIN:
        pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN;
        break;
    case MACHINE_MAX_OFFSET_MAX:
        pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX;
        break;
    case MACHINE_MAX_OFFSET_DEVICE:
        pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE;
        break;
    default:
        panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
        break;
    }
    return pmap_max_offset(is64, pmap_max_offset_option);
}
void
ml_panic_trap_to_debugger(__unused const char *panic_format_str,
    __unused va_list *panic_args,
    __unused unsigned int reason,
    __unused uint64_t panic_options_mask,
    __unused unsigned long panic_caller)
{
    return;
}
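
/*
 * Routine: halt_all_cpus
 * Function: Halt or restart the machine via the platform expert, depending
 * on the 'reboot' argument. Neither path is expected to return.
 */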
__attribute__((noreturn))
void
halt_all_cpus(boolean_t reboot)
{
    if (reboot) {
        printf("MACH Reboot\n");
        PEHaltRestart(kPERestartCPU);
    } else {
        printf("CPU halted\n");
        PEHaltRestart(kPEHaltCPU);
    }
    while (1) {
        ;
    }
}

__attribute__((noreturn))
void
halt_cpu(void)
{
    halt_all_cpus(FALSE);
}
/*
 * Routine: machine_signal_idle
 */
void
machine_signal_idle(
    processor_t processor)
{
    cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}
void
machine_signal_idle_deferred(
    processor_t processor)
{
    cpu_signal_deferred(processor_to_cpu_datap(processor));
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}
void
machine_signal_idle_cancel(
    processor_t processor)
{
    cpu_signal_cancel(processor_to_cpu_datap(processor));
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}
/*
 * Routine: ml_install_interrupt_handler
 * Function: Initialize Interrupt Handler
 */
void
ml_install_interrupt_handler(
    void *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon)
{
    cpu_data_t *cpu_data_ptr;
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    cpu_data_ptr->interrupt_nub = nub;
    cpu_data_ptr->interrupt_source = source;
    cpu_data_ptr->interrupt_target = target;
    cpu_data_ptr->interrupt_handler = handler;
    cpu_data_ptr->interrupt_refCon = refCon;

    (void) ml_set_interrupts_enabled(current_state);
}
/*
 * Routine: ml_init_interrupt
 * Function: Initialize Interrupts
 */
void
ml_init_interrupt(void)
{
}
/*
 * Routine: ml_init_timebase
 * Function: register and set up Timebase, Decrementer services
 */
void
ml_init_timebase(
    void *args,
    tbd_ops_t tbd_funcs,
    vm_offset_t int_address,
    vm_offset_t int_value)
{
    cpu_data_t *cpu_data_ptr;

    cpu_data_ptr = (cpu_data_t *)args;

    if ((cpu_data_ptr == &BootCpuData)
        && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
        rtclock_timebase_func = *tbd_funcs;
        rtclock_timebase_addr = int_address;
        rtclock_timebase_val = int_value;
    }
}
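
/*
 * Routine: ml_parse_cpu_topology
 * Function: Walk the "/cpus" node of the device tree and populate
 * topology_info. arm32 assumes a single SMP cluster; the number of CPUs
 * recorded is capped by the "cpus" boot-arg.
 */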
void
ml_parse_cpu_topology(void)
{
    DTEntry entry, child;
    OpaqueDTEntryIterator iter;
    uint32_t cpu_boot_arg;
    int err;

    err = SecureDTLookupEntry(NULL, "/cpus", &entry);
    assert(err == kSuccess);

    err = SecureDTInitEntryIterator(entry, &iter);
    assert(err == kSuccess);

    cpu_boot_arg = MAX_CPUS;
    PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg));

    ml_topology_cluster_t *cluster = &topology_info.clusters[0];
    unsigned int cpu_id = 0;
    while (kSuccess == SecureDTIterateEntries(&iter, &child)) {
        unsigned int propSize;
        void const *prop = NULL;

        if (kSuccess != SecureDTGetProperty(child, "state", &prop, &propSize)) {
            panic("unable to retrieve state for cpu %u", cpu_id);
        }

        if (strncmp((char const *)prop, "running", propSize) != 0) {
            panic("cpu 0 has not been marked as running!");
        }

        assert(kSuccess == SecureDTGetProperty(child, "reg", &prop, &propSize));
        assert(cpu_id == *((uint32_t const *)prop));

        if (cpu_id >= cpu_boot_arg) {
            break;
        }

        ml_topology_cpu_t *cpu = &topology_info.cpus[cpu_id];

        cpu->cpu_id = cpu_id;
        cpu->phys_id = cpu_id;
        cpu->cluster_type = cluster->cluster_type;

        cluster->cpu_mask |= 1ULL << cpu_id;

        topology_info.num_cpus++;
        topology_info.max_cpu_id = cpu_id;

        cpu_id++;
    }

    if (topology_info.num_cpus < 1) {
        panic("No cpus found!");
    }
}
const ml_topology_info_t *
ml_get_topology_info(void)
{
    return &topology_info;
}

unsigned int
ml_get_cpu_count(void)
{
    return topology_info.num_cpus;
}

unsigned int
ml_get_cluster_count(void)
{
    return topology_info.num_clusters;
}
unsigned int
ml_get_boot_cpu_number(void)
{
    return 0;
}

cluster_type_t
ml_get_boot_cluster(void)
{
    return CLUSTER_TYPE_SMP;
}

int
ml_get_cpu_number(uint32_t phys_id)
{
    if (phys_id > (uint32_t)ml_get_max_cpu_number()) {
        return -1;
    }

    return (int)phys_id;
}

int
ml_get_cluster_number(__unused uint32_t phys_id)
{
    return 0;
}

unsigned int
ml_get_max_cpu_number(void)
{
    return topology_info.num_cpus - 1;
}

unsigned int
ml_get_max_cluster_number(void)
{
    return topology_info.max_cluster_id;
}

unsigned int
ml_get_first_cpu_id(unsigned int cluster_id)
{
    return topology_info.clusters[cluster_id].first_cpu_id;
}
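
/*
 * Routine: ml_processor_register
 * Function: Register a CPU described by the platform expert with the
 * machine layer: set up (or reuse, for the boot CPU) its cpu_data,
 * record idle/timer/error callbacks, and hand back the processor_t and
 * IPI handler used by the rest of the kernel.
 */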
kern_return_t
ml_processor_register(ml_processor_info_t *in_processor_info,
    processor_t * processor_out, ipi_handler_t *ipi_handler_out,
    perfmon_interrupt_handler_func *pmi_handler_out)
{
    cpu_data_t *this_cpu_datap;
    boolean_t is_boot_cpu;

    const unsigned int max_cpu_id = ml_get_max_cpu_number();
    if (in_processor_info->phys_id > max_cpu_id) {
        /*
         * The physical CPU ID indicates that we have more CPUs than
         * this xnu build supports. This probably means we have an
         * incorrect board configuration.
         *
         * TODO: Should this just return a failure instead? A panic
         * is simply a convenient way to catch bugs in the pexpert
         * headers.
         */
        panic("phys_id %u is too large for max_cpu_id (%u)", in_processor_info->phys_id, max_cpu_id);
    }

    /* Fail the registration if the number of CPUs has been limited by boot-arg. */
    if ((in_processor_info->phys_id >= topology_info.num_cpus) ||
        (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) {
        return KERN_FAILURE;
    }

    if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
        is_boot_cpu = FALSE;
        this_cpu_datap = cpu_data_alloc(FALSE);
        cpu_data_init(this_cpu_datap);
    } else {
        is_boot_cpu = TRUE;
        this_cpu_datap = &BootCpuData;
    }

    this_cpu_datap->cpu_id = in_processor_info->cpu_id;

    this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
    if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) {
        goto processor_register_error;
    }

    if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) {
        goto processor_register_error;
    }

    this_cpu_datap->cpu_idle_notify = in_processor_info->processor_idle;
    this_cpu_datap->cpu_cache_dispatch = (cache_dispatch_t) in_processor_info->platform_cache_dispatch;
    nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
    this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);

    this_cpu_datap->idle_timer_notify = in_processor_info->idle_timer;
    this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;

    this_cpu_datap->platform_error_handler = in_processor_info->platform_error_handler;
    this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
    this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
    this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;

    processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, this_cpu_datap);
    processor_init(processor, this_cpu_datap->cpu_number,
        processor_pset(master_processor));

    if (this_cpu_datap->cpu_l2_access_penalty) {
        /*
         * Cores that have a non-zero L2 access penalty compared
         * to the boot processor should be de-prioritized by the
         * scheduler, so that threads use the cores with better L2.
         */
        processor_set_primary(processor, master_processor);
    }

    *processor_out = processor;
    *ipi_handler_out = cpu_signal_handler;
    *pmi_handler_out = NULL;
    if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) {
        *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;
    }

    if (kpc_register_cpu(this_cpu_datap) != TRUE) {
        goto processor_register_error;
    }

    random_cpu_init(this_cpu_datap->cpu_number);

    return KERN_SUCCESS;

processor_register_error:
    kpc_unregister_cpu(this_cpu_datap);
    cpu_data_free(this_cpu_datap);

    return KERN_FAILURE;
}
void
ml_init_arm_debug_interface(
    void *in_cpu_datap,
    vm_offset_t virt_address)
{
    ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
}
/*
 * Routine: init_ast_check
 */
void
init_ast_check(
    __unused processor_t processor)
{
}
/*
 * Routine: cause_ast_check
 */
void
cause_ast_check(
    processor_t processor)
{
    if (current_processor() != processor) {
        cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
    }
}
extern uint32_t cpu_idle_count;

void
ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
{
    *icp = ml_at_interrupt_context();
    *pidlep = (cpu_idle_count == real_ncpus);
}
/*
 * Routine: ml_cause_interrupt
 * Function: Generate a fake interrupt
 */
void
ml_cause_interrupt(void)
{
    return;
}
/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
    vm_offset_t phys_addr,
    vm_size_t size)
{
    return io_map(phys_addr, size, VM_WIMG_IO);
}

/* Map memory-mapped IO space (with protections specified) */
vm_offset_t
ml_io_map_with_prot(
    vm_offset_t phys_addr,
    vm_size_t size,
    vm_prot_t prot)
{
    return io_map_with_prot(phys_addr, size, VM_WIMG_IO, prot);
}

vm_offset_t
ml_io_map_wcomb(
    vm_offset_t phys_addr,
    vm_size_t size)
{
    return io_map(phys_addr, size, VM_WIMG_WCOMB);
}
void
ml_io_unmap(vm_offset_t addr, vm_size_t sz)
{
    pmap_remove(kernel_pmap, addr, addr + sz);
    kmem_free(kernel_map, addr, sz);
}
/* boot memory allocation */
vm_offset_t
ml_static_malloc(
    __unused vm_size_t size)
{
    return (vm_offset_t) NULL;
}
vm_map_address_t
ml_map_high_window(
    vm_offset_t phys_addr,
    vm_size_t len)
{
    return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
}

vm_offset_t
ml_static_ptovirt(
    vm_offset_t paddr)
{
    return phystokv(paddr);
}
vm_offset_t
ml_static_vtop(
    vm_offset_t vaddr)
{
    assertf(((vm_address_t)(vaddr) - gVirtBase) < gPhysSize, "%s: illegal vaddr: %p", __func__, (void*)vaddr);
    return (vm_address_t)(vaddr) - gVirtBase + gPhysBase;
}

/*
 * Return the maximum contiguous KVA range that can be accessed from this
 * physical address. For arm64, we employ a segmented physical aperture
 * relocation table which can limit the available range for a given PA to
 * something less than the extent of physical memory. But here, we still
 * have a flat physical aperture, so no such requirement exists.
 */
vm_map_address_t
phystokv_range(pmap_paddr_t pa, vm_size_t *max_len)
{
    vm_size_t len = gPhysSize - (pa - gPhysBase);
    if (*max_len > len) {
        *max_len = len;
    }
    assertf((pa - gPhysBase) < gPhysSize, "%s: illegal PA: 0x%lx", __func__, (unsigned long)pa);
    return pa - gPhysBase + gVirtBase;
}

vm_offset_t
ml_static_slide(
    vm_offset_t vaddr)
{
    return VM_KERNEL_SLIDE(vaddr);
}
kern_return_t
ml_static_verify_page_protections(
    uint64_t base, uint64_t size, vm_prot_t prot)
{
    /* XXX Implement Me */
    return KERN_FAILURE;
}

vm_offset_t
ml_static_unslide(
    vm_offset_t vaddr)
{
    return VM_KERNEL_UNSLIDE(vaddr);
}
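
/*
 * Routine: ml_static_protect
 * Function: Apply new protections to a page-aligned, statically mapped
 * kernel VA range by rewriting the AP/NX bits of the underlying PTEs.
 * W+X requests, and executable requests after lockdown, are rejected
 * with a panic.
 */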
kern_return_t
ml_static_protect(
    vm_offset_t vaddr, /* kernel virtual address */
    vm_size_t size,
    vm_prot_t new_prot)
{
    pt_entry_t arm_prot = 0;
    pt_entry_t arm_block_prot = 0;
    vm_offset_t vaddr_cur;
    ppnum_t ppn;
    kern_return_t result = KERN_SUCCESS;

    if (vaddr < VM_MIN_KERNEL_ADDRESS) {
        return KERN_FAILURE;
    }

    assert((vaddr & (ARM_PGBYTES - 1)) == 0); /* must be page aligned */

    if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
        panic("ml_static_protect(): WX request on %p", (void *) vaddr);
    }
    if (lockdown_done && (new_prot & VM_PROT_EXECUTE)) {
        panic("ml_static_protect(): attempt to inject executable mapping on %p", (void *) vaddr);
    }

    /* Set up the protection bits, and block bits so we can validate block mappings. */
    if (new_prot & VM_PROT_WRITE) {
        arm_prot |= ARM_PTE_AP(AP_RWNA);
        arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
    } else {
        arm_prot |= ARM_PTE_AP(AP_RONA);
        arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
    }

    if (!(new_prot & VM_PROT_EXECUTE)) {
        arm_prot |= ARM_PTE_NX;
        arm_block_prot |= ARM_TTE_BLOCK_NX;
    }

    for (vaddr_cur = vaddr;
        vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
        vaddr_cur += ARM_PGBYTES) {
        ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
        if (ppn != (vm_offset_t) NULL) {
            tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
            tt_entry_t tte = *ttp;

            if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
                if (((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
                    ((tte & (ARM_TTE_BLOCK_APMASK | ARM_TTE_BLOCK_NX_MASK)) == arm_block_prot)) {
                    /*
                     * We can support ml_static_protect on a block mapping if the mapping already has
                     * the desired protections. We still want to run checks on a per-page basis.
                     */
                    continue;
                }

                result = KERN_FAILURE;
                break;
            }

            pt_entry_t *pte_p = (pt_entry_t *) ttetokv(tte) + ptenum(vaddr_cur);
            pt_entry_t ptmp = *pte_p;

            ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
            *pte_p = ptmp;
        }
    }

    if (vaddr_cur > vaddr) {
        flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));
    }

    return result;
}
/*
 * Routine: ml_static_mfree
 * Function: Return statically mapped (wired) boot memory to the VM.
 */
void
ml_static_mfree(
    vm_offset_t vaddr,
    vm_size_t size)
{
    vm_offset_t vaddr_cur;
    ppnum_t ppn;
    uint32_t freed_pages = 0;
    uint32_t freed_kernelcache_pages = 0;

    /* It is acceptable (if bad) to fail to free. */
    if (vaddr < VM_MIN_KERNEL_ADDRESS) {
        return;
    }

    assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

    for (vaddr_cur = vaddr;
        vaddr_cur < trunc_page_32(vaddr + size);
        vaddr_cur += PAGE_SIZE) {
        ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
        if (ppn != (vm_offset_t) NULL) {
            /*
             * It is not acceptable to fail to update the protections on a page
             * we will release to the VM. We need to either panic or continue.
             * For now, we'll panic (to help flag if there is memory we can
             * reclaim).
             */
            if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
                panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
            }
            vm_page_create(ppn, (ppn + 1));
            freed_pages++;
            if (vaddr_cur >= segLOWEST && vaddr_cur < end_kern) {
                freed_kernelcache_pages++;
            }
        }
    }
    vm_page_lockspin_queues();
    vm_page_wire_count -= freed_pages;
    vm_page_wire_count_initial -= freed_pages;
    vm_page_kernelcache_count -= freed_kernelcache_pages;
    vm_page_unlock_queues();

    kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
}
/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(vm_offset_t vaddr)
{
    return kvtophys(vaddr);
}
/*
 * Routine: ml_nofault_copy
 * Function: Perform a physical mode copy if the source and destination have
 * valid translations in the kernel pmap. If translations are present, they are
 * assumed to be wired; i.e., no attempt is made to guarantee that the
 * translations obtained remain valid for the duration of the copy process.
 */
vm_size_t
ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
    addr64_t cur_phys_dst, cur_phys_src;
    uint32_t count, nbytes = 0;

    while (size > 0) {
        if (!(cur_phys_src = kvtophys(virtsrc))) {
            break;
        }
        if (!(cur_phys_dst = kvtophys(virtdst))) {
            break;
        }
        if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
            !pmap_valid_address(trunc_page_64(cur_phys_src))) {
            break;
        }
        count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
        if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
            count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
        }
        if (count > size) {
            count = size;
        }

        bcopy_phys(cur_phys_src, cur_phys_dst, count);

        nbytes += count;
        virtsrc += count;
        virtdst += count;
        size -= count;
    }

    return nbytes;
}
/*
 * Routine: ml_validate_nofault
 * Function: Validate that this address range has valid translations
 * in the kernel pmap. If translations are present, they are
 * assumed to be wired; i.e., no attempt is made to guarantee
 * that the translations persist after the check.
 * Returns: TRUE if the range is mapped and will not cause a fault,
 *          FALSE otherwise.
 */
boolean_t
ml_validate_nofault(
    vm_offset_t virtsrc, vm_size_t size)
{
    addr64_t cur_phys_src;
    uint32_t count;

    while (size > 0) {
        if (!(cur_phys_src = kvtophys(virtsrc))) {
            return FALSE;
        }
        if (!pmap_valid_address(trunc_page_64(cur_phys_src))) {
            return FALSE;
        }
        count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
        if (count > size) {
            count = (uint32_t)size;
        }

        virtsrc += count;
        size -= count;
    }

    return TRUE;
}
void
ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
{
    *phys_addr = 0;
    *size = 0;
}
/*
 * Stubs for CPU Stepper
 */
void
active_rt_threads(__unused boolean_t active)
{
}

void
thread_tell_urgency(__unused thread_urgency_t urgency,
    __unused uint64_t rt_period,
    __unused uint64_t rt_deadline,
    __unused uint64_t sched_latency,
    __unused thread_t nthread)
{
}

void
machine_run_count(__unused uint32_t count)
{
}

processor_t
machine_choose_processor(__unused processor_set_t pset, processor_t processor)
{
    return processor;
}
boolean_t
machine_timeout_suspended(void)
{
    return FALSE;
}

kern_return_t
ml_interrupt_prewarm(__unused uint64_t deadline)
{
    return KERN_FAILURE;
}
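
/*
 * Routine: ml_get_hwclock
 * Function: Read the 64-bit physical counter (via MRRC on p15). The high
 * word is sampled before and after the low word and the sequence is
 * retried if it changed, so a carry between the reads cannot produce a
 * torn value.
 */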
uint64_t
ml_get_hwclock(void)
{
    uint64_t high_first = 0;
    uint64_t high_second = 0;
    uint64_t low = 0;

    __builtin_arm_isb(ISB_SY);

    do {
        high_first = __builtin_arm_mrrc(15, 0, 14) >> 32;
        low = __builtin_arm_mrrc(15, 0, 14) & 0xFFFFFFFFULL;
        high_second = __builtin_arm_mrrc(15, 0, 14) >> 32;
    } while (high_first != high_second);

    return (high_first << 32) | (low);
}
boolean_t
ml_delay_should_spin(uint64_t interval)
{
    cpu_data_t *cdp = getCpuDatap();

    if (cdp->cpu_idle_latency) {
        return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
    } else {
        /*
         * Early boot, latency is unknown. Err on the side of blocking,
         * which should always be safe, even if slow.
         */
        return FALSE;
    }
}

void
ml_delay_on_yield(void)
{
}
boolean_t
ml_thread_is64bit(thread_t thread)
{
    return thread_is_64bit_addr(thread);
}

void
ml_timer_evaluate(void)
{
}

boolean_t
ml_timer_forced_evaluation(void)
{
    return FALSE;
}
uint64_t
ml_energy_stat(__unused thread_t t)
{
    return 0;
}

void
ml_gpu_stat_update(__unused uint64_t gpu_ns_delta)
{
    /*
     * For now: update the resource coalition stats of the
     * current thread's coalition
     */
    task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
}

uint64_t
ml_gpu_stat(__unused thread_t t)
{
    return 0;
}

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
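/*
 * timer_state_event() switches the per-processor and per-thread timers
 * between user and system state on kernel entry/exit, but only for
 * threads that have opted into precise user/kernel time accounting.
 */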
static void
timer_state_event(boolean_t switch_to_kernel)
{
    thread_t thread = current_thread();
    if (!thread->precise_user_kernel_time) {
        return;
    }

    processor_t pd = current_processor();
    uint64_t now = ml_get_timebase();

    timer_stop(pd->current_state, now);
    pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
    timer_start(pd->current_state, now);

    timer_stop(pd->thread_timer, now);
    pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
    timer_start(pd->thread_timer, now);
}

void
timer_state_event_user_to_kernel(void)
{
    timer_state_event(TRUE);
}

void
timer_state_event_kernel_to_user(void)
{
    timer_state_event(FALSE);
}
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
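
/*
 * Routine: get_arm_cpu_version
 * Function: Return an 8-bit CPU revision composed from the MIDR variant
 * and revision fields.
 */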
uint32_t
get_arm_cpu_version(void)
{
    uint32_t value = machine_read_midr();

    /* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */
    return ((value & MIDR_REV_MASK) >> MIDR_REV_SHIFT) | ((value & MIDR_VAR_MASK) >> (MIDR_VAR_SHIFT - 4));
}
boolean_t
user_cont_hwclock_allowed(void)
{
    return FALSE;
}

uint8_t
user_timebase_type(void)
{
#if __ARM_TIME__
    return USER_TIMEBASE_SPEC;
#else
    return USER_TIMEBASE_NONE;
#endif
}
/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void) __attribute__((const));
thread_t
current_act(void)
{
    return current_thread_fast();
}

#undef current_thread
extern thread_t current_thread(void) __attribute__((const));
thread_t
current_thread(void)
{
    return current_thread_fast();
}
#if __ARM_USER_PROTECT__
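/*
 * With __ARM_USER_PROTECT__, the kernel runs on its own translation table.
 * arm_user_protect_begin() switches TTBR0/CONTEXTIDR to the kernel table on
 * entry and returns the previous TTBR0; arm_user_protect_end() restores the
 * user table and ASID on exit.
 */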
uintptr_t
arm_user_protect_begin(thread_t thread)
{
    uintptr_t ttbr0, asid = 0;                                          // kernel asid

    ttbr0 = __builtin_arm_mrc(15, 0, 2, 0, 0);                          // Get TTBR0
    if (ttbr0 != thread->machine.kptw_ttb) {
        __builtin_arm_mcr(15, 0, thread->machine.kptw_ttb, 2, 0, 0);    // Set TTBR0
        __builtin_arm_mcr(15, 0, asid, 13, 0, 1);                       // Set CONTEXTIDR
        __builtin_arm_isb(ISB_SY);
    }
    return ttbr0;
}

void
arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
{
    if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
        if (disable_interrupts) {
            __asm__ volatile ("cpsid if" ::: "memory");                 // Disable FIQ/IRQ
        }
        __builtin_arm_mcr(15, 0, thread->machine.uptw_ttb, 2, 0, 0);    // Set TTBR0
        __builtin_arm_mcr(15, 0, thread->machine.asid, 13, 0, 1);       // Set CONTEXTIDR with thread asid
        __builtin_arm_dsb(DSB_ISH);
        __builtin_arm_isb(ISB_SY);
    }
}
#endif // __ARM_USER_PROTECT__
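
/*
 * Routine: machine_lockdown
 * Function: Finalize kernel memory protections once early boot is complete;
 * after this point no new executable mappings may be injected
 * (see ml_static_protect).
 */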
void
machine_lockdown(void)
{
    arm_vm_prot_finalize(PE_state.bootArgs);

    lockdown_done = 1;
}

void
ml_lockdown_init(void)
{
}

void
ml_hibernate_active_pre(void)
{
}

void
ml_hibernate_active_post(void)
{
}
size_t
ml_get_vm_reserved_regions(bool vm_is64bit, struct vm_reserved_region **regions)
{
#pragma unused(vm_is64bit)
    assert(regions != NULL);

    return 0;
}