/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/io_map_entries.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/rtclock.h>
#include <arm/caches_internal.h>
#include <console/serial_protos.h>
#include <kern/machine.h>
#include <prng/random.h>
#include <kern/startup.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <mach/machine.h>
#include <machine/atomic.h>
#include <vm/vm_page.h>
#include <sys/kdebug.h>
#include <kern/coalition.h>
#include <pexpert/device_tree.h>

#include <IOKit/IOPlatformExpert.h>

#if KPC
#include <kern/kpc.h>
#endif
static int max_cpus_initialized = 0;
#define MAX_CPUS_SET	0x1
#define MAX_CPUS_WAIT	0x2

static unsigned int avail_cpus = 0;

uint32_t LockTimeOut;
uint32_t LockTimeOutUsec;
uint64_t MutexSpin;
boolean_t is_clock_configured = FALSE;

extern int mach_assert;
extern volatile uint32_t debug_enabled;

void machine_conf(void);
void
machine_startup(__unused boot_args * args)
{
	int boot_arg;

	PE_parse_boot_argn("assert", &mach_assert, sizeof (mach_assert));

	if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
		default_preemption_rate = boot_arg;
	}
	if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof (boot_arg))) {
		default_bg_preemption_rate = boot_arg;
	}

	machine_conf();

	/*
	 * Kick off the kernel bootstrap.
	 */
	kernel_bootstrap();
	/* NOTREACHED */
}
char *
machine_boot_info(
		  __unused char *buf,
		  __unused vm_size_t size)
{
	return (PE_boot_args());
}

void
machine_conf(void)
{
	machine_info.memory_size = mem_size;
}
void
machine_init(void)
{
	debug_log_init();
	clock_config();
	is_clock_configured = TRUE;
	if (debug_enabled)
		pmap_map_globals();
}
void
slave_machine_init(__unused void *param)
{
	cpu_machine_init();	/* Initialize the processor */
	clock_init();		/* Init the clock */
}
/*
 *	Routine:	machine_processor_shutdown
 *	Function:
 */
thread_t
machine_processor_shutdown(
			   __unused thread_t thread,
			   void (*doshutdown) (processor_t),
			   processor_t processor)
{
	return (Shutdown_context(doshutdown, processor));
}
/*
 *	Routine:	ml_init_max_cpus
 *	Function:
 */
void
ml_init_max_cpus(unsigned int max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		machine_info.max_cpus = max_cpus;
		machine_info.physical_cpu_max = max_cpus;
		machine_info.logical_cpu_max = max_cpus;
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			thread_wakeup((event_t) &max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}
/*
 *	Routine:	ml_get_max_cpus
 *	Function:
 */
unsigned int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT);
		(void) thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return (machine_info.max_cpus);
}
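/*
 * Note on the pairing above: ml_init_max_cpus() and ml_get_max_cpus() share a
 * small handshake on max_cpus_initialized.  A caller that asks for the count
 * before the platform expert has published it parks on &max_cpus_initialized
 * (MAX_CPUS_WAIT) and is woken once MAX_CPUS_SET is reached.  An illustrative
 * (hypothetical) caller:
 *
 *	unsigned int ncpus = ml_get_max_cpus();	// may block during early boot
 */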
/*
 *	Routine:	ml_init_lock_timeout
 *	Function:
 */
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint64_t mtxspin;
	uint64_t default_timeout_ns = NSEC_PER_SEC>>2;
	uint32_t slto;

	if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto)))
		default_timeout_ns = slto * NSEC_PER_USEC;

	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
	LockTimeOutUsec = (uint32_t)(abstime / NSEC_PER_USEC);
	LockTimeOut = (uint32_t)abstime;

	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = abstime;
}
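/*
 * Both timeouts can be tuned from the boot command line.  For example
 * (illustrative values, not defaults):
 *
 *	slto_us=500000	// spinlock timeout of 500 ms
 *	mtxspin=100	// mutex spin window of 100 us, clamped to USEC_PER_SEC>>4
 */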
/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	hw_atomic_add(&machine_info.physical_cpu, 1);
	hw_atomic_add(&machine_info.logical_cpu, 1);
}
/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	cpu_data_t *cpu_data_ptr;

	hw_atomic_sub(&machine_info.physical_cpu, 1);
	hw_atomic_sub(&machine_info.logical_cpu, 1);

	/*
	 * If we want to deal with outstanding IPIs, we need to
	 * do so relatively early in the processor_doshutdown path,
	 * as we pend decrementer interrupts using the IPI
	 * mechanism if we cannot immediately service them (if
	 * IRQ is masked).  Do so now.
	 *
	 * We aren't on the interrupt stack here; would it make
	 * more sense to disable signaling and then enable
	 * interrupts?  It might be a bit cleaner.
	 */
	cpu_data_ptr = getCpuDatap();
	cpu_data_ptr->cpu_running = FALSE;

	cpu_signal_handler_internal(TRUE);
}
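/*
 * cpu_signal_handler_internal(TRUE) above runs the pending cross-CPU signals
 * one final time with cpu_running already cleared, so that any decrementer
 * work pended through the IPI mechanism is drained before the core goes
 * offline.
 */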
/*
 *	Routine:	ml_cpu_get_info
 *	Function:
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
	cache_info_t *cpuid_cache_info;

	cpuid_cache_info = cache_info();
	ml_cpu_info->vector_unit = 0;
	ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
	ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
	ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 7)
	ml_cpu_info->l2_settings = 1;
	ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
	ml_cpu_info->l2_settings = 0;
	ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
#endif
	ml_cpu_info->l3_settings = 0;
	ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}
unsigned int
ml_get_machine_mem(void)
{
	return (machine_info.memory_size);
}

/* Return max offset */
vm_map_offset_t
ml_get_max_offset(
	boolean_t is64,
	unsigned int option)
{
	unsigned int pmap_max_offset_option = 0;

	switch (option) {
	case MACHINE_MAX_OFFSET_DEFAULT:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT;
		break;
	case MACHINE_MAX_OFFSET_MIN:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN;
		break;
	case MACHINE_MAX_OFFSET_MAX:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX;
		break;
	case MACHINE_MAX_OFFSET_DEVICE:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE;
		break;
	default:
		panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
		break;
	}
	return pmap_max_offset(is64, pmap_max_offset_option);
}
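/*
 * Usage sketch (hypothetical caller): the VM layer asks for the maximum user
 * VA with one of the MACHINE_MAX_OFFSET_* selectors and this routine simply
 * translates the selector into the pmap-layer constant:
 *
 *	vm_map_offset_t max_va = ml_get_max_offset(FALSE, MACHINE_MAX_OFFSET_DEFAULT);
 */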
boolean_t
ml_wants_panic_trap_to_debugger(void)
{
	return FALSE;
}

void
ml_panic_trap_to_debugger(__unused const char *panic_format_str,
			  __unused va_list *panic_args,
			  __unused unsigned int reason,
			  __unused void *ctx,
			  __unused uint64_t panic_options_mask,
			  __unused unsigned long panic_caller)
{
	return;
}

__attribute__((noreturn))
void
halt_all_cpus(boolean_t reboot)
{
	if (reboot) {
		printf("MACH Reboot\n");
		PEHaltRestart(kPERestartCPU);
	} else {
		printf("CPU halted\n");
		PEHaltRestart(kPEHaltCPU);
	}
	while (1);
}

__attribute__((noreturn))
void
halt_cpu(void)
{
	halt_all_cpus(FALSE);
}
/*
 *	Routine:	machine_signal_idle
 *	Function:
 */
void
machine_signal_idle(
		    processor_t processor)
{
	cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_deferred(
			     processor_t processor)
{
	cpu_signal_deferred(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_cancel(
			   processor_t processor)
{
	cpu_signal_cancel(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}
/*
 *	Routine:	ml_install_interrupt_handler
 *	Function:	Initialize Interrupt Handler
 */
void
ml_install_interrupt_handler(
			     void *nub,
			     int source,
			     void *target,
			     IOInterruptHandler handler,
			     void *refCon)
{
	cpu_data_t *cpu_data_ptr;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->interrupt_nub = nub;
	cpu_data_ptr->interrupt_source = source;
	cpu_data_ptr->interrupt_target = target;
	cpu_data_ptr->interrupt_handler = handler;
	cpu_data_ptr->interrupt_refCon = refCon;

	cpu_data_ptr->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}
/*
 *	Routine:	ml_init_interrupt
 *	Function:	Initialize Interrupts
 */
void
ml_init_interrupt(void)
{
}
/*
 *	Routine:	ml_init_timebase
 *	Function:	register and setup Timebase, Decrementer services
 */
void ml_init_timebase(
	void		*args,
	tbd_ops_t	tbd_funcs,
	vm_offset_t	int_address,
	vm_offset_t	int_value)
{
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = (cpu_data_t *)args;

	if ((cpu_data_ptr == &BootCpuData)
	    && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
		rtclock_timebase_func = *tbd_funcs;
		rtclock_timebase_addr = int_address;
		rtclock_timebase_val = int_value;
	}
}
void
ml_parse_cpu_topology(void)
{
	DTEntry entry, child;
	OpaqueDTEntryIterator iter;
	uint32_t cpu_boot_arg;
	int err;

	err = DTLookupEntry(NULL, "/cpus", &entry);
	assert(err == kSuccess);

	err = DTInitEntryIterator(entry, &iter);
	assert(err == kSuccess);

	while (kSuccess == DTIterateEntries(&iter, &child)) {
		unsigned int propSize;
		void *prop = NULL;

		if (avail_cpus == 0) {
			if (kSuccess != DTGetProperty(child, "state", &prop, &propSize))
				panic("unable to retrieve state for cpu %u", avail_cpus);

			if (strncmp((char*)prop, "running", propSize) != 0)
				panic("cpu 0 has not been marked as running!");
		}
		assert(kSuccess == DTGetProperty(child, "reg", &prop, &propSize));
		assert(avail_cpus == *((uint32_t*)prop));

		++avail_cpus;
	}

	cpu_boot_arg = avail_cpus;
	if (PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)) &&
	    (avail_cpus > cpu_boot_arg))
		avail_cpus = cpu_boot_arg;

	if (avail_cpus == 0)
		panic("No cpus found!");
}
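/*
 * avail_cpus is derived from the number of children of /cpus in the device
 * tree and can only be lowered by the "cpus" boot-arg, never raised.  For
 * example (illustrative, not a default), booting with:
 *
 *	cpus=1
 *
 * limits a multi-core device to a single CPU; ml_processor_register() below
 * then refuses to register the remaining cores.
 */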
unsigned int
ml_get_cpu_count(void)
{
	return avail_cpus;
}

int
ml_get_boot_cpu_number(void)
{
	return 0;
}

cluster_type_t
ml_get_boot_cluster(void)
{
	return CLUSTER_TYPE_SMP;
}

int
ml_get_cpu_number(uint32_t phys_id)
{
	return (int)phys_id;
}

int
ml_get_max_cpu_number(void)
{
	return avail_cpus - 1;
}
kern_return_t
ml_processor_register(
		      ml_processor_info_t * in_processor_info,
		      processor_t * processor_out,
		      ipi_handler_t * ipi_handler)
{
	cpu_data_t *this_cpu_datap;
	boolean_t is_boot_cpu;

	if (in_processor_info->phys_id >= MAX_CPUS) {
		/*
		 * The physical CPU ID indicates that we have more CPUs than
		 * this xnu build supports.  This probably means we have an
		 * incorrect board configuration.
		 *
		 * TODO: Should this just return a failure instead?  A panic
		 * is simply a convenient way to catch bugs in the pexpert
		 * headers.
		 */
		panic("phys_id %u is too large for MAX_CPUS (%u)", in_processor_info->phys_id, MAX_CPUS);
	}

	/* Fail the registration if the number of CPUs has been limited by boot-arg. */
	if ((in_processor_info->phys_id >= avail_cpus) ||
	    (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number()))
		return KERN_FAILURE;

	if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
		is_boot_cpu = FALSE;
		this_cpu_datap = cpu_data_alloc(FALSE);
		cpu_data_init(this_cpu_datap);
	} else {
		this_cpu_datap = &BootCpuData;
		is_boot_cpu = TRUE;
	}

	this_cpu_datap->cpu_id = in_processor_info->cpu_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
	if (this_cpu_datap->cpu_console_buf == (void *)(NULL))
		goto processor_register_error;

	if (!is_boot_cpu) {
		if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS)
			goto processor_register_error;
	}

	this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle;
	this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch;
	nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
	this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);

	this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer;
	this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;

	this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler;
	this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
	this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
	this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;

	if (!is_boot_cpu) {
		processor_init((struct processor *)this_cpu_datap->cpu_processor,
			       this_cpu_datap->cpu_number, processor_pset(master_processor));

		if (this_cpu_datap->cpu_l2_access_penalty) {
			/*
			 * Cores that have a non-zero L2 access penalty compared
			 * to the boot processor should be de-prioritized by the
			 * scheduler, so that threads use the cores with better L2
			 * access.
			 */
			processor_set_primary(this_cpu_datap->cpu_processor,
					      master_processor);
		}
	}

	*processor_out = this_cpu_datap->cpu_processor;
	*ipi_handler = cpu_signal_handler;
	if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL)
		*in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;

#if KPC
	if (kpc_register_cpu(this_cpu_datap) != TRUE)
		goto processor_register_error;
#endif

	if (!is_boot_cpu)
		prng_cpu_init(this_cpu_datap->cpu_number);

	return KERN_SUCCESS;

processor_register_error:
#if KPC
	kpc_unregister_cpu(this_cpu_datap);
#endif
	if (!is_boot_cpu)
		cpu_data_free(this_cpu_datap);

	return KERN_FAILURE;
}
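/*
 * Registration flow, as a sketch (the exact caller is an assumption here):
 * the platform expert invokes ml_processor_register() once per core with an
 * ml_processor_info_t filled in from the device tree; on success it receives
 * the processor_t to hand to the scheduler and the IPI entry point
 * (cpu_signal_handler) to wire into its interrupt controller.
 */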
void
ml_init_arm_debug_interface(
			    void * in_cpu_datap,
			    vm_offset_t virt_address)
{
	((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
}

/*
 *	Routine:	init_ast_check
 *	Function:
 */
void
init_ast_check(
	       __unused processor_t processor)
{
}

/*
 *	Routine:	cause_ast_check
 *	Function:
 */
void
cause_ast_check(
		processor_t processor)
{
	if (current_processor() != processor) {
		cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
	}
}
/*
 *	Routine:	ml_at_interrupt_context
 *	Function:	Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
	boolean_t at_interrupt_context = FALSE;

	disable_preemption();
	at_interrupt_context = (getCpuDatap()->cpu_int_state != NULL);
	enable_preemption();

	return at_interrupt_context;
}

extern uint32_t cpu_idle_count;

void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
	*icp = ml_at_interrupt_context();
	*pidlep = (cpu_idle_count == real_ncpus);
}
/*
 *	Routine:	ml_cause_interrupt
 *	Function:	Generate a fake interrupt
 */
void
ml_cause_interrupt(void)
{
	return;
}

/* Map memory map IO space */
vm_offset_t
ml_io_map(
	  vm_offset_t phys_addr,
	  vm_size_t size)
{
	return (io_map(phys_addr, size, VM_WIMG_IO));
}

vm_offset_t
ml_io_map_wcomb(
	  vm_offset_t phys_addr,
	  vm_size_t size)
{
	return (io_map(phys_addr, size, VM_WIMG_WCOMB));
}

/* boot memory allocation */
vm_offset_t
ml_static_malloc(
		 __unused vm_size_t size)
{
	return ((vm_offset_t) NULL);
}

vm_map_address_t
ml_map_high_window(
	vm_offset_t phys_addr,
	vm_size_t len)
{
	return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
}

vm_offset_t
ml_static_ptovirt(
		  vm_offset_t paddr)
{
	return phystokv(paddr);
}

vm_offset_t
ml_static_vtop(
	       vm_offset_t vaddr)
{
	if (((vm_address_t)(vaddr) - gVirtBase) >= gPhysSize)
		panic("ml_static_vtop(): illegal vaddr: %p\n", (void*)vaddr);
	return ((vm_address_t)(vaddr) - gVirtBase + gPhysBase);
}
kern_return_t
ml_static_protect(
	vm_offset_t vaddr,	/* kernel virtual address */
	vm_size_t size,
	vm_prot_t new_prot)
{
	pt_entry_t arm_prot = 0;
	pt_entry_t arm_block_prot = 0;
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	kern_return_t result = KERN_SUCCESS;

	if (vaddr < VM_MIN_KERNEL_ADDRESS)
		return KERN_FAILURE;

	assert((vaddr & (ARM_PGBYTES - 1)) == 0);	/* must be page aligned */

	if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
		panic("ml_static_protect(): WX request on %p", (void *) vaddr);
	}

	/* Set up the protection bits, and block bits so we can validate block mappings. */
	if (new_prot & VM_PROT_WRITE) {
		arm_prot |= ARM_PTE_AP(AP_RWNA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
	} else {
		arm_prot |= ARM_PTE_AP(AP_RONA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
	}

	if (!(new_prot & VM_PROT_EXECUTE)) {
		arm_prot |= ARM_PTE_NX;
		arm_block_prot |= ARM_TTE_BLOCK_NX;
	}

	for (vaddr_cur = vaddr;
	     vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
	     vaddr_cur += ARM_PGBYTES) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
			tt_entry_t tte = *ttp;

			if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
				if (((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
				    ((tte & (ARM_TTE_BLOCK_APMASK | ARM_TTE_BLOCK_NX_MASK)) == arm_block_prot)) {
					/*
					 * We can support ml_static_protect on a block mapping if the mapping already has
					 * the desired protections.  We still want to run checks on a per-page basis.
					 */
					continue;
				}

				result = KERN_FAILURE;
				break;
			}

			pt_entry_t *pte_p = (pt_entry_t *) ttetokv(tte) + ptenum(vaddr_cur);
			pt_entry_t ptmp = *pte_p;

			ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
			*pte_p = ptmp;
#ifndef __ARM_L1_PTW__
			FlushPoC_DcacheRegion((vm_offset_t) pte_p, sizeof(*pte_p));
#endif
		}
	}

	if (vaddr_cur > vaddr)
		flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));

	return result;
}
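/*
 * Example (hypothetical addresses): marking a page-aligned range of kernel
 * data read-only after initialization,
 *
 *	kern_return_t kr = ml_static_protect(start_va, PAGE_SIZE, VM_PROT_READ);
 *
 * Requests that combine VM_PROT_WRITE and VM_PROT_EXECUTE panic above, and
 * block (section) mappings are accepted only if they already carry the
 * requested protections.
 */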
/*
 *	Routine:	ml_static_mfree
 *	Function:
 */
void
ml_static_mfree(
		vm_offset_t vaddr,
		vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	uint32_t freed_pages = 0;

	/* It is acceptable (if bad) to fail to free. */
	if (vaddr < VM_MIN_KERNEL_ADDRESS)
		return;

	assert((vaddr & (PAGE_SIZE - 1)) == 0);	/* must be page aligned */

	for (vaddr_cur = vaddr;
	     vaddr_cur < trunc_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			/*
			 * It is not acceptable to fail to update the protections on a page
			 * we will release to the VM.  We need to either panic or continue.
			 * For now, we'll panic (to help flag if there is memory we can
			 * reclaim).
			 */
			if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
				panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
			}
			/*
			 * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme
			 * relies on the persistence of these mappings for all time.
			 */
			// pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE));
			vm_page_create(ppn, (ppn + 1));
			freed_pages++;
		}
	}

	vm_page_lockspin_queues();
	vm_page_wire_count -= freed_pages;
	vm_page_wire_count_initial -= freed_pages;
	vm_page_unlock_queues();

	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
}
/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}
/*
 *	Routine:	ml_nofault_copy
 *	Function:	Perform a physical mode copy if the source and destination have
 *	valid translations in the kernel pmap.  If translations are present, they are
 *	assumed to be wired; e.g., no attempt is made to guarantee that the
 *	translations obtained remain valid for the duration of the copy process.
 */
vm_size_t
ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			break;
		if (!(cur_phys_dst = kvtophys(virtdst)))
			break;
		if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
		    !pmap_valid_address(trunc_page_64(cur_phys_src)))
			break;
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (count > size)
			count = size;

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}
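/*
 * The copy proceeds page by page: each pass clips the chunk to whatever
 * remains of the current source and destination pages, so a single
 * bcopy_phys() never crosses a page boundary.  The return value is the number
 * of bytes actually copied, which is less than size if either address loses
 * its translation part way through.
 */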
/*
 *	Routine:	ml_validate_nofault
 *	Function:	Validate that this address range has valid translations
 *			in the kernel pmap.  If translations are present, they are
 *			assumed to be wired; i.e. no attempt is made to guarantee
 *			that the translations persist after the check.
 *	Returns:	TRUE if the range is mapped and will not cause a fault,
 *			FALSE otherwise.
 */
boolean_t ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size)
{
	addr64_t cur_phys_src;
	uint32_t count;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			return FALSE;
		if (!pmap_valid_address(trunc_page_64(cur_phys_src)))
			return FALSE;
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > size)
			count = (uint32_t)size;

		virtsrc += count;
		size -= count;
	}

	return TRUE;
}
void
ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
{
	*phys_addr = 0;
	*size = 0;
}
/*
 * Stubs for CPU Stepper
 */
void
active_rt_threads(__unused boolean_t active)
{
}

void
thread_tell_urgency(__unused int urgency,
		    __unused uint64_t rt_period,
		    __unused uint64_t rt_deadline,
		    __unused uint64_t sched_latency,
		    __unused thread_t nthread)
{
}

void
machine_run_count(__unused uint32_t count)
{
}

processor_t
machine_choose_processor(__unused processor_set_t pset, processor_t processor)
{
	return (processor);
}
vm_offset_t
ml_stack_remaining(void)
{
	uintptr_t local = (uintptr_t) &local;
	vm_offset_t intstack_top_ptr;

	intstack_top_ptr = getCpuDatap()->intstack_top;
	if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
		return (local - (getCpuDatap()->intstack_top - INTSTACK_SIZE));
	} else {
		return (local - current_thread()->kernel_stack);
	}
}
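/*
 * The address of the local variable is used as a cheap proxy for the current
 * stack pointer: if it falls within [intstack_top - INTSTACK_SIZE,
 * intstack_top) we are on the interrupt stack, otherwise on the current
 * thread's kernel stack, and the distance to the corresponding stack base is
 * what remains.
 */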
boolean_t machine_timeout_suspended(void) {
	return FALSE;
}

kern_return_t
ml_interrupt_prewarm(__unused uint64_t deadline)
{
	return KERN_FAILURE;
}
uint64_t
ml_get_hwclock(void)
{
	uint64_t high_first = 0;
	uint64_t high_second = 0;
	uint64_t low = 0;

	__builtin_arm_isb(ISB_SY);

	do {
		high_first = __builtin_arm_mrrc(15, 0, 14) >> 32;
		low = __builtin_arm_mrrc(15, 0, 14) & 0xFFFFFFFFULL;
		high_second = __builtin_arm_mrrc(15, 0, 14) >> 32;
	} while (high_first != high_second);

	return (high_first << 32) | (low);
}
boolean_t
ml_delay_should_spin(uint64_t interval)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_idle_latency) {
		return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
	} else {
		/*
		 * Early boot, latency is unknown. Err on the side of blocking,
		 * which should always be safe, even if slow.
		 */
		return FALSE;
	}
}
boolean_t ml_thread_is64bit(thread_t thread)
{
	return (thread_is_64bit(thread));
}
void ml_timer_evaluate(void) {
}

boolean_t
ml_timer_forced_evaluation(void) {
	return FALSE;
}

uint64_t
ml_energy_stat(__unused thread_t t) {
	return 0;
}

void
ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) {
#if CONFIG_EMBEDDED
	/*
	 * For now: update the resource coalition stats of the
	 * current thread's coalition
	 */
	task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
#endif
}

uint64_t
ml_gpu_stat(__unused thread_t t) {
	return 0;
}
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
static void
timer_state_event(boolean_t switch_to_kernel)
{
	thread_t thread = current_thread();
	if (!thread->precise_user_kernel_time) return;

	processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data;
	uint64_t now = ml_get_timebase();

	timer_stop(pd->current_state, now);
	pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
	timer_start(pd->current_state, now);

	timer_stop(pd->thread_timer, now);
	pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
	timer_start(pd->thread_timer, now);
}

void
timer_state_event_user_to_kernel(void)
{
	timer_state_event(TRUE);
}

void
timer_state_event_kernel_to_user(void)
{
	timer_state_event(FALSE);
}
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
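/*
 * Sketch of the accounting flow (the exact call sites are an assumption):
 * timer_state_event_user_to_kernel() and timer_state_event_kernel_to_user()
 * are invoked at the user/kernel boundary; each crossing charges the elapsed
 * timebase ticks both to the per-processor user/system state timers and to
 * the current thread's user/system timers.  Threads without
 * precise_user_kernel_time skip this accounting entirely.
 */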
boolean_t
user_cont_hwclock_allowed(void)
{
	return FALSE;
}

boolean_t
user_timebase_allowed(void)
{
#if __ARM_TIME__
	return TRUE;
#else
	return FALSE;
#endif
}
/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
	return current_thread_fast();
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
	return current_thread_fast();
}
#if __ARM_USER_PROTECT__
uintptr_t
arm_user_protect_begin(thread_t thread)
{
	uintptr_t ttbr0, asid = 0;				// kernel asid

	ttbr0 = __builtin_arm_mrc(15,0,2,0,0);			// Get TTBR0
	if (ttbr0 != thread->machine.kptw_ttb) {
		__builtin_arm_mcr(15,0,thread->machine.kptw_ttb,2,0,0);	// Set TTBR0
		__builtin_arm_mcr(15,0,asid,13,0,1);		// Set CONTEXTIDR
		__builtin_arm_isb(ISB_SY);
	}
	return ttbr0;
}

void
arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
{
	if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
		if (disable_interrupts)
			__asm__ volatile ("cpsid if" ::: "memory");	// Disable FIQ/IRQ
		__builtin_arm_mcr(15,0,thread->machine.uptw_ttb,2,0,0);	// Set TTBR0
		__builtin_arm_mcr(15,0,thread->machine.asid,13,0,1);	// Set CONTEXTIDR with thread asid
		__builtin_arm_dsb(DSB_ISH);
		__builtin_arm_isb(ISB_SY);
	}
}
#endif // __ARM_USER_PROTECT__
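/*
 * Illustrative pairing (the real call sites are in the trap/copyio paths,
 * which is an assumption of this sketch):
 *
 *	uintptr_t saved_ttbr0 = arm_user_protect_begin(thread);	// switch to kernel-only TTBR0
 *	// ... touch kernel-managed mappings ...
 *	arm_user_protect_end(thread, saved_ttbr0, FALSE);		// restore user TTBR0/ASID
 *
 * begin() points TTBR0 at the kernel page tables with the kernel ASID; end()
 * restores the thread's user page tables and ASID, optionally masking FIQ/IRQ
 * around the switch.
 */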
void ml_task_set_rop_pid(__unused task_t task, __unused task_t parent_task, __unused boolean_t inherit)
{
	return;
}