/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/io_map_entries.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/rtclock.h>
#include <arm/caches_internal.h>
#include <console/serial_protos.h>
#include <kern/machine.h>
#include <prng/random.h>
#include <kern/startup.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <mach/machine.h>
#include <machine/atomic.h>
#include <vm/vm_page.h>
#include <sys/kdebug.h>
#include <kern/coalition.h>
#include <pexpert/device_tree.h>

#include <IOKit/IOPlatformExpert.h>
#include <libkern/section_keywords.h>
static int max_cpus_initialized = 0;
#define MAX_CPUS_SET    0x1
#define MAX_CPUS_WAIT   0x2

static unsigned int avail_cpus = 0;

uint32_t LockTimeOutUsec;

boolean_t is_clock_configured = FALSE;

extern int mach_assert;
extern volatile uint32_t debug_enabled;
SECURITY_READ_ONLY_LATE(unsigned int) debug_boot_arg;

void machine_conf(void);
void
machine_startup(__unused boot_args * args)
{
    int boot_arg;

    if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof (debug_boot_arg)) &&
        debug_enabled) {
#if DEVELOPMENT || DEBUG
        if (debug_boot_arg & DB_HALT)
            halt_in_debugger = 1;
#endif
        if (debug_boot_arg & DB_NMI)
            panicDebugging = TRUE;
    } else {
        debug_boot_arg = 0;
    }

    PE_parse_boot_argn("assert", &mach_assert, sizeof (mach_assert));

    if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
        default_preemption_rate = boot_arg;
    }
    if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof (boot_arg))) {
        default_bg_preemption_rate = boot_arg;
    }

    machine_conf();

    /*
     * Kick off the kernel bootstrap.
     */
    kernel_bootstrap();
    /* NOTREACHED */
}
char *
machine_boot_info(
    __unused char *buf,
    __unused vm_size_t size)
{
    return (PE_boot_args());
}

void
machine_conf(void)
{
    machine_info.memory_size = mem_size;
}

void
machine_init(void)
{
    debug_log_init();
    clock_config();
    is_clock_configured = TRUE;
    if (debug_enabled)
        pmap_map_globals();
}

void
slave_machine_init(__unused void *param)
{
    cpu_machine_init();     /* Initialize the processor */
    clock_init();           /* Init the clock */
}
/*
 * Routine:     machine_processor_shutdown
 * Function:
 */
thread_t
machine_processor_shutdown(
    __unused thread_t thread,
    void (*doshutdown) (processor_t),
    processor_t processor)
{
    return (Shutdown_context(doshutdown, processor));
}
/*
 * Routine:     ml_init_max_cpus
 * Function:
 */
void
ml_init_max_cpus(unsigned int max_cpus)
{
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    if (max_cpus_initialized != MAX_CPUS_SET) {
        machine_info.max_cpus = max_cpus;
        machine_info.physical_cpu_max = max_cpus;
        machine_info.logical_cpu_max = max_cpus;
        if (max_cpus_initialized == MAX_CPUS_WAIT)
            thread_wakeup((event_t) &max_cpus_initialized);
        max_cpus_initialized = MAX_CPUS_SET;
    }
    (void) ml_set_interrupts_enabled(current_state);
}
/*
 * Routine:     ml_get_max_cpus
 * Function:
 */
unsigned int
ml_get_max_cpus(void)
{
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    if (max_cpus_initialized != MAX_CPUS_SET) {
        max_cpus_initialized = MAX_CPUS_WAIT;
        assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT);
        (void) thread_block(THREAD_CONTINUE_NULL);
    }
    (void) ml_set_interrupts_enabled(current_state);
    return (machine_info.max_cpus);
}
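/*
 * Note: ml_init_max_cpus() and ml_get_max_cpus() form a simple handshake
 * on max_cpus_initialized: a caller that arrives before the platform
 * expert has reported the CPU count parks itself with assert_wait() and
 * is woken by the thread_wakeup() in ml_init_max_cpus() once machine_info
 * has been populated.
 */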
/*
 * Routine:     ml_init_lock_timeout
 * Function:
 */
void
ml_init_lock_timeout(void)
{
    uint64_t abstime;
    uint64_t mtxspin;
    uint64_t default_timeout_ns = NSEC_PER_SEC >> 2;
    uint32_t slto;

    if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto)))
        default_timeout_ns = slto * NSEC_PER_USEC;

    nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
    LockTimeOutUsec = (uint32_t)(abstime / NSEC_PER_USEC);
    LockTimeOut = (uint32_t)abstime;

    if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
        if (mtxspin > USEC_PER_SEC >> 4)
            mtxspin = USEC_PER_SEC >> 4;
        nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
    } else {
        nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
    }
    MutexSpin = abstime;
}
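/*
 * Note: the default lock timeout above is NSEC_PER_SEC >> 2, i.e. a
 * quarter of a second, unless overridden with the "slto_us" boot-arg
 * (given in microseconds).  The mutex spin interval from "mtxspin" is
 * clamped to USEC_PER_SEC >> 4 and falls back to 10us when the boot-arg
 * is absent.
 */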
/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
    hw_atomic_add(&machine_info.physical_cpu, 1);
    hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
    cpu_data_t *cpu_data_ptr;

    hw_atomic_sub(&machine_info.physical_cpu, 1);
    hw_atomic_sub(&machine_info.logical_cpu, 1);

    /*
     * If we want to deal with outstanding IPIs, we need to do so
     * relatively early in the processor_doshutdown path, as we pend
     * decrementer interrupts using the IPI mechanism if we cannot
     * immediately service them (if IRQ is masked).  Do so now.
     *
     * We aren't on the interrupt stack here; would it make more sense
     * to disable signaling and then enable interrupts?  It might be a
     * bit cleaner.
     */
    cpu_data_ptr = getCpuDatap();
    cpu_data_ptr->cpu_running = FALSE;

    cpu_signal_handler_internal(TRUE);
}
/*
 * Routine:     ml_cpu_get_info
 * Function:
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
    cache_info_t *cpuid_cache_info;

    cpuid_cache_info = cache_info();
    ml_cpu_info->vector_unit = 0;
    ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
    ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
    ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 7)
    ml_cpu_info->l2_settings = 1;
    ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
    ml_cpu_info->l2_settings = 0;
    ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
#endif
    ml_cpu_info->l3_settings = 0;
    ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}
unsigned int
ml_get_machine_mem(void)
{
    return (machine_info.memory_size);
}
/* Return max offset */
vm_map_offset_t
ml_get_max_offset(
    boolean_t is64,
    unsigned int option)
{
    unsigned int pmap_max_offset_option = 0;

    switch (option) {
    case MACHINE_MAX_OFFSET_DEFAULT:
        pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT;
        break;
    case MACHINE_MAX_OFFSET_MIN:
        pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN;
        break;
    case MACHINE_MAX_OFFSET_MAX:
        pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX;
        break;
    case MACHINE_MAX_OFFSET_DEVICE:
        pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE;
        break;
    default:
        panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
        break;
    }
    return pmap_max_offset(is64, pmap_max_offset_option);
}
boolean_t
ml_wants_panic_trap_to_debugger(void)
{
    return FALSE;
}

void
ml_panic_trap_to_debugger(__unused const char *panic_format_str,
                          __unused va_list *panic_args,
                          __unused unsigned int reason,
                          __unused void *ctx,
                          __unused uint64_t panic_options_mask,
                          __unused unsigned long panic_caller)
{
    return;
}
__attribute__((noreturn))
void
halt_all_cpus(boolean_t reboot)
{
    if (reboot) {
        printf("MACH Reboot\n");
        PEHaltRestart(kPERestartCPU);
    } else {
        printf("CPU halted\n");
        PEHaltRestart(kPEHaltCPU);
    }
    while (1);
}

__attribute__((noreturn))
void
halt_cpu(void)
{
    halt_all_cpus(FALSE);
}
/*
 * Routine:     machine_signal_idle
 * Function:
 */
void
machine_signal_idle(
    processor_t processor)
{
    cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_deferred(
    processor_t processor)
{
    cpu_signal_deferred(processor_to_cpu_datap(processor));
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_cancel(
    processor_t processor)
{
    cpu_signal_cancel(processor_to_cpu_datap(processor));
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}
/*
 * Routine:     ml_install_interrupt_handler
 * Function:    Initialize Interrupt Handler
 */
void
ml_install_interrupt_handler(
    void *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon)
{
    cpu_data_t *cpu_data_ptr;
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    cpu_data_ptr->interrupt_nub = nub;
    cpu_data_ptr->interrupt_source = source;
    cpu_data_ptr->interrupt_target = target;
    cpu_data_ptr->interrupt_handler = handler;
    cpu_data_ptr->interrupt_refCon = refCon;

    cpu_data_ptr->interrupts_enabled = TRUE;
    (void) ml_set_interrupts_enabled(current_state);

    initialize_screen(NULL, kPEAcquireScreen);
}
/*
 * Routine:     ml_init_interrupt
 * Function:    Initialize Interrupts
 */
void
ml_init_interrupt(void)
{
}
/*
 * Routine:     ml_init_timebase
 * Function:    register and setup Timebase, Decrementer services
 */
void ml_init_timebase(
    void *args,
    tbd_ops_t tbd_funcs,
    vm_offset_t int_address,
    vm_offset_t int_value)
{
    cpu_data_t *cpu_data_ptr;

    cpu_data_ptr = (cpu_data_t *)args;

    if ((cpu_data_ptr == &BootCpuData)
        && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
        rtclock_timebase_func = *tbd_funcs;
        rtclock_timebase_addr = int_address;
        rtclock_timebase_val = int_value;
    }
}
void
ml_parse_cpu_topology(void)
{
    DTEntry entry, child;
    OpaqueDTEntryIterator iter;
    uint32_t cpu_boot_arg;
    int err;

    err = DTLookupEntry(NULL, "/cpus", &entry);
    assert(err == kSuccess);

    err = DTInitEntryIterator(entry, &iter);
    assert(err == kSuccess);

    while (kSuccess == DTIterateEntries(&iter, &child)) {

#if MACH_ASSERT
        unsigned int propSize;
        void *prop = NULL;
        if (avail_cpus == 0) {
            if (kSuccess != DTGetProperty(child, "state", &prop, &propSize))
                panic("unable to retrieve state for cpu %u", avail_cpus);

            if (strncmp((char*)prop, "running", propSize) != 0)
                panic("cpu 0 has not been marked as running!");
        }
        assert(kSuccess == DTGetProperty(child, "reg", &prop, &propSize));
        assert(avail_cpus == *((uint32_t*)prop));
#endif
        ++avail_cpus;
    }

    cpu_boot_arg = avail_cpus;
    if (PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)) &&
        (avail_cpus > cpu_boot_arg))
        avail_cpus = cpu_boot_arg;

    if (avail_cpus == 0)
        panic("No cpus found!");
}
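/*
 * Note: avail_cpus ends up as the number of /cpus entries found in the
 * device tree, optionally capped by the "cpus" boot-arg.  For example,
 * booting with cpus=1 would restrict the system to a single CPU; the
 * boot-arg value here is illustrative only.
 */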
unsigned int
ml_get_cpu_count(void)
{
    return avail_cpus;
}

int
ml_get_boot_cpu_number(void)
{
    return 0;
}

cluster_type_t
ml_get_boot_cluster(void)
{
    return CLUSTER_TYPE_SMP;
}

int
ml_get_cpu_number(uint32_t phys_id)
{
    return (int)phys_id;
}

int
ml_get_max_cpu_number(void)
{
    return avail_cpus - 1;
}
kern_return_t
ml_processor_register(
    ml_processor_info_t * in_processor_info,
    processor_t * processor_out,
    ipi_handler_t * ipi_handler)
{
    cpu_data_t *this_cpu_datap;
    boolean_t is_boot_cpu;

    if (in_processor_info->phys_id >= MAX_CPUS) {
        /*
         * The physical CPU ID indicates that we have more CPUs than
         * this xnu build supports.  This probably means we have an
         * incorrect board configuration.
         *
         * TODO: Should this just return a failure instead?  A panic
         * is simply a convenient way to catch bugs in the pexpert
         * headers.
         */
        panic("phys_id %u is too large for MAX_CPUS (%u)", in_processor_info->phys_id, MAX_CPUS);
    }

    /* Fail the registration if the number of CPUs has been limited by boot-arg. */
    if ((in_processor_info->phys_id >= avail_cpus) ||
        (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number()))
        return KERN_FAILURE;

    if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
        is_boot_cpu = FALSE;
        this_cpu_datap = cpu_data_alloc(FALSE);
        cpu_data_init(this_cpu_datap);
    } else {
        this_cpu_datap = &BootCpuData;
        is_boot_cpu = TRUE;
    }

    this_cpu_datap->cpu_id = in_processor_info->cpu_id;

    this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(is_boot_cpu);
    if (this_cpu_datap->cpu_chud == (void *)NULL)
        goto processor_register_error;
    this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
    if (this_cpu_datap->cpu_console_buf == (void *)(NULL))
        goto processor_register_error;

    if (!is_boot_cpu) {
        if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS)
            goto processor_register_error;
    }

    this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle;
    this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch;
    nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
    this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);

    this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer;
    this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;

    this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler;
    this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
    this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
    this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;

    if (!is_boot_cpu) {
        processor_init((struct processor *)this_cpu_datap->cpu_processor,
                       this_cpu_datap->cpu_number, processor_pset(master_processor));

        if (this_cpu_datap->cpu_l2_access_penalty) {
            /*
             * Cores that have a non-zero L2 access penalty compared
             * to the boot processor should be de-prioritized by the
             * scheduler, so that threads use the cores with better L2
             * preferentially.
             */
            processor_set_primary(this_cpu_datap->cpu_processor,
                                  master_processor);
        }
    }

    *processor_out = this_cpu_datap->cpu_processor;
    *ipi_handler = cpu_signal_handler;
    if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL)
        *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;

#if KPC
    if (kpc_register_cpu(this_cpu_datap) != TRUE)
        goto processor_register_error;
#endif

    if (!is_boot_cpu)
        prng_cpu_init(this_cpu_datap->cpu_number);

    return KERN_SUCCESS;

processor_register_error:
#if KPC
    kpc_unregister_cpu(this_cpu_datap);
#endif
    if (this_cpu_datap->cpu_chud != (void *)NULL)
        chudxnu_cpu_free(this_cpu_datap->cpu_chud);
    if (!is_boot_cpu)
        cpu_data_free(this_cpu_datap);
    return KERN_FAILURE;
}
void
ml_init_arm_debug_interface(
    void *in_cpu_datap,
    vm_offset_t virt_address)
{
    ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
}
/*
 * Routine:     init_ast_check
 * Function:
 */
void
init_ast_check(
    __unused processor_t processor)
{
}

/*
 * Routine:     cause_ast_check
 * Function:
 */
void
cause_ast_check(
    processor_t processor)
{
    if (current_processor() != processor) {
        cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
    }
}
/*
 * Routine:     ml_at_interrupt_context
 * Function:    Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
    vm_offset_t stack_ptr;
    vm_offset_t intstack_top_ptr;

    __asm__ volatile("mov %0, sp\n":"=r"(stack_ptr));
    intstack_top_ptr = getCpuDatap()->intstack_top;
    return ((stack_ptr < intstack_top_ptr) && (stack_ptr > intstack_top_ptr - INTSTACK_SIZE));
}
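/*
 * Note: the check above treats "interrupt context" as "the current stack
 * pointer lies within (intstack_top - INTSTACK_SIZE, intstack_top)",
 * i.e. within this CPU's interrupt stack.
 */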
extern uint32_t cpu_idle_count;

void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
    *icp = ml_at_interrupt_context();
    *pidlep = (cpu_idle_count == real_ncpus);
}
/*
 * Routine:     ml_cause_interrupt
 * Function:    Generate a fake interrupt
 */
void
ml_cause_interrupt(void)
{
    return;
}

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
    vm_offset_t phys_addr,
    vm_size_t size)
{
    return (io_map(phys_addr, size, VM_WIMG_IO));
}

vm_offset_t
ml_io_map_wcomb(
    vm_offset_t phys_addr,
    vm_size_t size)
{
    return (io_map(phys_addr, size, VM_WIMG_WCOMB));
}
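/*
 * Both helpers are thin wrappers around io_map(); they differ only in the
 * caching attribute (Device/IO vs. write-combined).  As an illustration
 * only, a caller mapping a device register page might do something like
 *
 *     vm_offset_t regs = ml_io_map(soc_base_phys + REG_BLOCK_OFFSET, PAGE_SIZE);
 *
 * where soc_base_phys and REG_BLOCK_OFFSET are placeholders, not symbols
 * defined in this file.
 */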
/* boot memory allocation */
vm_offset_t
ml_static_malloc(
    __unused vm_size_t size)
{
    return ((vm_offset_t) NULL);
}

vm_map_address_t
ml_map_high_window(
    vm_offset_t phys_addr,
    vm_size_t len)
{
    return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
}

vm_offset_t
ml_static_ptovirt(
    vm_offset_t paddr)
{
    return phystokv(paddr);
}
vm_offset_t
ml_static_vtop(
    vm_offset_t vaddr)
{
    if (((vm_address_t)(vaddr) - gVirtBase) >= gPhysSize)
        panic("ml_static_vtop(): illegal vaddr: %p\n", (void*)vaddr);
    return ((vm_address_t)(vaddr) - gVirtBase + gPhysBase);
}
kern_return_t
ml_static_protect(
    vm_offset_t vaddr,      /* kernel virtual address */
    vm_size_t size,
    vm_prot_t new_prot)
{
    pt_entry_t arm_prot = 0;
    pt_entry_t arm_block_prot = 0;
    vm_offset_t vaddr_cur;
    ppnum_t ppn;
    kern_return_t result = KERN_SUCCESS;

    if (vaddr < VM_MIN_KERNEL_ADDRESS)
        return KERN_FAILURE;

    assert((vaddr & (ARM_PGBYTES - 1)) == 0);   /* must be page aligned */

    if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
        panic("ml_static_protect(): WX request on %p", (void *) vaddr);
    }

    /* Set up the protection bits, and block bits so we can validate block mappings. */
    if (new_prot & VM_PROT_WRITE) {
        arm_prot |= ARM_PTE_AP(AP_RWNA);
        arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
    } else {
        arm_prot |= ARM_PTE_AP(AP_RONA);
        arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
    }

    if (!(new_prot & VM_PROT_EXECUTE)) {
        arm_prot |= ARM_PTE_NX;
        arm_block_prot |= ARM_TTE_BLOCK_NX;
    }

    for (vaddr_cur = vaddr;
         vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
         vaddr_cur += ARM_PGBYTES) {
        ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
        if (ppn != (vm_offset_t) NULL) {
            tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
            tt_entry_t tte = *ttp;

            if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
                if (((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
                    ((tte & (ARM_TTE_BLOCK_APMASK | ARM_TTE_BLOCK_NX_MASK)) == arm_block_prot)) {
                    /*
                     * We can support ml_static_protect on a block mapping if the mapping already has
                     * the desired protections.  We still want to run checks on a per-page basis.
                     */
                    continue;
                }

                result = KERN_FAILURE;
                break;
            }

            pt_entry_t *pte_p = (pt_entry_t *) ttetokv(tte) + ptenum(vaddr_cur);
            pt_entry_t ptmp = *pte_p;

            ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
            *pte_p = ptmp;
#ifndef __ARM_L1_PTW__
            FlushPoC_DcacheRegion((vm_offset_t) pte_p, sizeof(*pte_p));
#endif
        }
    }

    if (vaddr_cur > vaddr)
        flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));

    return result;
}
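/*
 * Note: ml_static_protect() expects a page-aligned kernel VA, refuses
 * simultaneous VM_PROT_WRITE | VM_PROT_EXECUTE requests, and tolerates
 * section/block mappings only when they already carry the requested
 * permissions; otherwise it rewrites the AP/NX bits of each L2 PTE and
 * flushes the affected TLB range.
 */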
/*
 * Routine:     ml_static_mfree
 * Function:
 */
void
ml_static_mfree(
    vm_offset_t vaddr,
    vm_size_t size)
{
    vm_offset_t vaddr_cur;
    ppnum_t ppn;
    uint32_t freed_pages = 0;

    /* It is acceptable (if bad) to fail to free. */
    if (vaddr < VM_MIN_KERNEL_ADDRESS)
        return;

    assert((vaddr & (PAGE_SIZE - 1)) == 0);     /* must be page aligned */

    for (vaddr_cur = vaddr;
         vaddr_cur < trunc_page_32(vaddr + size);
         vaddr_cur += PAGE_SIZE) {
        ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
        if (ppn != (vm_offset_t) NULL) {
            /*
             * It is not acceptable to fail to update the protections on a page
             * we will release to the VM.  We need to either panic or continue.
             * For now, we'll panic (to help flag if there is memory we can
             * reclaim).
             */
            if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
                panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
            }
            /*
             * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme
             * relies on the persistence of these mappings for all time.
             */
            // pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE));
            vm_page_create(ppn, (ppn + 1));
            freed_pages++;
        }
    }
    vm_page_lockspin_queues();
    vm_page_wire_count -= freed_pages;
    vm_page_wire_count_initial -= freed_pages;
    vm_page_unlock_queues();
#if DEBUG
    kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
#endif
}
/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(vm_offset_t vaddr)
{
    return kvtophys(vaddr);
}
/*
 * Routine:     ml_nofault_copy
 * Function:    Perform a physical mode copy if the source and destination have
 * valid translations in the kernel pmap.  If translations are present, they are
 * assumed to be wired; i.e., no attempt is made to guarantee that the
 * translations obtained remain valid for the duration of the copy process.
 */
vm_size_t
ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
    addr64_t cur_phys_dst, cur_phys_src;
    uint32_t count, nbytes = 0;

    while (size > 0) {
        if (!(cur_phys_src = kvtophys(virtsrc)))
            break;
        if (!(cur_phys_dst = kvtophys(virtdst)))
            break;
        if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
            !pmap_valid_address(trunc_page_64(cur_phys_src)))
            break;
        count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
        if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
            count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
        if (count > size)
            count = size;

        bcopy_phys(cur_phys_src, cur_phys_dst, count);

        nbytes += count;
        virtsrc += count;
        virtdst += count;
        size -= count;
    }

    return nbytes;
}
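/*
 * Note: the copy proceeds at most a page at a time so that both the
 * source and destination translations can be revalidated at every page
 * boundary; it stops early and returns the byte count copied so far if
 * either address has no valid kernel translation.
 */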
/*
 * Routine:     ml_validate_nofault
 * Function:    Validate that this address range has valid translations
 * in the kernel pmap.  If translations are present, they are
 * assumed to be wired; i.e. no attempt is made to guarantee
 * that the translations persist after the check.
 * Returns:     TRUE if the range is mapped and will not cause a fault,
 *              FALSE otherwise.
 */

boolean_t ml_validate_nofault(
    vm_offset_t virtsrc, vm_size_t size)
{
    addr64_t cur_phys_src;
    uint32_t count;

    while (size > 0) {
        if (!(cur_phys_src = kvtophys(virtsrc)))
            return FALSE;
        if (!pmap_valid_address(trunc_page_64(cur_phys_src)))
            return FALSE;
        count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
        if (count > size)
            count = (uint32_t)size;

        virtsrc += count;
        size -= count;
    }

    return TRUE;
}
void
ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
{
    *phys_addr = 0;
    *size = 0;
}
/*
 * Stubs for CPU Stepper
 */
void
active_rt_threads(__unused boolean_t active)
{
}

void
thread_tell_urgency(__unused int urgency,
                    __unused uint64_t rt_period,
                    __unused uint64_t rt_deadline,
                    __unused uint64_t sched_latency,
                    __unused thread_t nthread)
{
}

void
machine_run_count(__unused uint32_t count)
{
}

processor_t
machine_choose_processor(__unused processor_set_t pset, processor_t processor)
{
    return (processor);
}
vm_offset_t
ml_stack_remaining(void)
{
    uintptr_t local = (uintptr_t) &local;

    if (ml_at_interrupt_context()) {
        return (local - (getCpuDatap()->intstack_top - INTSTACK_SIZE));
    } else {
        return (local - current_thread()->kernel_stack);
    }
}
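/*
 * Note: ml_stack_remaining() measures from the address of a local
 * variable down to the base of whichever stack is active: the per-CPU
 * interrupt stack when called at interrupt context, the current thread's
 * kernel stack otherwise.
 */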
boolean_t machine_timeout_suspended(void) {
    return FALSE;
}

kern_return_t
ml_interrupt_prewarm(__unused uint64_t deadline)
{
    return KERN_FAILURE;
}
uint64_t
ml_get_hwclock(void)
{
    uint64_t high_first = 0;
    uint64_t high_second = 0;
    uint64_t low = 0;

    __builtin_arm_isb(ISB_SY);

    do {
        high_first = __builtin_arm_mrrc(15, 0, 14) >> 32;
        low = __builtin_arm_mrrc(15, 0, 14) & 0xFFFFFFFFULL;
        high_second = __builtin_arm_mrrc(15, 0, 14) >> 32;
    } while (high_first != high_second);

    return (high_first << 32) | (low);
}
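/*
 * Note: each __builtin_arm_mrrc(15, 0, 14) above is a 64-bit MRRC read of
 * the CP15 physical counter (CNTPCT).  The high word is sampled before
 * and after the low word, and the sequence is retried until the two high
 * samples agree, so a carry out of the low word cannot produce a torn
 * value.
 */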
boolean_t
ml_delay_should_spin(uint64_t interval)
{
    cpu_data_t *cdp = getCpuDatap();

    if (cdp->cpu_idle_latency) {
        return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
    } else {
        /*
         * Early boot, latency is unknown. Err on the side of blocking,
         * which should always be safe, even if slow.
         */
        return FALSE;
    }
}
boolean_t ml_thread_is64bit(thread_t thread)
{
    return (thread_is_64bit(thread));
}
void ml_timer_evaluate(void) {
}

boolean_t
ml_timer_forced_evaluation(void) {
    return FALSE;
}

uint64_t
ml_energy_stat(__unused thread_t t) {
    return 0;
}

void
ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) {
    /*
     * For now: update the resource coalition stats of the
     * current thread's coalition
     */
    task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
}

uint64_t
ml_gpu_stat(__unused thread_t t) {
    return 0;
}
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
static void
timer_state_event(boolean_t switch_to_kernel)
{
    thread_t thread = current_thread();
    if (!thread->precise_user_kernel_time) return;

    processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data;
    uint64_t now = ml_get_timebase();

    timer_stop(pd->current_state, now);
    pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
    timer_start(pd->current_state, now);

    timer_stop(pd->thread_timer, now);
    pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
    timer_start(pd->thread_timer, now);
}

void
timer_state_event_user_to_kernel(void)
{
    timer_state_event(TRUE);
}

void
timer_state_event_kernel_to_user(void)
{
    timer_state_event(FALSE);
}
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
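/*
 * Note: when precise user/kernel time accounting is enabled for the
 * thread, each user<->kernel boundary crossing above stops and restarts
 * two timers: the per-processor current_state timer (user_state vs.
 * system_state) and the per-thread timer (user_timer vs. system_timer).
 */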
boolean_t
user_cont_hwclock_allowed(void)
{
    return FALSE;
}

boolean_t
user_timebase_allowed(void)
{
#if __ARM_TIME__
    return TRUE;
#else
    return FALSE;
#endif
}
/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
    return current_thread_fast();
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
    return current_thread_fast();
}
#if __ARM_USER_PROTECT__
uintptr_t
arm_user_protect_begin(thread_t thread)
{
    uintptr_t ttbr0, asid = 0;                                      // kernel asid

    ttbr0 = __builtin_arm_mrc(15,0,2,0,0);                          // Get TTBR0
    if (ttbr0 != thread->machine.kptw_ttb) {
        __builtin_arm_mcr(15,0,thread->machine.kptw_ttb,2,0,0);     // Set TTBR0
        __builtin_arm_mcr(15,0,asid,13,0,1);                        // Set CONTEXTIDR
        __builtin_arm_isb(ISB_SY);
    }
    return ttbr0;
}

void
arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
{
    if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
        if (disable_interrupts)
            __asm__ volatile ("cpsid if" ::: "memory");             // Disable FIQ/IRQ
        __builtin_arm_mcr(15,0,thread->machine.uptw_ttb,2,0,0);     // Set TTBR0
        __builtin_arm_mcr(15,0,thread->machine.asid,13,0,1);        // Set CONTEXTIDR with thread asid
        __builtin_arm_dsb(DSB_ISH);
        __builtin_arm_isb(ISB_SY);
    }
}
#endif // __ARM_USER_PROTECT__
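/*
 * Note: under __ARM_USER_PROTECT__, arm_user_protect_begin() switches
 * TTBR0/CONTEXTIDR to the thread's kernel-only translation table (ASID 0)
 * and returns the previous TTBR0; arm_user_protect_end() restores the
 * user table and the thread's ASID only when the saved TTBR0 shows that
 * begin() actually switched away from a distinct user table.
 */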
void ml_task_set_rop_pid(__unused task_t task, __unused task_t parent_task, __unused boolean_t inherit)
{
    return;
}