X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/e8c3f78193f1895ea514044358b93b1add9322f3..c6bf4f310a33a9262d455ea4d3f0630b1255e3fe:/osfmk/arm/machine_routines.c?ds=inline

diff --git a/osfmk/arm/machine_routines.c b/osfmk/arm/machine_routines.c
index 94fc76bf4..df89b7500 100644
--- a/osfmk/arm/machine_routines.c
+++ b/osfmk/arm/machine_routines.c
@@ -49,6 +49,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 #include 
@@ -64,10 +66,13 @@ static unsigned int avail_cpus = 0;
 
 uint32_t LockTimeOut;
 uint32_t LockTimeOutUsec;
+uint64_t TLockTimeOut;
 uint64_t MutexSpin;
 boolean_t is_clock_configured = FALSE;
 
+#if CONFIG_NONFATAL_ASSERTS
 extern int mach_assert;
+#endif
 extern volatile uint32_t debug_enabled;
 
 void machine_conf(void);
@@ -77,12 +82,14 @@ machine_startup(__unused boot_args * args)
 {
     int boot_arg;
 
-    PE_parse_boot_argn("assert", &mach_assert, sizeof (mach_assert));
+#if CONFIG_NONFATAL_ASSERTS
+    PE_parse_boot_argn("assert", &mach_assert, sizeof(mach_assert));
+#endif
 
-    if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
+    if (PE_parse_boot_argn("preempt", &boot_arg, sizeof(boot_arg))) {
         default_preemption_rate = boot_arg;
     }
-    if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof (boot_arg))) {
+    if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof(boot_arg))) {
         default_bg_preemption_rate = boot_arg;
     }
@@ -97,10 +104,10 @@ machine_startup(__unused boot_args * args)
 
 char *
 machine_boot_info(
-    __unused char *buf,
-    __unused vm_size_t size)
+    __unused char *buf,
+    __unused vm_size_t size)
 {
-    return (PE_boot_args());
+    return PE_boot_args();
 }
 
 void
@@ -115,15 +122,16 @@ machine_init(void)
     debug_log_init();
     clock_config();
     is_clock_configured = TRUE;
-    if (debug_enabled)
+    if (debug_enabled) {
         pmap_map_globals();
+    }
 }
 
-void 
+void
 slave_machine_init(__unused void *param)
 {
-    cpu_machine_init();    /* Initialize the processor */
-    clock_init();          /* Init the clock */
+    cpu_machine_init();    /* Initialize the processor */
+    clock_init();          /* Init the clock */
 }
 
 /*
@@ -132,11 +140,11 @@ slave_machine_init(__unused void *param)
  */
 thread_t
 machine_processor_shutdown(
-    __unused thread_t thread,
-    void (*doshutdown) (processor_t),
-    processor_t processor)
+    __unused thread_t thread,
+    void (*doshutdown)(processor_t),
+    processor_t processor)
 {
-    return (Shutdown_context(doshutdown, processor));
+    return Shutdown_context(doshutdown, processor);
 }
 
 /*
@@ -153,8 +161,9 @@ ml_init_max_cpus(unsigned int max_cpus)
         machine_info.max_cpus = max_cpus;
         machine_info.physical_cpu_max = max_cpus;
         machine_info.logical_cpu_max = max_cpus;
-        if (max_cpus_initialized == MAX_CPUS_WAIT)
-            thread_wakeup((event_t) & max_cpus_initialized);
+        if (max_cpus_initialized == MAX_CPUS_WAIT) {
+            thread_wakeup((event_t) &max_cpus_initialized);
+        }
         max_cpus_initialized = MAX_CPUS_SET;
     }
     (void) ml_set_interrupts_enabled(current_state);
@@ -172,11 +181,11 @@ ml_get_max_cpus(void)
     current_state = ml_set_interrupts_enabled(FALSE);
     if (max_cpus_initialized != MAX_CPUS_SET) {
         max_cpus_initialized = MAX_CPUS_WAIT;
-        assert_wait((event_t) & max_cpus_initialized, THREAD_UNINT);
+        assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT);
         (void) thread_block(THREAD_CONTINUE_NULL);
     }
     (void) ml_set_interrupts_enabled(current_state);
-    return (machine_info.max_cpus);
+    return machine_info.max_cpus;
 }
 
 /*
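
Note: besides the brace and sizeof() style cleanups, the ml_init_lock_timeout() hunk below fixes a units bug: LockTimeOutUsec used to be derived from abstime, which is in timebase ticks, so dividing it by NSEC_PER_USEC only yielded microseconds on a 1 GHz timebase. Deriving it from default_timeout_ns keeps the arithmetic in time units throughout. A minimal userspace sketch of the two derivations; the 24 MHz timebase is an assumption for illustration (the kernel converts via nanoseconds_to_absolutetime()):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC  1000000000ULL
    #define NSEC_PER_USEC 1000ULL

    /* Assumed 24 MHz timebase: 24 ticks per microsecond (illustrative only). */
    static uint64_t
    ns_to_ticks(uint64_t ns)
    {
        return ns * 24 / 1000;
    }

    int
    main(void)
    {
        uint64_t default_timeout_ns = NSEC_PER_SEC >> 2;        /* 250 ms */
        uint64_t abstime = ns_to_ticks(default_timeout_ns);

        /* Old derivation: divides ticks as if they were nanoseconds. */
        printf("old: %llu usec\n",
            (unsigned long long)(abstime / NSEC_PER_USEC));             /* 6000 */
        /* New derivation: stays in nanoseconds, so the result is exact. */
        printf("new: %llu usec\n",
            (unsigned long long)(default_timeout_ns / NSEC_PER_USEC));  /* 250000 */
        return 0;
    }
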
(PE_parse_boot_argn("slto_us", &slto, sizeof (slto))) + if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) { default_timeout_ns = slto * NSEC_PER_USEC; + } nanoseconds_to_absolutetime(default_timeout_ns, &abstime); - LockTimeOutUsec = (uint32_t)(abstime / NSEC_PER_USEC); + LockTimeOutUsec = (uint32_t)(default_timeout_ns / NSEC_PER_USEC); LockTimeOut = (uint32_t)abstime; + TLockTimeOut = LockTimeOut; - if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) { - if (mtxspin > USEC_PER_SEC>>4) - mtxspin = USEC_PER_SEC>>4; - nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime); + if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) { + if (mtxspin > USEC_PER_SEC >> 4) { + mtxspin = USEC_PER_SEC >> 4; + } + nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime); } else { - nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime); + nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime); } MutexSpin = abstime; } @@ -215,8 +227,8 @@ ml_init_lock_timeout(void) void ml_cpu_up(void) { - hw_atomic_add(&machine_info.physical_cpu, 1); - hw_atomic_add(&machine_info.logical_cpu, 1); + os_atomic_inc(&machine_info.physical_cpu, relaxed); + os_atomic_inc(&machine_info.logical_cpu, relaxed); } /* @@ -226,11 +238,11 @@ ml_cpu_up(void) void ml_cpu_down(void) { - cpu_data_t *cpu_data_ptr; + cpu_data_t *cpu_data_ptr; + + os_atomic_dec(&machine_info.physical_cpu, relaxed); + os_atomic_dec(&machine_info.logical_cpu, relaxed); - hw_atomic_sub(&machine_info.physical_cpu, 1); - hw_atomic_sub(&machine_info.logical_cpu, 1); - /* * If we want to deal with outstanding IPIs, we need to * do relatively early in the processor_doshutdown path, @@ -277,34 +289,34 @@ ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info) unsigned int ml_get_machine_mem(void) { - return (machine_info.memory_size); + return machine_info.memory_size; } /* Return max offset */ vm_map_offset_t ml_get_max_offset( - boolean_t is64, + boolean_t is64, unsigned int option) { - unsigned int pmap_max_offset_option = 0; + unsigned int pmap_max_offset_option = 0; switch (option) { case MACHINE_MAX_OFFSET_DEFAULT: pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT; - break; - case MACHINE_MAX_OFFSET_MIN: + break; + case MACHINE_MAX_OFFSET_MIN: pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN; - break; - case MACHINE_MAX_OFFSET_MAX: + break; + case MACHINE_MAX_OFFSET_MAX: pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX; - break; - case MACHINE_MAX_OFFSET_DEVICE: + break; + case MACHINE_MAX_OFFSET_DEVICE: pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE; - break; - default: + break; + default: panic("ml_get_max_offset(): Illegal option 0x%x\n", option); - break; - } + break; + } return pmap_max_offset(is64, pmap_max_offset_option); } @@ -316,11 +328,11 @@ ml_wants_panic_trap_to_debugger(void) void ml_panic_trap_to_debugger(__unused const char *panic_format_str, - __unused va_list *panic_args, - __unused unsigned int reason, - __unused void *ctx, - __unused uint64_t panic_options_mask, - __unused unsigned long panic_caller) + __unused va_list *panic_args, + __unused unsigned int reason, + __unused void *ctx, + __unused uint64_t panic_options_mask, + __unused unsigned long panic_caller) { return; } @@ -336,7 +348,9 @@ halt_all_cpus(boolean_t reboot) printf("CPU halted\n"); PEHaltRestart(kPEHaltCPU); } - while (1); + while (1) { + ; + } } __attribute__((noreturn)) @@ -352,7 +366,7 @@ halt_cpu(void) */ void machine_signal_idle( - processor_t processor) + processor_t processor) { cpu_signal(processor_to_cpu_datap(processor), 
@@ -352,7 +366,7 @@ halt_cpu(void)
  */
 void
 machine_signal_idle(
-    processor_t processor)
+    processor_t processor)
 {
     cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
     KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
@@ -360,7 +374,7 @@ machine_signal_idle(
 
 void
 machine_signal_idle_deferred(
-    processor_t processor)
+    processor_t processor)
 {
     cpu_signal_deferred(processor_to_cpu_datap(processor));
     KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
@@ -368,7 +382,7 @@ machine_signal_idle_deferred(
 
 void
 machine_signal_idle_cancel(
-    processor_t processor)
+    processor_t processor)
 {
     cpu_signal_cancel(processor_to_cpu_datap(processor));
     KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
@@ -378,13 +392,13 @@ machine_signal_idle_cancel(
  * Routine: ml_install_interrupt_handler
  * Function: Initialize Interrupt Handler
  */
-void 
+void
 ml_install_interrupt_handler(
-    void *nub,
-    int source,
-    void *target,
-    IOInterruptHandler handler,
-    void *refCon)
+    void *nub,
+    int source,
+    void *target,
+    IOInterruptHandler handler,
+    void *refCon)
 {
     cpu_data_t *cpu_data_ptr;
     boolean_t current_state;
@@ -408,7 +422,7 @@ ml_install_interrupt_handler(
  * Routine: ml_init_interrupt
  * Function: Initialize Interrupts
  */
-void 
+void
 ml_init_interrupt(void)
 {
 }
@@ -417,11 +431,12 @@ ml_init_interrupt(void)
  * Routine: ml_init_timebase
 * Function: register and setup Timebase, Decremeter services
  */
-void ml_init_timebase(
-    void *args,
-    tbd_ops_t tbd_funcs,
-    vm_offset_t int_address,
-    vm_offset_t int_value)
+void
+ml_init_timebase(
+    void *args,
+    tbd_ops_t tbd_funcs,
+    vm_offset_t int_address,
+    vm_offset_t int_value)
 {
     cpu_data_t *cpu_data_ptr;
 
@@ -456,16 +471,17 @@ ml_parse_cpu_topology(void)
     assert(err == kSuccess);
 
     while (kSuccess == DTIterateEntries(&iter, &child)) {
-
 #if MACH_ASSERT
         unsigned int propSize;
         void *prop = NULL;
         if (avail_cpus == 0) {
-            if (kSuccess != DTGetProperty(child, "state", &prop, &propSize))
+            if (kSuccess != DTGetProperty(child, "state", &prop, &propSize)) {
                 panic("unable to retrieve state for cpu %u", avail_cpus);
+            }
 
-            if (strncmp((char*)prop, "running", propSize) != 0)
+            if (strncmp((char*)prop, "running", propSize) != 0) {
                 panic("cpu 0 has not been marked as running!");
+            }
         }
         assert(kSuccess == DTGetProperty(child, "reg", &prop, &propSize));
         assert(avail_cpus == *((uint32_t*)prop));
@@ -475,11 +491,13 @@ ml_parse_cpu_topology(void)
 
     cpu_boot_arg = avail_cpus;
     if (PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)) &&
-        (avail_cpus > cpu_boot_arg))
+        (avail_cpus > cpu_boot_arg)) {
         avail_cpus = cpu_boot_arg;
+    }
 
-    if (avail_cpus == 0)
+    if (avail_cpus == 0) {
         panic("No cpus found!");
+    }
 }
 
 unsigned int
@@ -513,10 +531,9 @@ ml_get_max_cpu_number(void)
 }
 
 kern_return_t
-ml_processor_register(
-    ml_processor_info_t * in_processor_info,
-    processor_t * processor_out,
-    ipi_handler_t * ipi_handler)
+ml_processor_register(ml_processor_info_t *in_processor_info,
+    processor_t * processor_out, ipi_handler_t *ipi_handler_out,
+    perfmon_interrupt_handler_func *pmi_handler_out)
 {
     cpu_data_t *this_cpu_datap;
     boolean_t is_boot_cpu;
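
Note: in the ml_parse_cpu_topology() hunk above, the "state" check uses strncmp() bounded by propSize because DeviceTree property values are length-delimited and not guaranteed to be NUL-terminated. A small userspace illustration of the bounded compare (the property bytes are made up):

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        /* A fabricated 8-byte DT property holding "running" plus its NUL. */
        const char prop[8] = "running";
        unsigned int propSize = sizeof(prop);

        if (strncmp(prop, "running", propSize) != 0) {
            puts("cpu 0 has not been marked as running!");
        } else {
            puts("cpu 0 is running");
        }
        return 0;
    }
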
@@ -536,8 +553,9 @@ ml_processor_register(
 
     /* Fail the registration if the number of CPUs has been limited by boot-arg. */
     if ((in_processor_info->phys_id >= avail_cpus) ||
-        (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number()))
+        (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) {
         return KERN_FAILURE;
+    }
 
     if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
         is_boot_cpu = FALSE;
@@ -551,12 +569,14 @@ ml_processor_register(
     this_cpu_datap->cpu_id = in_processor_info->cpu_id;
 
     this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
-    if (this_cpu_datap->cpu_console_buf == (void *)(NULL))
+    if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) {
         goto processor_register_error;
+    }
 
     if (!is_boot_cpu) {
-        if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS)
+        if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) {
             goto processor_register_error;
+        }
     }
 
     this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle;
@@ -574,7 +594,7 @@ ml_processor_register(
 
     if (!is_boot_cpu) {
         processor_init((struct processor *)this_cpu_datap->cpu_processor,
-            this_cpu_datap->cpu_number, processor_pset(master_processor));
+            this_cpu_datap->cpu_number, processor_pset(master_processor));
 
         if (this_cpu_datap->cpu_l2_access_penalty) {
             /*
@@ -584,22 +604,26 @@ ml_processor_register(
              * preferentially.
              */
             processor_set_primary(this_cpu_datap->cpu_processor,
-                master_processor);
+                master_processor);
         }
     }
 
     *processor_out = this_cpu_datap->cpu_processor;
-    *ipi_handler = cpu_signal_handler;
-    if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL)
+    *ipi_handler_out = cpu_signal_handler;
+    *pmi_handler_out = NULL;
+    if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) {
         *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;
+    }
 
 #if KPC
-    if (kpc_register_cpu(this_cpu_datap) != TRUE)
+    if (kpc_register_cpu(this_cpu_datap) != TRUE) {
         goto processor_register_error;
+    }
 #endif
 
-    if (!is_boot_cpu)
-        early_random_cpu_init(this_cpu_datap->cpu_number);
+    if (!is_boot_cpu) {
+        random_cpu_init(this_cpu_datap->cpu_number);
+    }
 
     return KERN_SUCCESS;
@@ -607,15 +631,16 @@ processor_register_error:
 #if KPC
     kpc_unregister_cpu(this_cpu_datap);
 #endif
-    if (!is_boot_cpu)
+    if (!is_boot_cpu) {
         cpu_data_free(this_cpu_datap);
+    }
 
     return KERN_FAILURE;
 }
 
 void
 ml_init_arm_debug_interface(
-    void * in_cpu_datap,
-    vm_offset_t virt_address)
+    void * in_cpu_datap,
+    vm_offset_t virt_address)
 {
     ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
     do_debugid();
@@ -627,7 +652,7 @@ ml_init_arm_debug_interface(
  */
 void
 init_ast_check(
-    __unused processor_t processor)
+    __unused processor_t processor)
 {
 }
 
@@ -637,7 +662,7 @@ init_ast_check(
  */
 void
 cause_ast_check(
-    processor_t processor)
+    processor_t processor)
 {
     if (current_processor() != processor) {
         cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
@@ -647,7 +672,9 @@ cause_ast_check(
 
 extern uint32_t cpu_idle_count;
 
-void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
+void
+ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
+{
     *icp = ml_at_interrupt_context();
     *pidlep = (cpu_idle_count == real_ncpus);
 }
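
Note: the ml_processor_register() hunks above keep the kernel's goto-cleanup idiom: any failure after a resource has been acquired jumps to processor_register_error, which releases only what was actually set up, and frees the per-cpu data only for non-boot CPUs. A compact userspace rendering of the same shape (every name here is invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    static int
    example_register(int is_boot_cpu)
    {
        char *console_buf = NULL;
        char *cpu_data = NULL;

        console_buf = malloc(64);
        if (console_buf == NULL) {
            goto register_error;
        }
        if (!is_boot_cpu) {
            cpu_data = malloc(128);
            if (cpu_data == NULL) {
                goto register_error;
            }
        }
        printf("registered (boot=%d)\n", is_boot_cpu);
        return 0;   /* on success, ownership passes to the registered "cpu" */

    register_error:
        /* Unwind; free(NULL) is a no-op, so partial setup is safe. */
        free(cpu_data);
        free(console_buf);
        return -1;
    }

    int
    main(void)
    {
        return example_register(0);
    }
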
@@ -656,59 +683,86 @@ void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
  * Routine: ml_cause_interrupt
  * Function: Generate a fake interrupt
  */
-void 
+void
 ml_cause_interrupt(void)
 {
-    return;    /* BS_XXX */
+    return;    /* BS_XXX */
 }
 
 /* Map memory map IO space */
 vm_offset_t
 ml_io_map(
-    vm_offset_t phys_addr,
-    vm_size_t size)
+    vm_offset_t phys_addr,
+    vm_size_t size)
+{
+    return io_map(phys_addr, size, VM_WIMG_IO);
+}
+
+/* Map memory map IO space (with protections specified) */
+vm_offset_t
+ml_io_map_with_prot(
+    vm_offset_t phys_addr,
+    vm_size_t size,
+    vm_prot_t prot)
 {
-    return (io_map(phys_addr, size, VM_WIMG_IO));
+    return io_map_with_prot(phys_addr, size, VM_WIMG_IO, prot);
 }
 
 vm_offset_t
 ml_io_map_wcomb(
-    vm_offset_t phys_addr,
-    vm_size_t size)
+    vm_offset_t phys_addr,
+    vm_size_t size)
 {
-    return (io_map(phys_addr, size, VM_WIMG_WCOMB));
+    return io_map(phys_addr, size, VM_WIMG_WCOMB);
 }
 
 /* boot memory allocation */
-vm_offset_t 
+vm_offset_t
 ml_static_malloc(
-    __unused vm_size_t size)
+    __unused vm_size_t size)
 {
-    return ((vm_offset_t) NULL);
+    return (vm_offset_t) NULL;
 }
 
 vm_map_address_t
 ml_map_high_window(
-    vm_offset_t phys_addr,
-    vm_size_t len)
+    vm_offset_t phys_addr,
+    vm_size_t len)
 {
     return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
 }
 
 vm_offset_t
 ml_static_ptovirt(
-    vm_offset_t paddr)
+    vm_offset_t paddr)
 {
     return phystokv(paddr);
 }
 
 vm_offset_t
 ml_static_vtop(
-    vm_offset_t vaddr)
+    vm_offset_t vaddr)
+{
+    assertf(((vm_address_t)(vaddr) - gVirtBase) < gPhysSize, "%s: illegal vaddr: %p", __func__, (void*)vaddr);
+    return (vm_address_t)(vaddr) - gVirtBase + gPhysBase;
+}
+
+/*
+ * Return the maximum contiguous KVA range that can be accessed from this
+ * physical address.  For arm64, we employ a segmented physical aperture
+ * relocation table which can limit the available range for a given PA to
+ * something less than the extent of physical memory.  But here, we still
+ * have a flat physical aperture, so no such requirement exists.
+ */
+vm_map_address_t
+phystokv_range(pmap_paddr_t pa, vm_size_t *max_len)
 {
-    if (((vm_address_t)(vaddr) - gVirtBase) >= gPhysSize)
-        panic("ml_static_ptovirt(): illegal vaddr: %p\n", (void*)vaddr);
-    return ((vm_address_t)(vaddr) - gVirtBase + gPhysBase);
+    vm_size_t len = gPhysSize - (pa - gPhysBase);
+    if (*max_len > len) {
+        *max_len = len;
+    }
+    assertf((pa - gPhysBase) < gPhysSize, "%s: illegal PA: 0x%lx", __func__, (unsigned long)pa);
+    return pa - gPhysBase + gVirtBase;
 }
 
 vm_offset_t
@@ -737,8 +791,9 @@ ml_static_protect(
     ppnum_t ppn;
     kern_return_t result = KERN_SUCCESS;
 
-    if (vaddr < VM_MIN_KERNEL_ADDRESS)
+    if (vaddr < VM_MIN_KERNEL_ADDRESS) {
         return KERN_FAILURE;
+    }
 
     assert((vaddr & (ARM_PGBYTES - 1)) == 0); /* must be page aligned */
@@ -761,8 +816,8 @@ ml_static_protect(
     }
 
     for (vaddr_cur = vaddr;
-        vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
-        vaddr_cur += ARM_PGBYTES) {
+        vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
+        vaddr_cur += ARM_PGBYTES) {
         ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
         if (ppn != (vm_offset_t) NULL) {
             tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
@@ -787,14 +842,12 @@ ml_static_protect(
 
                 ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
                 *pte_p = ptmp;
-#ifndef __ARM_L1_PTW__
-                FlushPoC_DcacheRegion((vm_offset_t) pte_p, sizeof(*pte_p));
-#endif
             }
         }
     }
 
-    if (vaddr_cur > vaddr)
+    if (vaddr_cur > vaddr) {
         flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));
+    }
 
     return result;
 }
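
Note: the new ml_static_vtop() body and phystokv_range() above both lean on ARM32's flat physical aperture: a physical address maps into kernel virtual space at pa - gPhysBase + gVirtBase, and the contiguous range usable from any PA simply runs to the end of physical memory. A standalone sketch of that arithmetic with made-up base values:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Made-up bases for illustration; the kernel gets these from boot state. */
    static const uint64_t gPhysBase = 0x80000000ull;
    static const uint64_t gVirtBase = 0xc0000000ull;
    static const uint64_t gPhysSize = 0x40000000ull;    /* 1 GiB */

    static uint64_t
    phystokv_range_demo(uint64_t pa, uint64_t *max_len)
    {
        uint64_t len = gPhysSize - (pa - gPhysBase);
        if (*max_len > len) {
            *max_len = len;     /* clamp to the end of physical memory */
        }
        assert((pa - gPhysBase) < gPhysSize);
        return pa - gPhysBase + gVirtBase;
    }

    int
    main(void)
    {
        uint64_t max_len = UINT64_MAX;
        uint64_t kva = phystokv_range_demo(0x80100000ull, &max_len);
        printf("kva=0x%llx usable=0x%llx\n",
            (unsigned long long)kva, (unsigned long long)max_len);
        return 0;
    }
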
@@ -805,22 +858,23 @@ ml_static_protect(
  */
 void
 ml_static_mfree(
-    vm_offset_t vaddr,
-    vm_size_t size)
+    vm_offset_t vaddr,
+    vm_size_t size)
 {
     vm_offset_t vaddr_cur;
     ppnum_t ppn;
     uint32_t freed_pages = 0;
 
     /* It is acceptable (if bad) to fail to free. */
-    if (vaddr < VM_MIN_KERNEL_ADDRESS)
+    if (vaddr < VM_MIN_KERNEL_ADDRESS) {
         return;
+    }
 
-    assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */
+    assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */
 
     for (vaddr_cur = vaddr;
-        vaddr_cur < trunc_page_32(vaddr + size);
-        vaddr_cur += PAGE_SIZE) {
+        vaddr_cur < trunc_page_32(vaddr + size);
+        vaddr_cur += PAGE_SIZE) {
         ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
         if (ppn != (vm_offset_t) NULL) {
             /*
@@ -847,7 +901,7 @@ ml_static_mfree(
     vm_page_wire_count -= freed_pages;
     vm_page_wire_count_initial -= freed_pages;
     vm_page_unlock_queues();
-#if DEBUG
+#if DEBUG
     kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
 #endif
 }
@@ -867,25 +921,30 @@ ml_vtophys(vm_offset_t vaddr)
  * assumed to be wired; e.g., no attempt is made to guarantee that the
  * translations obtained remain valid for the duration of the copy process.
  */
-vm_size_t 
+vm_size_t
 ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
 {
     addr64_t cur_phys_dst, cur_phys_src;
     uint32_t count, nbytes = 0;
 
     while (size > 0) {
-        if (!(cur_phys_src = kvtophys(virtsrc)))
+        if (!(cur_phys_src = kvtophys(virtsrc))) {
             break;
-        if (!(cur_phys_dst = kvtophys(virtdst)))
+        }
+        if (!(cur_phys_dst = kvtophys(virtdst))) {
             break;
+        }
         if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
-            !pmap_valid_address(trunc_page_64(cur_phys_src)))
+            !pmap_valid_address(trunc_page_64(cur_phys_src))) {
             break;
+        }
         count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
-        if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
+        if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
             count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
-        if (count > size)
+        }
+        if (count > size) {
             count = size;
+        }
 
         bcopy_phys(cur_phys_src, cur_phys_dst, count);
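
Note: ml_nofault_copy() above advances one physical page at a time: each chunk is capped by the bytes remaining on the source page, then on the destination page, then by the bytes left to copy. Just that computation, as a runnable sketch (PAGE_SIZE fixed at 4 KiB for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (PAGE_SIZE - 1)

    /* Largest run that stays within both current physical pages. */
    static uint32_t
    chunk_len(uint64_t cur_phys_src, uint64_t cur_phys_dst, uint64_t size)
    {
        uint32_t count = PAGE_SIZE - (uint32_t)(cur_phys_src & PAGE_MASK);
        if (count > PAGE_SIZE - (uint32_t)(cur_phys_dst & PAGE_MASK)) {
            count = PAGE_SIZE - (uint32_t)(cur_phys_dst & PAGE_MASK);
        }
        if (count > size) {
            count = (uint32_t)size;
        }
        return count;
    }

    int
    main(void)
    {
        /* src is 0x100 bytes into its page, dst 0xf00 into its page:
         * the destination page limits the chunk to 0x100 (256) bytes. */
        printf("%u\n", chunk_len(0x1000100ull, 0x2000f00ull, 100000ull));
        return 0;
    }
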
@@ -908,20 +967,24 @@ ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
  * FALSE otherwise.
  */
 
-boolean_t ml_validate_nofault(
+boolean_t
+ml_validate_nofault(
     vm_offset_t virtsrc, vm_size_t size)
 {
     addr64_t cur_phys_src;
     uint32_t count;
 
     while (size > 0) {
-        if (!(cur_phys_src = kvtophys(virtsrc)))
+        if (!(cur_phys_src = kvtophys(virtsrc))) {
             return FALSE;
-        if (!pmap_valid_address(trunc_page_64(cur_phys_src)))
+        }
+        if (!pmap_valid_address(trunc_page_64(cur_phys_src))) {
             return FALSE;
+        }
         count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
-        if (count > size)
+        if (count > size) {
             count = (uint32_t)size;
+        }
 
         virtsrc += count;
         size -= count;
@@ -946,11 +1009,11 @@ active_rt_threads(__unused boolean_t active)
 }
 
 void
-thread_tell_urgency(__unused int urgency,
-    __unused uint64_t rt_period,
-    __unused uint64_t rt_deadline,
-    __unused uint64_t sched_latency,
-    __unused thread_t nthread)
+thread_tell_urgency(__unused thread_urgency_t urgency,
+    __unused uint64_t rt_period,
+    __unused uint64_t rt_deadline,
+    __unused uint64_t sched_latency,
+    __unused thread_t nthread)
 {
 }
@@ -962,15 +1025,17 @@ machine_run_count(__unused uint32_t count)
 processor_t
 machine_choose_processor(__unused processor_set_t pset, processor_t processor)
 {
-    return (processor);
+    return processor;
 }
 
-boolean_t machine_timeout_suspended(void) {
+boolean_t
+machine_timeout_suspended(void)
+{
     return FALSE;
 }
 
-kern_return_t
-ml_interrupt_prewarm(__unused uint64_t deadline)
+kern_return_t
+ml_interrupt_prewarm(__unused uint64_t deadline)
 {
     return KERN_FAILURE;
 }
@@ -1009,29 +1074,38 @@ ml_delay_should_spin(uint64_t interval)
     }
 }
 
-void ml_delay_on_yield(void) {}
+void
+ml_delay_on_yield(void)
+{
+}
 
-boolean_t ml_thread_is64bit(thread_t thread)
+boolean_t
+ml_thread_is64bit(thread_t thread)
 {
-    return (thread_is_64bit_addr(thread));
+    return thread_is_64bit_addr(thread);
 }
 
-void ml_timer_evaluate(void) {
+void
+ml_timer_evaluate(void)
+{
 }
 
 boolean_t
-ml_timer_forced_evaluation(void) {
+ml_timer_forced_evaluation(void)
+{
     return FALSE;
 }
 
 uint64_t
-ml_energy_stat(__unused thread_t t) {
+ml_energy_stat(__unused thread_t t)
+{
     return 0;
 }
 
 void
-ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) {
+ml_gpu_stat_update(__unused uint64_t gpu_ns_delta)
+{
 #if CONFIG_EMBEDDED
     /*
     * For now: update the resource coalition stats of the
@@ -1042,7 +1116,8 @@ ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) {
 }
 
 uint64_t
-ml_gpu_stat(__unused thread_t t) {
+ml_gpu_stat(__unused thread_t t)
+{
     return 0;
 }
@@ -1051,7 +1126,9 @@ static void
 timer_state_event(boolean_t switch_to_kernel)
 {
     thread_t thread = current_thread();
-    if (!thread->precise_user_kernel_time) return;
+    if (!thread->precise_user_kernel_time) {
+        return;
+    }
 
     processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data;
     uint64_t now = ml_get_timebase();
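
Note: the get_arm_cpu_version() addition below packs the MIDR variant and revision fields into a single byte, variant in bits [7:4] and revision in bits [3:0]. Assuming the standard ARM MIDR layout (Revision in bits [3:0], Variant in bits [23:20]; the mask and shift values here are written out rather than taken from xnu's headers), shifting the variant right by MIDR_VAR_SHIFT - 4 lands it in the high nibble:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed standard ARM MIDR field layout. */
    #define MIDR_REV_MASK  0x0000000fu  /* Revision, bits [3:0] */
    #define MIDR_REV_SHIFT 0
    #define MIDR_VAR_MASK  0x00f00000u  /* Variant, bits [23:20] */
    #define MIDR_VAR_SHIFT 20

    static uint32_t
    cpu_version(uint32_t midr)
    {
        /* variant -> bits [7:4], revision -> bits [3:0] */
        return ((midr & MIDR_REV_MASK) >> MIDR_REV_SHIFT) |
               ((midr & MIDR_VAR_MASK) >> (MIDR_VAR_SHIFT - 4));
    }

    int
    main(void)
    {
        /* A Cortex-A9-style MIDR with variant 2, revision 10 -> 0x2a. */
        uint32_t midr = 0x410fc090u | (2u << MIDR_VAR_SHIFT) | 10u;
        printf("0x%02x\n", cpu_version(midr));
        return 0;
    }
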
@@ -1078,19 +1155,28 @@ timer_state_event_kernel_to_user(void)
 }
 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
 
+uint32_t
+get_arm_cpu_version(void)
+{
+    uint32_t value = machine_read_midr();
+
+    /* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */
+    return ((value & MIDR_REV_MASK) >> MIDR_REV_SHIFT) | ((value & MIDR_VAR_MASK) >> (MIDR_VAR_SHIFT - 4));
+}
+
 boolean_t
 user_cont_hwclock_allowed(void)
 {
     return FALSE;
 }
 
-boolean_t
-user_timebase_allowed(void)
+uint8_t
+user_timebase_type(void)
 {
 #if __ARM_TIME__
-    return TRUE;
+    return USER_TIMEBASE_SPEC;
 #else
-    return FALSE;
+    return USER_TIMEBASE_NONE;
 #endif
 }
@@ -1098,7 +1184,7 @@ user_timebase_allowed(void)
  * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
  */
-extern thread_t current_act(void);
+extern thread_t current_act(void) __attribute__((const));
 thread_t
 current_act(void)
 {
@@ -1106,7 +1192,7 @@ current_act(void)
 }
 
 #undef current_thread
-extern thread_t current_thread(void);
+extern thread_t current_thread(void) __attribute__((const));
 thread_t
 current_thread(void)
 {
@@ -1117,27 +1203,28 @@ current_thread(void)
 uintptr_t
 arm_user_protect_begin(thread_t thread)
 {
-    uintptr_t ttbr0, asid = 0;    // kernel asid
+    uintptr_t ttbr0, asid = 0;    // kernel asid
 
-    ttbr0 = __builtin_arm_mrc(15,0,2,0,0);    // Get TTBR0
-    if (ttbr0 != thread->machine.kptw_ttb) {
-        __builtin_arm_mcr(15,0,thread->machine.kptw_ttb,2,0,0);    // Set TTBR0
-        __builtin_arm_mcr(15,0,asid,13,0,1);    // Set CONTEXTIDR
-        __builtin_arm_isb(ISB_SY);
-    }
-    return ttbr0;
+    ttbr0 = __builtin_arm_mrc(15, 0, 2, 0, 0); // Get TTBR0
+    if (ttbr0 != thread->machine.kptw_ttb) {
+        __builtin_arm_mcr(15, 0, thread->machine.kptw_ttb, 2, 0, 0); // Set TTBR0
+        __builtin_arm_mcr(15, 0, asid, 13, 0, 1); // Set CONTEXTIDR
+        __builtin_arm_isb(ISB_SY);
+    }
+    return ttbr0;
 }
 
 void
 arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
 {
-    if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
-        if (disable_interrupts)
-            __asm__ volatile ("cpsid if" ::: "memory");    // Disable FIQ/IRQ
-        __builtin_arm_mcr(15,0,thread->machine.uptw_ttb,2,0,0);    // Set TTBR0
-        __builtin_arm_mcr(15,0,thread->machine.asid,13,0,1);    // Set CONTEXTIDR with thread asid
-        __builtin_arm_dsb(DSB_ISH);
-        __builtin_arm_isb(ISB_SY);
-    }
+    if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
+        if (disable_interrupts) {
+            __asm__ volatile ("cpsid if" ::: "memory"); // Disable FIQ/IRQ
+        }
+        __builtin_arm_mcr(15, 0, thread->machine.uptw_ttb, 2, 0, 0); // Set TTBR0
+        __builtin_arm_mcr(15, 0, thread->machine.asid, 13, 0, 1); // Set CONTEXTIDR with thread asid
+        __builtin_arm_dsb(DSB_ISH);
+        __builtin_arm_isb(ISB_SY);
+    }
 }
 #endif // __ARM_USER_PROTECT__
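
Note: a subtle change above is that the out-of-line current_act()/current_thread() declarations gain __attribute__((const)), which tells the compiler the return value does not depend on any memory it can observe changing, so repeated calls within one function may be collapsed into a single call. A tiny illustration with a stand-in function (not the kernel's):

    #include <stdio.h>

    /* const: no side effects and no reads of mutable global state,
     * so the compiler is free to CSE repeated calls. */
    extern int example_current_id(void) __attribute__((const));

    int
    example_current_id(void)
    {
        return 42;
    }

    int
    main(void)
    {
        /* With the attribute, this may compile to one call, not two. */
        printf("%d\n", example_current_id() + example_current_id());
        return 0;
    }
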