X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/de355530ae67247cbd0da700edb3a2a1dae884c2..cb3231590a3c94ab4375e2228bd5e86b0cf1ad7e:/osfmk/kern/machine.c

diff --git a/osfmk/kern/machine.c b/osfmk/kern/machine.c
index 0ef1b7e90..c90a8ea6d 100644
--- a/osfmk/kern/machine.c
+++ b/osfmk/kern/machine.c
@@ -1,49 +1,55 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
-/* 
+/*
  * Mach Operating System
  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
  * All Rights Reserved.
- * 
+ *
  * Permission to use, copy, modify and distribute this software and its
  * documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
- * 
+ *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- * 
+ *
  * Carnegie Mellon requests users of this software to return to
- * 
+ *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
- * 
+ *
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
@@ -57,418 +63,374 @@
  * Support for machine independent machine abstraction.
  */
 
-#include
-
 #include
+
+#include
 #include
 #include
-#include
 #include
 #include
 #include
+#include
+#include
+
+#include
 #include
 #include
+#include
 #include
 #include
-#include
 #include
+#include
 #include
 #include
 #include
+#include
 #include
 #include
-#include
-#include
-#include
+#include
+
+#if HIBERNATION
+#include
+#endif
+#include
+
+#if CONFIG_DTRACE
+extern void (*dtrace_cpu_state_changed_hook)(int, boolean_t);
+#endif
+
+#if defined(__x86_64__)
+#include
+#include
+#endif
 
 /*
  *	Exported variables:
  */
 
-struct machine_info	machine_info;
-struct machine_slot	machine_slot[NCPUS];
-
-static queue_head_t		processor_action_queue;
-static boolean_t		processor_action_active;
-static thread_call_t		processor_action_call;
-static thread_call_data_t	processor_action_call_data;
-decl_simple_lock_data(static,processor_action_lock)
-
-thread_t	machine_wake_thread;
+struct machine_info	machine_info;
 
 /* Forwards */
-processor_set_t	processor_request_action(
-		processor_t	processor,
-		processor_set_t	new_pset);
+static void
+processor_doshutdown(processor_t processor);
 
-void	processor_doaction(
-		processor_t	processor);
+static void
+processor_offline(void * parameter, __unused wait_result_t result);
 
-void	processor_doshutdown(
-		processor_t	processor);
+static void
+processor_offline_intstack(processor_t processor) __dead2;
 
 /*
- *	cpu_up:
+ *	processor_up:
  *
- *	Flag specified cpu as up and running.  Called when a processor comes
- *	online.
+ *	Flag processor as up and running, and available
+ *	for scheduling.
  */
 void
-cpu_up(
-	int	cpu)
+processor_up(
+	processor_t	processor)
 {
-	processor_t	processor = cpu_to_processor(cpu);
-	processor_set_t	pset = &default_pset;
-	struct machine_slot	*ms;
-	spl_t	s;
-
-	/*
-	 * Just twiddle our thumbs; we've got nothing better to do
-	 * yet, anyway.
-	 */
-	while (!simple_lock_try(&pset->processors_lock))
-		continue;
+	processor_set_t	pset;
+	spl_t		s;
+	boolean_t	pset_online = false;
 
 	s = splsched();
-	processor_lock(processor);
 	init_ast_check(processor);
-	ms = &machine_slot[cpu];
-	ms->running = TRUE;
-	machine_info.avail_cpus++;
-	pset_add_processor(pset, processor);
-	simple_lock(&pset->sched_lock);
-	enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
-	processor->state = PROCESSOR_RUNNING;
-	simple_unlock(&pset->sched_lock);
-	processor_unlock(processor);
+	pset = processor->processor_set;
+	pset_lock(pset);
+	if (pset->online_processor_count == 0) {
+		/* About to bring the first processor of a pset online */
+		pset_online = true;
+	}
+	++pset->online_processor_count;
+	pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
+	os_atomic_inc(&processor_avail_count, relaxed);
+	if (processor->is_recommended) {
+		os_atomic_inc(&processor_avail_count_user, relaxed);
+	}
+	commpage_update_active_cpus();
+	if (pset_online) {
+		/* New pset is coming up online; callout to the
+		 * scheduler in case it wants to adjust runqs.
+		 */
+		SCHED(pset_made_schedulable)(processor, pset, true);
+		/* pset lock dropped */
+	} else {
+		pset_unlock(pset);
+	}
+	ml_cpu_up();
 	splx(s);
-	simple_unlock(&pset->processors_lock);
-}
-
-/*
- *	cpu_down:
- *
- *	Flag specified cpu as down.  Called when a processor is about to
- *	go offline.
- */
-void
-cpu_down(
-	int	cpu)
-{
-	processor_t	processor;
-	struct machine_slot	*ms;
-	spl_t	s;
-
-	processor = cpu_to_processor(cpu);
-
-	s = splsched();
-	processor_lock(processor);
-	ms = &machine_slot[cpu];
-	ms->running = FALSE;
-	machine_info.avail_cpus--;
-	/*
-	 * processor has already been removed from pset.
-	 */
-	processor->processor_set_next = PROCESSOR_SET_NULL;
-	processor->state = PROCESSOR_OFF_LINE;
-	processor_unlock(processor);
-	splx(s);
+#if CONFIG_DTRACE
+	if (dtrace_cpu_state_changed_hook) {
+		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE);
+	}
+#endif
 }
 
+#include
+
 kern_return_t
 host_reboot(
-	host_priv_t	host_priv,
-	int		options)
+	host_priv_t	host_priv,
+	int		options)
 {
-	if (host_priv == HOST_PRIV_NULL)
-		return (KERN_INVALID_HOST);
+	if (host_priv == HOST_PRIV_NULL) {
+		return KERN_INVALID_HOST;
+	}
 
 	assert(host_priv == &realhost);
 
+#if DEVELOPMENT || DEBUG
 	if (options & HOST_REBOOT_DEBUGGER) {
 		Debugger("Debugger");
+		return KERN_SUCCESS;
 	}
-	else
+#endif
+
+	if (options & HOST_REBOOT_UPSDELAY) {
+		// UPS power cutoff path
+		PEHaltRestart( kPEUPSDelayHaltCPU );
+	} else {
 		halt_all_cpus(!(options & HOST_REBOOT_HALT));
+	}
 
-	return (KERN_SUCCESS);
+	return KERN_SUCCESS;
 }
 
-/*
- *	processor_request_action:
- *
- *	Common internals of processor_assign and processor_shutdown.
- *	If new_pset is null, this is a shutdown, else it's an assign
- *	and caller must donate a reference.
- *	For assign operations, it returns an old pset that must be deallocated
- *	if it's not NULL.
- *	For shutdown operations, it always returns PROCESSOR_SET_NULL.
- */
-processor_set_t
-processor_request_action(
-	processor_t	processor,
-	processor_set_t	new_pset)
+kern_return_t
+processor_assign(
+	__unused processor_t	processor,
+	__unused processor_set_t	new_pset,
+	__unused boolean_t	wait)
 {
-	processor_set_t pset, old_pset;
+	return KERN_FAILURE;
+}
 
-	/*
-	 * Processor must be in a processor set.  Must lock its idle lock to
-	 * get at processor state.
-	 */
-	pset = processor->processor_set;
-	simple_lock(&pset->sched_lock);
+kern_return_t
+processor_shutdown(
+	processor_t	processor)
+{
+	processor_set_t	pset;
+	spl_t		s;
 
-	/*
-	 * If the processor is dispatching, let it finish - it will set its
-	 * state to running very soon.
-	 */
-	while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING) {
-		simple_unlock(&pset->sched_lock);
+	s = splsched();
+	pset = processor->processor_set;
+	pset_lock(pset);
+	if (processor->state == PROCESSOR_OFF_LINE) {
+		/*
+		 * Success if already shutdown.
+		 */
+		pset_unlock(pset);
+		splx(s);
 
-		simple_lock(&pset->sched_lock);
+		return KERN_SUCCESS;
 	}
 
-	assert( processor->state == PROCESSOR_IDLE ||
-		processor->state == PROCESSOR_RUNNING ||
-		processor->state == PROCESSOR_ASSIGN );
-
-	/*
-	 * Now lock the action queue and do the dirty work.
-	 */
-	simple_lock(&processor_action_lock);
+	if (processor->state == PROCESSOR_START) {
+		/*
+		 * Failure if currently being started.
+		 */
+		pset_unlock(pset);
+		splx(s);
 
-	if (processor->state == PROCESSOR_IDLE) {
-		remqueue(&pset->idle_queue, (queue_entry_t)processor);
-		pset->idle_count--;
+		return KERN_FAILURE;
 	}
-	else
-	if (processor->state == PROCESSOR_RUNNING)
-		remqueue(&pset->active_queue, (queue_entry_t)processor);
-
-	if (processor->state != PROCESSOR_ASSIGN)
-		enqueue_tail(&processor_action_queue, (queue_entry_t)processor);
 
 	/*
-	 * And ask the action_thread to do the work.
+	 * If the processor is dispatching, let it finish.
 	 */
-	if (new_pset != PROCESSOR_SET_NULL) {
-		processor->state = PROCESSOR_ASSIGN;
-		old_pset = processor->processor_set_next;
-		processor->processor_set_next = new_pset;
-	}
-	else {
-		processor->state = PROCESSOR_SHUTDOWN;
-		old_pset = PROCESSOR_SET_NULL;
+	while (processor->state == PROCESSOR_DISPATCHING) {
+		pset_unlock(pset);
+		splx(s);
+		delay(1);
+		s = splsched();
+		pset_lock(pset);
 	}
 
-	simple_unlock(&pset->sched_lock);
-
-	if (processor_action_active) {
-		simple_unlock(&processor_action_lock);
+	/*
+	 * Success if already being shutdown.
+	 */
+	if (processor->state == PROCESSOR_SHUTDOWN) {
+		pset_unlock(pset);
+		splx(s);
 
-		return (old_pset);
+		return KERN_SUCCESS;
 	}
 
-	processor_action_active = TRUE;
-	simple_unlock(&processor_action_lock);
+	pset_update_processor_state(pset, processor, PROCESSOR_SHUTDOWN);
+	pset_unlock(pset);
 
-	processor_unlock(processor);
+	processor_doshutdown(processor);
+	splx(s);
 
-	thread_call_enter(processor_action_call);
+	cpu_exit_wait(processor->cpu_id);
 
-	processor_lock(processor);
-
-	return (old_pset);
-}
-
-kern_return_t
-processor_assign(
-	processor_t	processor,
-	processor_set_t	new_pset,
-	boolean_t	wait)
-{
-#ifdef	lint
-	processor++; new_pset++; wait++;
-#endif	/* lint */
-	return (KERN_FAILURE);
+	return KERN_SUCCESS;
 }
 
 /*
- * processor_shutdown() queues a processor up for shutdown.
- * Any assignment in progress is overriden.
+ * Called with interrupts disabled.
  */
-kern_return_t
-processor_shutdown(
-	processor_t	processor)
+static void
+processor_doshutdown(
+	processor_t	processor)
 {
-	spl_t	s;
+	thread_t self = current_thread();
 
-	s = splsched();
-	processor_lock(processor);
-	if ( processor->state == PROCESSOR_OFF_LINE ||
-	     processor->state == PROCESSOR_SHUTDOWN ) {
-		/*
-		 * Already shutdown or being shutdown -- nothing to do.
-		 */
-		processor_unlock(processor);
-		splx(s);
+	/*
+	 * Get onto the processor to shutdown
+	 */
+	processor_t prev = thread_bind(processor);
+	thread_block(THREAD_CONTINUE_NULL);
+
+	/* interrupts still disabled */
+	assert(ml_get_interrupts_enabled() == FALSE);
+
+	assert(processor == current_processor());
+	assert(processor->state == PROCESSOR_SHUTDOWN);
 
-		return (KERN_SUCCESS);
+#if CONFIG_DTRACE
+	if (dtrace_cpu_state_changed_hook) {
+		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
 	}
+#endif
 
-	processor_request_action(processor, PROCESSOR_SET_NULL);
+	ml_cpu_down();
 
-	assert_wait((event_t)processor, THREAD_UNINT);
+#if HIBERNATION
+	if (processor_avail_count < 2) {
+		hibernate_vm_lock();
+		hibernate_vm_unlock();
+	}
+#endif
 
-	processor_unlock(processor);
-	splx(s);
+	processor_set_t pset = processor->processor_set;
 
-	thread_block(THREAD_CONTINUE_NULL);
+	pset_lock(pset);
+	pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
+	--pset->online_processor_count;
+	os_atomic_dec(&processor_avail_count, relaxed);
+	if (processor->is_recommended) {
+		os_atomic_dec(&processor_avail_count_user, relaxed);
+	}
+	commpage_update_active_cpus();
+	SCHED(processor_queue_shutdown)(processor);
+	/* pset lock dropped */
+	SCHED(rt_queue_shutdown)(processor);
+
+	thread_bind(prev);
 
-	return (KERN_SUCCESS);
+	/* interrupts still disabled */
+
+	/*
+	 * Continue processor shutdown on the processor's idle thread.
+	 * The handoff won't fail because the idle thread has a reserved stack.
+	 * Switching to the idle thread leaves interrupts disabled,
+	 * so we can't accidentally take an interrupt after the context switch.
 	 */
+	thread_t shutdown_thread = processor->idle_thread;
+	shutdown_thread->continuation = processor_offline;
+	shutdown_thread->parameter = processor;
+
+	thread_run(self, NULL, NULL, shutdown_thread);
 }
 
 /*
- * processor_action() shuts down processors or changes their assignment.
+ * Called in the context of the idle thread to shut down the processor
+ *
+ * A shut-down processor looks like it's 'running' the idle thread parked
+ * in this routine, but it's actually been powered off and has no hardware state.
  */
 static void
-_processor_action(
-	thread_call_param_t	p0,
-	thread_call_param_t	p1)
+processor_offline(
+	void * parameter,
+	__unused wait_result_t result)
 {
-	register processor_t	processor;
-	spl_t	s;
+	processor_t processor = (processor_t) parameter;
+	thread_t self = current_thread();
+	__assert_only thread_t old_thread = THREAD_NULL;
 
-	s = splsched();
-	simple_lock(&processor_action_lock);
-
-	while (!queue_empty(&processor_action_queue)) {
-		processor = (processor_t)dequeue_head(&processor_action_queue);
-		simple_unlock(&processor_action_lock);
-		splx(s);
+	assert(processor == current_processor());
+	assert(self->state & TH_IDLE);
+	assert(processor->idle_thread == self);
+	assert(ml_get_interrupts_enabled() == FALSE);
+	assert(self->continuation == NULL);
+	assert(processor->processor_offlined == false);
 
-		processor_doaction(processor);
+	bool enforce_quiesce_safety = gEnforceQuiesceSafety;
 
-		s = splsched();
-		simple_lock(&processor_action_lock);
+	/*
+	 * Scheduling is now disabled for this processor.
+	 * Ensure that primitives that need scheduling (like mutexes) know this.
+	 */
+	if (enforce_quiesce_safety) {
+		disable_preemption();
 	}
 
-	processor_action_active = FALSE;
-	simple_unlock(&processor_action_lock);
-	splx(s);
-}
-
-void
-processor_action(void)
-{
-	queue_init(&processor_action_queue);
-	simple_lock_init(&processor_action_lock, ETAP_THREAD_ACTION);
-	processor_action_active = FALSE;
-
-	thread_call_setup(&processor_action_call_data, _processor_action, NULL);
-	processor_action_call = &processor_action_call_data;
-}
-
-/*
- * processor_doaction actually does the shutdown.  The trick here
- * is to schedule ourselves onto a cpu and then save our
- * context back into the runqs before taking out the cpu.
- */
-void
-processor_doaction(
-	processor_t	processor)
-{
-	thread_t	self = current_thread();
-	processor_set_t	pset;
-	thread_t	old_thread;
-	spl_t		s;
+	/* convince slave_main to come back here */
+	processor->processor_offlined = true;
 
 	/*
-	 * Get onto the processor to shutdown
+	 * Switch to the interrupt stack and shut down the processor.
+	 *
+	 * When the processor comes back, it will eventually call load_context which
+	 * restores the context saved by machine_processor_shutdown, returning here.
 	 */
-	thread_bind(self, processor);
-	thread_block(THREAD_CONTINUE_NULL);
+	old_thread = machine_processor_shutdown(self, processor_offline_intstack, processor);
 
-	pset = processor->processor_set;
-	simple_lock(&pset->processors_lock);
+	/* old_thread should be NULL because we got here through Load_context */
+	assert(old_thread == THREAD_NULL);
 
-	if (pset->processor_count == 1) {
-		thread_t	thread;
-		extern void	start_cpu_thread(void);
+	assert(processor == current_processor());
+	assert(processor->idle_thread == current_thread());
 
-		simple_unlock(&pset->processors_lock);
+	assert(ml_get_interrupts_enabled() == FALSE);
+	assert(self->continuation == NULL);
 
-		/*
-		 * Create the thread, and point it at the routine.
-		 */
-		thread = kernel_thread_with_priority(
-				kernel_task, MAXPRI_KERNEL,
-				start_cpu_thread, TRUE, FALSE);
+	/* Extract the machine_param value stashed by slave_main */
+	void * machine_param = self->parameter;
+	self->parameter = NULL;
 
-		disable_preemption();
+	/* Re-initialize the processor */
+	slave_machine_init(machine_param);
 
-		s = splsched();
-		thread_lock(thread);
-		machine_wake_thread = thread;
-		thread_go_locked(thread, THREAD_AWAKENED);
-		(void)rem_runq(thread);
-		thread_unlock(thread);
-		splx(s);
+	assert(processor->processor_offlined == true);
+	processor->processor_offlined = false;
 
-		simple_lock(&pset->processors_lock);
+	if (enforce_quiesce_safety) {
+		enable_preemption();
 	}
 
-	s = splsched();
-	processor_lock(processor);
-
-	/*
-	 * Do shutdown, make sure we live when processor dies.
-	 */
-	if (processor->state != PROCESSOR_SHUTDOWN) {
-		panic("action_thread -- bad processor state");
-	}
-
-	pset_remove_processor(pset, processor);
-	processor_unlock(processor);
-	simple_unlock(&pset->processors_lock);
 
 	/*
-	 * Clean up.
+	 * Now that the processor is back, invoke the idle thread to find out what to do next.
+	 * idle_thread will enable interrupts.
 	 */
-	thread_bind(self, PROCESSOR_NULL);
-	self->continuation = 0;
-	old_thread = switch_to_shutdown_context(self,
-				processor_doshutdown, processor);
-	if (processor != current_processor())
-		timer_call_shutdown(processor);
-	thread_dispatch(old_thread);
-	thread_wakeup((event_t)processor);
-	splx(s);
+	thread_block(idle_thread);
+	/*NOTREACHED*/
 }
 
 /*
- * Actually do the processor shutdown.  This is called at splsched,
- * running on the processor's shutdown stack.
+ * Complete the shutdown and place the processor offline.
+ *
+ * Called at splsched in the shutdown context
+ * (i.e. on the idle thread, on the interrupt stack)
+ *
+ * The onlining half of this is done in load_context().
  */
-
-void
-processor_doshutdown(
-	processor_t	processor)
+static void
+processor_offline_intstack(
+	processor_t	processor)
 {
-	register int	cpu = processor->slot_num;
+	assert(processor == current_processor());
+	assert(processor->active_thread == current_thread());
 
-	timer_call_cancel(&processor->quantum_timer);
-	thread_dispatch(current_thread());
-	timer_switch(&kernel_timer[cpu]);
+	timer_stop(PROCESSOR_DATA(processor, current_state), processor->last_dispatch);
+
+	cpu_quiescent_counter_leave(processor->last_dispatch);
+
+	PMAP_DEACTIVATE_KERNEL(processor->cpu_id);
 
-	/*
-	 * OK, now exit this cpu.
-	 */
-	PMAP_DEACTIVATE_KERNEL(cpu);
-	thread_machine_set_current(processor->idle_thread);
-	cpu_down(cpu);
 	cpu_sleep();
 	panic("zombie processor");
 	/*NOTREACHED*/
@@ -476,16 +438,13 @@ processor_doshutdown(
 
 kern_return_t
 host_get_boot_info(
-	host_priv_t	host_priv,
-	kernel_boot_info_t	boot_info)
+	host_priv_t	host_priv,
+	kernel_boot_info_t	boot_info)
 {
-	char *src = "";
-	extern char *machine_boot_info(
-			kernel_boot_info_t	boot_info,
-			vm_size_t	buf_len);
-
-	if (host_priv == HOST_PRIV_NULL)
-		return (KERN_INVALID_HOST);
+	const char *src = "";
+	if (host_priv == HOST_PRIV_NULL) {
+		return KERN_INVALID_HOST;
+	}
 
 	assert(host_priv == &realhost);
 
@@ -494,8 +453,250 @@ host_get_boot_info(
 	 * standardized strings generated from boot string.
 	 */
 	src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX);
-	if (src != boot_info)
+	if (src != boot_info) {
 		(void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);
+	}
+
+	return KERN_SUCCESS;
+}
+
+#if CONFIG_DTRACE
+#include
+#endif
+
+unsigned long long
+ml_io_read(uintptr_t vaddr, int size)
+{
+	unsigned long long result = 0;
+	unsigned char s1;
+	unsigned short s2;
+
+#if defined(__x86_64__)
+	uint64_t sabs, eabs;
+	boolean_t istate, timeread = FALSE;
+#if DEVELOPMENT || DEBUG
+	extern uint64_t simulate_stretched_io;
+	uintptr_t paddr = pmap_verify_noncacheable(vaddr);
+#endif /* x86_64 DEVELOPMENT || DEBUG */
+
+	if (__improbable(reportphyreaddelayabs != 0)) {
+		istate = ml_set_interrupts_enabled(FALSE);
+		sabs = mach_absolute_time();
+		timeread = TRUE;
+	}
+
+#if DEVELOPMENT || DEBUG
+	if (__improbable(timeread && simulate_stretched_io)) {
+		sabs -= simulate_stretched_io;
+	}
+#endif /* x86_64 DEVELOPMENT || DEBUG */
+
+#endif /* x86_64 */
+
+	switch (size) {
+	case 1:
+		s1 = *(volatile unsigned char *)vaddr;
+		result = s1;
+		break;
+	case 2:
+		s2 = *(volatile unsigned short *)vaddr;
+		result = s2;
+		break;
+	case 4:
+		result = *(volatile unsigned int *)vaddr;
+		break;
+	case 8:
+		result = *(volatile unsigned long long *)vaddr;
+		break;
+	default:
+		panic("Invalid size %d for ml_io_read(%p)", size, (void *)vaddr);
+		break;
+	}
+
+#if defined(__x86_64__)
+	if (__improbable(timeread == TRUE)) {
+		eabs = mach_absolute_time();
+
+#if DEVELOPMENT || DEBUG
+		iotrace(IOTRACE_IO_READ, vaddr, paddr, size, result, sabs, eabs - sabs);
+#endif
+
+		if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
+#if !(DEVELOPMENT || DEBUG)
+			uintptr_t paddr = kvtophys(vaddr);
+#endif
+
+			(void)ml_set_interrupts_enabled(istate);
+
+			if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
+				panic_io_port_read();
+				panic("Read from IO vaddr 0x%lx paddr 0x%lx took %llu ns, "
+				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
+				    vaddr, paddr, (eabs - sabs), result, sabs, eabs,
+				    reportphyreaddelayabs);
+			}
+
+			if (reportphyreadosbt) {
+				OSReportWithBacktrace("ml_io_read(v=%p, p=%p) size %d result 0x%llx "
+				    "took %lluus",
+				    (void *)vaddr, (void *)paddr, size, result,
+				    (eabs - sabs) / NSEC_PER_USEC);
+			}
+#if CONFIG_DTRACE
+			DTRACE_PHYSLAT5(physioread, uint64_t, (eabs - sabs),
+			    uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, result);
+#endif /* CONFIG_DTRACE */
+		} else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
+#if !(DEVELOPMENT || DEBUG)
+			uintptr_t paddr = kvtophys(vaddr);
+#endif
+
+			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_READ),
+			    (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, result);
+
+			(void)ml_set_interrupts_enabled(istate);
+		} else {
+			(void)ml_set_interrupts_enabled(istate);
+		}
+	}
+#endif /* x86_64 */
+	return result;
+}
+
+unsigned int
+ml_io_read8(uintptr_t vaddr)
+{
+	return (unsigned) ml_io_read(vaddr, 1);
+}
+
+unsigned int
+ml_io_read16(uintptr_t vaddr)
+{
+	return (unsigned) ml_io_read(vaddr, 2);
+}
+
+unsigned int
+ml_io_read32(uintptr_t vaddr)
+{
+	return (unsigned) ml_io_read(vaddr, 4);
+}
+
+unsigned long long
+ml_io_read64(uintptr_t vaddr)
+{
+	return ml_io_read(vaddr, 8);
+}
+
+/* ml_io_write* */
+
+void
+ml_io_write(uintptr_t vaddr, uint64_t val, int size)
+{
+#if defined(__x86_64__)
+	uint64_t sabs, eabs;
+	boolean_t istate, timewrite = FALSE;
+#if DEVELOPMENT || DEBUG
+	extern uint64_t simulate_stretched_io;
+	uintptr_t paddr = pmap_verify_noncacheable(vaddr);
+#endif /* x86_64 DEVELOPMENT || DEBUG */
+	if (__improbable(reportphywritedelayabs != 0)) {
+		istate = ml_set_interrupts_enabled(FALSE);
+		sabs = mach_absolute_time();
+		timewrite = TRUE;
+	}
+
+#if DEVELOPMENT || DEBUG
+	if (__improbable(timewrite && simulate_stretched_io)) {
+		sabs -= simulate_stretched_io;
+	}
+#endif /* x86_64 DEVELOPMENT || DEBUG */
+#endif /* x86_64 */
+
+	switch (size) {
+	case 1:
+		*(volatile uint8_t *)vaddr = (uint8_t)val;
+		break;
+	case 2:
+		*(volatile uint16_t *)vaddr = (uint16_t)val;
+		break;
+	case 4:
+		*(volatile uint32_t *)vaddr = (uint32_t)val;
+		break;
+	case 8:
+		*(volatile uint64_t *)vaddr = (uint64_t)val;
+		break;
+	default:
+		panic("Invalid size %d for ml_io_write(%p, 0x%llx)", size, (void *)vaddr, val);
+		break;
+	}
+
+#if defined(__x86_64__)
+	if (__improbable(timewrite == TRUE)) {
+		eabs = mach_absolute_time();
+
+#if DEVELOPMENT || DEBUG
+		iotrace(IOTRACE_IO_WRITE, vaddr, paddr, size, val, sabs, eabs - sabs);
+#endif
+
+		if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
+#if !(DEVELOPMENT || DEBUG)
+			uintptr_t paddr = kvtophys(vaddr);
+#endif
+
+			(void)ml_set_interrupts_enabled(istate);
+
+			if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
+				panic_io_port_read();
+				panic("Write to IO vaddr %p paddr %p val 0x%llx took %llu ns,"
+				    " (start: %llu, end: %llu), ceiling: %llu",
+				    (void *)vaddr, (void *)paddr, val, (eabs - sabs), sabs, eabs,
+				    reportphywritedelayabs);
+			}
+
+			if (reportphywriteosbt) {
+				OSReportWithBacktrace("ml_io_write size %d (v=%p, p=%p, 0x%llx) "
+				    "took %lluus",
+				    size, (void *)vaddr, (void *)paddr, val, (eabs - sabs) / NSEC_PER_USEC);
+			}
+#if CONFIG_DTRACE
+			DTRACE_PHYSLAT5(physiowrite, uint64_t, (eabs - sabs),
+			    uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, val);
+#endif /* CONFIG_DTRACE */
+		} else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
+#if !(DEVELOPMENT || DEBUG)
+			uintptr_t paddr = kvtophys(vaddr);
+#endif
+
+			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_WRITE),
+			    (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, val);
+
+			(void)ml_set_interrupts_enabled(istate);
+		} else {
+			(void)ml_set_interrupts_enabled(istate);
+		}
+	}
+#endif /* x86_64 */
+}
+
+void
+ml_io_write8(uintptr_t vaddr, uint8_t val)
+{
+	ml_io_write(vaddr, val, 1);
+}
+
+void
+ml_io_write16(uintptr_t vaddr, uint16_t val)
+{
+	ml_io_write(vaddr, val, 2);
+}
 
-	return (KERN_SUCCESS);
+void
+ml_io_write32(uintptr_t vaddr, uint32_t val)
+{
+	ml_io_write(vaddr, val, 4);
+}
+
+void
+ml_io_write64(uintptr_t vaddr, uint64_t val)
+{
+	ml_io_write(vaddr, val, 8);
 }
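
Editorial note, not part of the patch: a minimal, hypothetical sketch of how kernel code might call the ml_io_* MMIO accessors this diff introduces. The register base, offset, and bit mask below are invented for illustration only; a real caller would use the virtual address of its own mapped device range, and the x86_64 latency timing, panics, and tracing shown in the diff happen inside ml_io_read()/ml_io_write() without any extra work by the caller.

/*
 * Hypothetical usage sketch (names HYPO_* are invented, not xnu symbols).
 */
#define HYPO_REG_BASE    ((uintptr_t)0xffffff80a0000000ULL) /* assumed mapped MMIO base */
#define HYPO_STATUS_OFF  0x04                               /* assumed register offset  */
#define HYPO_IRQ_ENABLE  0x00000001u                        /* assumed enable bit       */

static void
hypo_enable_device_irq(void)
{
	uintptr_t reg = HYPO_REG_BASE + HYPO_STATUS_OFF;

	/* 32-bit MMIO read; funnels through ml_io_read(reg, 4) */
	uint32_t status = ml_io_read32(reg);

	/* read-modify-write; funnels through ml_io_write(reg, val, 4) */
	ml_io_write32(reg, status | HYPO_IRQ_ENABLE);
}

The width-specific wrappers (ml_io_read8/16/32/64, ml_io_write8/16/32/64) exist so callers state the access size explicitly; the switch in the common routine performs the access through a volatile pointer of exactly that width, which matters for device registers with read side effects.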