X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/e5568f75972dfc723778653c11cb6b4dc825716a..cb3231590a3c94ab4375e2228bd5e86b0cf1ad7e:/osfmk/kern/machine.c diff --git a/osfmk/kern/machine.c b/osfmk/kern/machine.c index 5c643f6cb..c90a8ea6d 100644 --- a/osfmk/kern/machine.c +++ b/osfmk/kern/machine.c @@ -1,49 +1,55 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ - * - * The contents of this file constitute Original Code as defined in and - * are subject to the Apple Public Source License Version 1.1 (the - * "License"). You may not use this file except in compliance with the - * License. Please obtain a copy of the License at - * http://www.apple.com/publicsource and read it before using this file. - * - * This Original Code and all software distributed under the License are - * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the - * License for the specific language governing rights and limitations - * under the License. - * - * @APPLE_LICENSE_HEADER_END@ + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -57,288 +63,374 @@ * Support for machine independent machine abstraction. */ -#include - #include + +#include #include #include -#include #include #include #include +#include +#include + +#include #include #include +#include #include #include -#include #include +#include #include #include #include +#include #include #include -#include -#include -#include +#include + +#if HIBERNATION +#include +#endif +#include + +#if CONFIG_DTRACE +extern void (*dtrace_cpu_state_changed_hook)(int, boolean_t); +#endif + +#if defined(__x86_64__) +#include +#include +#endif /* * Exported variables: */ -struct machine_info machine_info; -struct machine_slot machine_slot[NCPUS]; - -thread_t machine_wake_thread; +struct machine_info machine_info; /* Forwards */ -void processor_doshutdown( - processor_t processor); +static void +processor_doshutdown(processor_t processor); -/* - * cpu_up: - * - * Flag specified cpu as up and running. Called when a processor comes - * online. - */ -void -cpu_up( - int cpu) -{ - processor_t processor = cpu_to_processor(cpu); - processor_set_t pset = &default_pset; - struct machine_slot *ms; - spl_t s; +static void +processor_offline(void * parameter, __unused wait_result_t result); - s = splsched(); - processor_lock(processor); - init_ast_check(processor); - ms = &machine_slot[cpu]; - ms->running = TRUE; - machine_info.avail_cpus++; - simple_lock(&pset->sched_lock); - pset_add_processor(pset, processor); - enqueue_tail(&pset->active_queue, (queue_entry_t)processor); - processor->deadline = UINT64_MAX; - processor->state = PROCESSOR_RUNNING; - simple_unlock(&pset->sched_lock); - processor_unlock(processor); - splx(s); -} +static void +processor_offline_intstack(processor_t processor) __dead2; /* - * cpu_down: + * processor_up: * - * Flag specified cpu as down. Called when a processor is about to - * go offline. + * Flag processor as up and running, and available + * for scheduling. */ void -cpu_down( - int cpu) +processor_up( + processor_t processor) { - processor_t processor; - struct machine_slot *ms; - spl_t s; - - processor = cpu_to_processor(cpu); + processor_set_t pset; + spl_t s; + boolean_t pset_online = false; s = splsched(); - processor_lock(processor); - ms = &machine_slot[cpu]; - ms->running = FALSE; - machine_info.avail_cpus--; - /* - * processor has already been removed from pset. - */ - processor->state = PROCESSOR_OFF_LINE; - processor_unlock(processor); + init_ast_check(processor); + pset = processor->processor_set; + pset_lock(pset); + if (pset->online_processor_count == 0) { + /* About to bring the first processor of a pset online */ + pset_online = true; + } + ++pset->online_processor_count; + pset_update_processor_state(pset, processor, PROCESSOR_RUNNING); + os_atomic_inc(&processor_avail_count, relaxed); + if (processor->is_recommended) { + os_atomic_inc(&processor_avail_count_user, relaxed); + } + commpage_update_active_cpus(); + if (pset_online) { + /* New pset is coming up online; callout to the + * scheduler in case it wants to adjust runqs. 
+ */ + SCHED(pset_made_schedulable)(processor, pset, true); + /* pset lock dropped */ + } else { + pset_unlock(pset); + } + ml_cpu_up(); splx(s); + +#if CONFIG_DTRACE + if (dtrace_cpu_state_changed_hook) { + (*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE); + } +#endif } +#include kern_return_t host_reboot( - host_priv_t host_priv, - int options) + host_priv_t host_priv, + int options) { - if (host_priv == HOST_PRIV_NULL) - return (KERN_INVALID_HOST); + if (host_priv == HOST_PRIV_NULL) { + return KERN_INVALID_HOST; + } assert(host_priv == &realhost); +#if DEVELOPMENT || DEBUG if (options & HOST_REBOOT_DEBUGGER) { Debugger("Debugger"); - return (KERN_SUCCESS); + return KERN_SUCCESS; } +#endif - halt_all_cpus(!(options & HOST_REBOOT_HALT)); + if (options & HOST_REBOOT_UPSDELAY) { + // UPS power cutoff path + PEHaltRestart( kPEUPSDelayHaltCPU ); + } else { + halt_all_cpus(!(options & HOST_REBOOT_HALT)); + } - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t processor_assign( - processor_t processor, - processor_set_t new_pset, - boolean_t wait) + __unused processor_t processor, + __unused processor_set_t new_pset, + __unused boolean_t wait) { -#ifdef lint - processor++; new_pset++; wait++; -#endif /* lint */ - return (KERN_FAILURE); + return KERN_FAILURE; } kern_return_t processor_shutdown( - processor_t processor) + processor_t processor) { - processor_set_t pset; - spl_t s; + processor_set_t pset; + spl_t s; s = splsched(); - processor_lock(processor); - if ( processor->state == PROCESSOR_OFF_LINE || - processor->state == PROCESSOR_SHUTDOWN ) { + pset = processor->processor_set; + pset_lock(pset); + if (processor->state == PROCESSOR_OFF_LINE) { /* - * Success if already shutdown or being shutdown. + * Success if already shutdown. */ - processor_unlock(processor); + pset_unlock(pset); splx(s); - return (KERN_SUCCESS); + return KERN_SUCCESS; } if (processor->state == PROCESSOR_START) { /* * Failure if currently being started. */ - processor_unlock(processor); + pset_unlock(pset); splx(s); - return (KERN_FAILURE); + return KERN_FAILURE; } /* - * Processor must be in a processor set. Must lock the scheduling - * lock to get at the processor state. + * If the processor is dispatching, let it finish. */ - pset = processor->processor_set; - simple_lock(&pset->sched_lock); + while (processor->state == PROCESSOR_DISPATCHING) { + pset_unlock(pset); + splx(s); + delay(1); + s = splsched(); + pset_lock(pset); + } /* - * If the processor is dispatching, let it finish - it will set its - * state to running very soon. + * Success if already being shutdown. */ - while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING) { - simple_unlock(&pset->sched_lock); - delay(1); - simple_lock(&pset->sched_lock); - } + if (processor->state == PROCESSOR_SHUTDOWN) { + pset_unlock(pset); + splx(s); - if (processor->state == PROCESSOR_IDLE) { - remqueue(&pset->idle_queue, (queue_entry_t)processor); - pset->idle_count--; + return KERN_SUCCESS; } - else - if (processor->state == PROCESSOR_RUNNING) - remqueue(&pset->active_queue, (queue_entry_t)processor); - else - panic("processor_request_action"); - - processor->state = PROCESSOR_SHUTDOWN; - simple_unlock(&pset->sched_lock); - - processor_unlock(processor); + pset_update_processor_state(pset, processor, PROCESSOR_SHUTDOWN); + pset_unlock(pset); processor_doshutdown(processor); splx(s); - return (KERN_SUCCESS); + cpu_exit_wait(processor->cpu_id); + + return KERN_SUCCESS; } /* - * Called at splsched. + * Called with interrupts disabled. 
*/ -void +static void processor_doshutdown( - processor_t processor) + processor_t processor) { - thread_t old_thread, self = current_thread(); - processor_set_t pset; - processor_t prev; + thread_t self = current_thread(); /* * Get onto the processor to shutdown */ - prev = thread_bind(self, processor); + processor_t prev = thread_bind(processor); thread_block(THREAD_CONTINUE_NULL); - processor_lock(processor); - pset = processor->processor_set; - simple_lock(&pset->sched_lock); + /* interrupts still disabled */ + assert(ml_get_interrupts_enabled() == FALSE); - if (pset->processor_count == 1) { - thread_t thread; - extern void start_cpu_thread(void); + assert(processor == current_processor()); + assert(processor->state == PROCESSOR_SHUTDOWN); - simple_unlock(&pset->sched_lock); - processor_unlock(processor); +#if CONFIG_DTRACE + if (dtrace_cpu_state_changed_hook) { + (*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE); + } +#endif - /* - * Create the thread, and point it at the routine. - */ - thread = kernel_thread_create(start_cpu_thread, MAXPRI_KERNEL); + ml_cpu_down(); - thread_lock(thread); - machine_wake_thread = thread; - thread->state = TH_RUN; - pset_run_incr(thread->processor_set); - thread_unlock(thread); +#if HIBERNATION + if (processor_avail_count < 2) { + hibernate_vm_lock(); + hibernate_vm_unlock(); + } +#endif - processor_lock(processor); - simple_lock(&pset->sched_lock); + processor_set_t pset = processor->processor_set; + + pset_lock(pset); + pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE); + --pset->online_processor_count; + os_atomic_dec(&processor_avail_count, relaxed); + if (processor->is_recommended) { + os_atomic_dec(&processor_avail_count_user, relaxed); } + commpage_update_active_cpus(); + SCHED(processor_queue_shutdown)(processor); + /* pset lock dropped */ + SCHED(rt_queue_shutdown)(processor); - assert(processor->state == PROCESSOR_SHUTDOWN); + thread_bind(prev); - pset_remove_processor(pset, processor); - simple_unlock(&pset->sched_lock); - processor_unlock(processor); + /* interrupts still disabled */ /* - * Clean up. + * Continue processor shutdown on the processor's idle thread. + * The handoff won't fail because the idle thread has a reserved stack. + * Switching to the idle thread leaves interrupts disabled, + * so we can't accidentally take an interrupt after the context switch. */ - thread_bind(self, prev); - old_thread = switch_to_shutdown_context(self, - processor_offline, processor); - if (processor != current_processor()) - timer_call_shutdown(processor); - thread_dispatch(old_thread); + thread_t shutdown_thread = processor->idle_thread; + shutdown_thread->continuation = processor_offline; + shutdown_thread->parameter = processor; + + thread_run(self, NULL, NULL, shutdown_thread); } /* - * Actually do the processor shutdown. This is called at splsched, - * running on the processor's shutdown stack. + * Called in the context of the idle thread to shut down the processor + * + * A shut-down processor looks like it's 'running' the idle thread parked + * in this routine, but it's actually been powered off and has no hardware state. 
*/ - -void +static void processor_offline( - processor_t processor) + void * parameter, + __unused wait_result_t result) { - register thread_t old_thread = processor->active_thread; - register int cpu = processor->slot_num; + processor_t processor = (processor_t) parameter; + thread_t self = current_thread(); + __assert_only thread_t old_thread = THREAD_NULL; + + assert(processor == current_processor()); + assert(self->state & TH_IDLE); + assert(processor->idle_thread == self); + assert(ml_get_interrupts_enabled() == FALSE); + assert(self->continuation == NULL); + assert(processor->processor_offlined == false); + + bool enforce_quiesce_safety = gEnforceQuiesceSafety; + + /* + * Scheduling is now disabled for this processor. + * Ensure that primitives that need scheduling (like mutexes) know this. + */ + if (enforce_quiesce_safety) { + disable_preemption(); + } + + /* convince slave_main to come back here */ + processor->processor_offlined = true; - timer_call_cancel(&processor->quantum_timer); - timer_switch(&kernel_timer[cpu]); - processor->active_thread = processor->idle_thread; - machine_thread_set_current(processor->active_thread); - thread_dispatch(old_thread); + /* + * Switch to the interrupt stack and shut down the processor. + * + * When the processor comes back, it will eventually call load_context which + * restores the context saved by machine_processor_shutdown, returning here. + */ + old_thread = machine_processor_shutdown(self, processor_offline_intstack, processor); + + /* old_thread should be NULL because we got here through Load_context */ + assert(old_thread == THREAD_NULL); + + assert(processor == current_processor()); + assert(processor->idle_thread == current_thread()); + + assert(ml_get_interrupts_enabled() == FALSE); + assert(self->continuation == NULL); + + /* Extract the machine_param value stashed by slave_main */ + void * machine_param = self->parameter; + self->parameter = NULL; + + /* Re-initialize the processor */ + slave_machine_init(machine_param); + + assert(processor->processor_offlined == true); + processor->processor_offlined = false; + + if (enforce_quiesce_safety) { + enable_preemption(); + } /* - * OK, now exit this cpu. + * Now that the processor is back, invoke the idle thread to find out what to do next. + * idle_thread will enable interrupts. */ - PMAP_DEACTIVATE_KERNEL(cpu); - cpu_down(cpu); + thread_block(idle_thread); + /*NOTREACHED*/ +} + +/* + * Complete the shutdown and place the processor offline. + * + * Called at splsched in the shutdown context + * (i.e. on the idle thread, on the interrupt stack) + * + * The onlining half of this is done in load_context(). 
+ */
+static void
+processor_offline_intstack(
+	processor_t processor)
+{
+	assert(processor == current_processor());
+	assert(processor->active_thread == current_thread());
+
+	timer_stop(PROCESSOR_DATA(processor, current_state), processor->last_dispatch);
+
+	cpu_quiescent_counter_leave(processor->last_dispatch);
+
+	PMAP_DEACTIVATE_KERNEL(processor->cpu_id);
+	cpu_sleep();
 	panic("zombie processor");
 	/*NOTREACHED*/
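A note on the shutdown path above: processor_shutdown() refuses to take a core down mid-dispatch. It drops the pset lock, waits a tick, retakes it, and re-checks until the transient PROCESSOR_DISPATCHING state clears — the state can only change while the lock is dropped. Below is a minimal userspace sketch of that settle loop, with a pthread mutex standing in for the pset lock and usleep(1) in place of delay(1) at splsched; every name in it is invented for illustration, none of it is xnu API.

#include <pthread.h>
#include <unistd.h>

enum pstate { P_RUNNING, P_DISPATCHING, P_SHUTDOWN };

static pthread_mutex_t pset_lock = PTHREAD_MUTEX_INITIALIZER;
static enum pstate cpu_state = P_DISPATCHING;

/* Analogue of processor_shutdown()'s wait: unlock, back off, relock,
 * re-check, until the transient state has settled. */
static void
shutdown_one(void)
{
	pthread_mutex_lock(&pset_lock);
	while (cpu_state == P_DISPATCHING) {
		pthread_mutex_unlock(&pset_lock);
		usleep(1);		/* xnu uses delay(1) here */
		pthread_mutex_lock(&pset_lock);
	}
	cpu_state = P_SHUTDOWN;	/* safe: state has settled under the lock */
	pthread_mutex_unlock(&pset_lock);
}

/* A second thread playing the dispatcher, eventually finishing. */
static void *
dispatcher(void *arg)
{
	(void)arg;
	usleep(1000);
	pthread_mutex_lock(&pset_lock);
	cpu_state = P_RUNNING;
	pthread_mutex_unlock(&pset_lock);
	return NULL;
}

int
main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, dispatcher, NULL);
	shutdown_one();
	pthread_join(t, NULL);
	return 0;
}

The loop is correct because PROCESSOR_DISPATCHING is a bounded transient: the dispatching CPU will move to RUNNING shortly, and it can only do so while the lock is released.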
@@ -346,16 +438,13 @@ processor_offline(
 
 kern_return_t
 host_get_boot_info(
-        host_priv_t         host_priv,
-        kernel_boot_info_t  boot_info)
+	host_priv_t             host_priv,
+	kernel_boot_info_t      boot_info)
 {
-	char *src = "";
-	extern char *machine_boot_info(
-		kernel_boot_info_t boot_info,
-		vm_size_t buf_len);
-
-	if (host_priv == HOST_PRIV_NULL)
-		return (KERN_INVALID_HOST);
+	const char *src = "";
+	if (host_priv == HOST_PRIV_NULL) {
+		return KERN_INVALID_HOST;
+	}
 
 	assert(host_priv == &realhost);
 
@@ -364,8 +453,250 @@ host_get_boot_info(
 	 * standardized strings generated from boot string.
 	 */
 	src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX);
-	if (src != boot_info)
+	if (src != boot_info) {
 		(void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);
+	}
+
+	return KERN_SUCCESS;
+}
+
+#if CONFIG_DTRACE
+#include <mach/sdt.h>
+#endif
+
+unsigned long long
+ml_io_read(uintptr_t vaddr, int size)
+{
+	unsigned long long result = 0;
+	unsigned char s1;
+	unsigned short s2;
+
+#if defined(__x86_64__)
+	uint64_t sabs, eabs;
+	boolean_t istate, timeread = FALSE;
+#if DEVELOPMENT || DEBUG
+	extern uint64_t simulate_stretched_io;
+	uintptr_t paddr = pmap_verify_noncacheable(vaddr);
+#endif /* x86_64 DEVELOPMENT || DEBUG */
+	if (__improbable(reportphyreaddelayabs != 0)) {
+		istate = ml_set_interrupts_enabled(FALSE);
+		sabs = mach_absolute_time();
+		timeread = TRUE;
+	}
+
+#if DEVELOPMENT || DEBUG
+	if (__improbable(timeread && simulate_stretched_io)) {
+		sabs -= simulate_stretched_io;
+	}
+#endif /* x86_64 DEVELOPMENT || DEBUG */
+
+#endif /* x86_64 */
+
+	switch (size) {
+	case 1:
+		s1 = *(volatile unsigned char *)vaddr;
+		result = s1;
+		break;
+	case 2:
+		s2 = *(volatile unsigned short *)vaddr;
+		result = s2;
+		break;
+	case 4:
+		result = *(volatile unsigned int *)vaddr;
+		break;
+	case 8:
+		result = *(volatile unsigned long long *)vaddr;
+		break;
+	default:
+		panic("Invalid size %d for ml_io_read(%p)", size, (void *)vaddr);
+		break;
+	}
+
+#if defined(__x86_64__)
+	if (__improbable(timeread == TRUE)) {
+		eabs = mach_absolute_time();
+
+#if DEVELOPMENT || DEBUG
+		iotrace(IOTRACE_IO_READ, vaddr, paddr, size, result, sabs, eabs - sabs);
+#endif
+
+		if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
+#if !(DEVELOPMENT || DEBUG)
+			uintptr_t paddr = kvtophys(vaddr);
+#endif
+
+			(void)ml_set_interrupts_enabled(istate);
+
+			if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
+				panic_io_port_read();
+				panic("Read from IO vaddr 0x%lx paddr 0x%lx took %llu ns, "
+				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
+				    vaddr, paddr, (eabs - sabs), result, sabs, eabs,
+				    reportphyreaddelayabs);
+			}
+
+			if (reportphyreadosbt) {
+				OSReportWithBacktrace("ml_io_read(v=%p, p=%p) size %d result 0x%llx "
+				    "took %lluus",
+				    (void *)vaddr, (void *)paddr, size, result,
+				    (eabs - sabs) / NSEC_PER_USEC);
+			}
+#if CONFIG_DTRACE
+			DTRACE_PHYSLAT5(physioread, uint64_t, (eabs - sabs),
+			    uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, result);
+#endif /* CONFIG_DTRACE */
+		} else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
+#if !(DEVELOPMENT || DEBUG)
+			uintptr_t paddr = kvtophys(vaddr);
+#endif
+
+			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_READ),
+			    (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, result);
+
+			(void)ml_set_interrupts_enabled(istate);
+		} else {
+			(void)ml_set_interrupts_enabled(istate);
+		}
+	}
+#endif /* x86_64 */
+	return result;
 }
 
-	return (KERN_SUCCESS);
+unsigned int
+ml_io_read8(uintptr_t vaddr)
+{
+	return (unsigned) ml_io_read(vaddr, 1);
+}
+
+unsigned int
+ml_io_read16(uintptr_t vaddr)
+{
+	return (unsigned) ml_io_read(vaddr, 2);
+}
+
+unsigned int
+ml_io_read32(uintptr_t vaddr)
+{
+	return (unsigned) ml_io_read(vaddr, 4);
+}
+
+unsigned long long
+ml_io_read64(uintptr_t vaddr)
+{
+	return ml_io_read(vaddr, 8);
+}
+
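ml_io_read() above wraps every MMIO load in an optional latency guard: snapshot mach_absolute_time(), perform the volatile access, then panic, backtrace, or emit a kdebug/DTrace record if the access overran a tunable ceiling. The same guard is easy to prototype outside the kernel. Below is a minimal userspace sketch, assuming an ordinary variable stands in for the uncached device mapping and using an arbitrary 1 µs ceiling — only the shape of the check comes from this patch, none of the names below are xnu's.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t
now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Stand-in for an uncached device-register mapping. */
static volatile uint32_t fake_reg = 0xdeadbeef;

/* Arbitrary ceiling; xnu's reportphyreaddelayabs is a tunable. */
#define READ_CEILING_NS 1000ull

static uint32_t
guarded_read32(volatile uint32_t *reg)
{
	uint64_t sabs = now_ns();
	uint32_t val = *reg;		/* the timed device access */
	uint64_t eabs = now_ns();

	if (eabs - sabs > READ_CEILING_NS) {
		/* xnu would panic or OSReportWithBacktrace() here */
		fprintf(stderr, "slow MMIO read at %p: %llu ns\n",
		    (void *)reg, (unsigned long long)(eabs - sabs));
	}
	return val;
}

int
main(void)
{
	printf("0x%x\n", guarded_read32(&fake_reg));
	return 0;
}

Note the ordering in the kernel version: interrupts are disabled before the first timestamp so an unlucky interrupt cannot be misattributed to the device, and re-enabled only after the measurement is classified.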
+/* ml_io_write* */
+
+void
+ml_io_write(uintptr_t vaddr, uint64_t val, int size)
+{
+#if defined(__x86_64__)
+	uint64_t sabs, eabs;
+	boolean_t istate, timewrite = FALSE;
+#if DEVELOPMENT || DEBUG
+	extern uint64_t simulate_stretched_io;
+	uintptr_t paddr = pmap_verify_noncacheable(vaddr);
+#endif /* x86_64 DEVELOPMENT || DEBUG */
+	if (__improbable(reportphywritedelayabs != 0)) {
+		istate = ml_set_interrupts_enabled(FALSE);
+		sabs = mach_absolute_time();
+		timewrite = TRUE;
+	}
+
+#if DEVELOPMENT || DEBUG
+	if (__improbable(timewrite && simulate_stretched_io)) {
+		sabs -= simulate_stretched_io;
+	}
+#endif /* x86_64 DEVELOPMENT || DEBUG */
+#endif /* x86_64 */
+
+	switch (size) {
+	case 1:
+		*(volatile uint8_t *)vaddr = (uint8_t)val;
+		break;
+	case 2:
+		*(volatile uint16_t *)vaddr = (uint16_t)val;
+		break;
+	case 4:
+		*(volatile uint32_t *)vaddr = (uint32_t)val;
+		break;
+	case 8:
+		*(volatile uint64_t *)vaddr = (uint64_t)val;
+		break;
+	default:
+		panic("Invalid size %d for ml_io_write(%p, 0x%llx)", size, (void *)vaddr, val);
+		break;
+	}
+
+#if defined(__x86_64__)
+	if (__improbable(timewrite == TRUE)) {
+		eabs = mach_absolute_time();
+
+#if DEVELOPMENT || DEBUG
+		iotrace(IOTRACE_IO_WRITE, vaddr, paddr, size, val, sabs, eabs - sabs);
+#endif
+
+		if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
+#if !(DEVELOPMENT || DEBUG)
+			uintptr_t paddr = kvtophys(vaddr);
+#endif
+
+			(void)ml_set_interrupts_enabled(istate);
+
+			if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
+				panic_io_port_read();
+				panic("Write to IO vaddr %p paddr %p val 0x%llx took %llu ns,"
+				    " (start: %llu, end: %llu), ceiling: %llu",
+				    (void *)vaddr, (void *)paddr, val, (eabs - sabs), sabs, eabs,
+				    reportphywritedelayabs);
+			}
+
+			if (reportphywriteosbt) {
+				OSReportWithBacktrace("ml_io_write size %d (v=%p, p=%p, 0x%llx) "
+				    "took %lluus",
+				    size, (void *)vaddr, (void *)paddr, val, (eabs - sabs) / NSEC_PER_USEC);
+			}
+#if CONFIG_DTRACE
+			DTRACE_PHYSLAT5(physiowrite, uint64_t, (eabs - sabs),
+			    uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, val);
+#endif /* CONFIG_DTRACE */
+		} else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
+#if !(DEVELOPMENT || DEBUG)
+			uintptr_t paddr = kvtophys(vaddr);
+#endif
+
+			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_WRITE),
+			    (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, val);
+
+			(void)ml_set_interrupts_enabled(istate);
+		} else {
+			(void)ml_set_interrupts_enabled(istate);
+		}
+	}
+#endif /* x86_64 */
+}
+
+void
+ml_io_write8(uintptr_t vaddr, uint8_t val)
+{
+	ml_io_write(vaddr, val, 1);
+}
+
+void
+ml_io_write16(uintptr_t vaddr, uint16_t val)
+{
+	ml_io_write(vaddr, val, 2);
+}
+
+void
+ml_io_write32(uintptr_t vaddr, uint32_t val)
+{
+	ml_io_write(vaddr, val, 4);
+}
+
+void
+ml_io_write64(uintptr_t vaddr, uint64_t val)
+{
+	ml_io_write(vaddr, val, 8);
+}
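From a caller's point of view, the width-specific wrappers exist so that every device-register access inherits the latency guard for free. Here is a hypothetical driver fragment written against the patched kernel; the device, its register offsets, and the mydev_* names are invented for illustration, and only the ml_io_* calls come from this diff.

#include <stdint.h>

/* Prototypes as introduced by this patch. */
extern unsigned int ml_io_read32(uintptr_t vaddr);
extern void ml_io_write32(uintptr_t vaddr, uint32_t val);

#define MYDEV_CTRL	0x00	/* invented control register offset */
#define MYDEV_STATUS	0x04	/* invented status register offset */
#define MYDEV_READY	0x01	/* invented status bit */

/* 'regs' is the kernel-virtual base of the device's MMIO mapping. */
static int
mydev_start(uintptr_t regs)
{
	ml_io_write32(regs + MYDEV_CTRL, 1);	/* kick the device */

	/* Each poll goes through the guarded accessor, so a device that
	 * wedges the bus surfaces as a reported (or panicking) slow read
	 * rather than a silent hang. */
	for (int tries = 0; tries < 1000; tries++) {
		if (ml_io_read32(regs + MYDEV_STATUS) & MYDEV_READY) {
			return 0;	/* device came up */
		}
	}
	return -1;			/* never became ready */
}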