/*
- * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
-/*
+/*
* Mach Operating System
* Copyright (c) 1991,1990,1989 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
+ *
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
#include <vm/vm_fault.h>
#include <libkern/OSAtomic.h>
+#include <libkern/OSDebug.h>
#include <sys/kdebug.h>
+#if !MACH_KDP
+#include <kdp/kdp_callout.h>
+#endif /* !MACH_KDP */
+
+#include <architecture/i386/pio.h>
+
+#if CONFIG_DTRACE
+#include <mach/sdt.h>
+#endif
+
#if 0
#undef KERNEL_DEBUG
#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
#endif
+/* prevent infinite recursion when memmove calls bcopy; in string.h, bcopy is defined to call memmove */
+#undef bcopy
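+
+/*
+ * Illustrative sketch (simplified, not part of the build): without the
+ * #undef, the memmove() defined later in this file would expand into a
+ * call to itself:
+ *
+ *	#define bcopy(src, dst, len)	memmove(dst, src, len)	// string.h, simplified
+ *
+ *	void *
+ *	memmove(void *dst, const void *src, size_t ulen)
+ *	{
+ *		bcopy(src, dst, ulen);	// expands back to memmove(): infinite recursion
+ *		return dst;
+ *	}
+ */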
+
/* XXX - should be gone from here */
-extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
-extern void flush_dcache64(addr64_t addr, unsigned count, int phys);
-extern boolean_t phys_page_exists(ppnum_t);
-extern void bcopy_no_overwrite(const char *from, char *to,vm_size_t bytes);
-extern void pmap_set_reference(ppnum_t pn);
-extern void mapping_set_mod(ppnum_t pa);
-extern void mapping_set_ref(ppnum_t pn);
+extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
+extern void flush_dcache64(addr64_t addr, unsigned count, int phys);
+extern boolean_t phys_page_exists(ppnum_t);
+extern void bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
+extern void pmap_set_reference(ppnum_t pn);
+extern void mapping_set_mod(ppnum_t pa);
+extern void mapping_set_ref(ppnum_t pn);
-extern void ovbcopy(const char *from,
- char *to,
- vm_size_t nbytes);
-void machine_callstack(natural_t *buf, vm_size_t callstack_max);
+extern void ovbcopy(const char *from,
+ char *to,
+ vm_size_t nbytes);
+void machine_callstack(uintptr_t *buf, vm_size_t callstack_max);
#define value_64bit(value) ((value) & 0xFFFFFFFF00000000ULL)
#define low32(x) ((unsigned int)((x) & 0x00000000FFFFFFFFULL))
-#define INT_SIZE (BYTE_SIZE * sizeof (int))
+#define INT_SIZE (BYTE_SIZE * sizeof (int))
/*
* Find first bit set in bit string.
*/
int
ffsbit(int *s)
{
int offset;
- for (offset = 0; !*s; offset += (int)INT_SIZE, ++s);
+ for (offset = 0; !*s; offset += (int)INT_SIZE, ++s) {
+ ;
+ }
return offset + __builtin_ctz(*s);
}
int
ffs(unsigned int mask)
{
- if (mask == 0)
+ if (mask == 0) {
return 0;
+ }
/*
* NOTE: cannot use __builtin_ffs because it generates a call to
* 'ffs'
*/
return 1 + __builtin_ctz(mask);
}
+int
+ffsll(unsigned long long mask)
+{
+ if (mask == 0) {
+ return 0;
+ }
+
+ /*
+ * NOTE: cannot use __builtin_ffsll because it generates a call to
+ * 'ffsll'
+ */
+ return 1 + __builtin_ctzll(mask);
+}
+
+/*
+ * Find last bit set in bit string.
+ */
+int
+fls(unsigned int mask)
+{
+ if (mask == 0) {
+ return 0;
+ }
+
+ return (sizeof(mask) << 3) - __builtin_clz(mask);
+}
+
+int
+flsll(unsigned long long mask)
+{
+ if (mask == 0) {
+ return 0;
+ }
+
+ return (sizeof(mask) << 3) - __builtin_clzll(mask);
+}
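+
+/*
+ * Worked examples (illustrative only): both families use 1-based bit
+ * indices and return 0 for an all-zero argument:
+ *
+ *	ffs(0x0)          == 0	(no bit set)
+ *	ffs(0x1)          == 1	(lowest bit)
+ *	ffs(0x8)          == 4	(1 + __builtin_ctz(0x8) = 1 + 3)
+ *	fls(0x8)          == 4	(32 - __builtin_clz(0x8) = 32 - 28)
+ *	flsll(1ULL << 63) == 64	(highest bit of a 64-bit value)
+ */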
+
void
bzero_phys_nc(
- addr64_t src64,
- uint32_t bytes)
+ addr64_t src64,
+ uint32_t bytes)
{
- bzero_phys(src64,bytes);
+ bzero_phys(src64, bytes);
}
void
bzero_phys(
- addr64_t src64,
- uint32_t bytes)
+ addr64_t src64,
+ uint32_t bytes)
{
bzero(PHYSMAP_PTOV(src64), bytes);
}
void
bcopy_phys(
- addr64_t src64,
- addr64_t dst64,
- vm_size_t bytes)
+ addr64_t src64,
+ addr64_t dst64,
+ vm_size_t bytes)
{
/* Not necessary for K64 - but ensure we stay within a page */
- if (((((uint32_t)src64 & (NBPG-1)) + bytes) > NBPG) ||
- ((((uint32_t)dst64 & (NBPG-1)) + bytes) > NBPG) ) {
- panic("bcopy_phys alignment");
+ if (((((uint32_t)src64 & (NBPG - 1)) + bytes) > NBPG) ||
+ ((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
+ panic("bcopy_phys alignment");
}
bcopy(PHYSMAP_PTOV(src64), PHYSMAP_PTOV(dst64), bytes);
}
-/*
- * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
+/*
+ * allow a function to get a quick virtual mapping of a physical page
+ */
+
+int
+apply_func_phys(
+ addr64_t dst64,
+ vm_size_t bytes,
+ int (*func)(void * buffer, vm_size_t bytes, void * arg),
+ void * arg)
+{
+ /* Not necessary for K64 - but ensure we stay within a page */
+ if (((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
+ panic("apply_func_phys alignment");
+ }
+
+ return func(PHYSMAP_PTOV(dst64), bytes, arg);
+}
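+
+/*
+ * Usage sketch (hypothetical callback, for illustration): checksum one
+ * physical page through its physmap alias. The range must not cross a
+ * page boundary, so a page-aligned address with bytes <= NBPG is safe:
+ *
+ *	static int
+ *	sum_page(void *buffer, vm_size_t bytes, void *arg)
+ *	{
+ *		uint64_t *sum = (uint64_t *)arg;
+ *		vm_size_t i;
+ *
+ *		for (i = 0; i < bytes; i++) {
+ *			*sum += ((const uint8_t *)buffer)[i];
+ *		}
+ *		return 0;
+ *	}
+ *
+ *	uint64_t sum = 0;
+ *	(void) apply_func_phys(page_aligned_paddr, NBPG, sum_page, &sum);
+ */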
+
+/*
+ * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
* them correctly.
*/
void
ovbcopy(
- const char *from,
- char *to,
- vm_size_t bytes) /* num bytes to copy */
+ const char *from,
+ char *to,
+ vm_size_t bytes) /* num bytes to copy */
{
/* Assume that bcopy copies left-to-right (low addr first). */
- if (from + bytes <= to || to + bytes <= from || to == from)
- bcopy_no_overwrite(from, to, bytes); /* non-overlapping or no-op*/
- else if (from > to)
- bcopy_no_overwrite(from, to, bytes); /* overlapping but OK */
- else {
+ if (from + bytes <= to || to + bytes <= from || to == from) {
+ bcopy_no_overwrite(from, to, bytes); /* non-overlapping or no-op*/
+ } else if (from > to) {
+ bcopy_no_overwrite(from, to, bytes); /* overlapping but OK */
+ } else {
/* to > from: overlapping, and must copy right-to-left. */
from += bytes - 1;
to += bytes - 1;
- while (bytes-- > 0)
+ while (bytes-- > 0) {
*to-- = *from--;
+ }
}
}
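+
+/*
+ * Example (illustrative only): shifting a buffer right by one byte gives
+ * to > from with overlap, so the copy must run right-to-left:
+ *
+ *	char buf[5] = "abc";
+ *	ovbcopy(buf, buf + 1, 3);	// buf is now "aabc"
+ *
+ * A naive left-to-right copy would smear the first byte ("aaaa").
+ */
+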
/*
* Read data from a physical address. Memory should not be cache inhibited.
*/
+uint64_t reportphyreaddelayabs;
+uint64_t reportphywritedelayabs;
+uint32_t reportphyreadosbt;
+uint32_t reportphywriteosbt;
+
+#if DEVELOPMENT || DEBUG
+uint32_t phyreadpanic = 1;
+uint32_t phywritepanic = 1;
+uint64_t tracephyreaddelayabs = 50 * NSEC_PER_USEC;
+uint64_t tracephywritedelayabs = 50 * NSEC_PER_USEC;
+uint64_t simulate_stretched_io = 0;
+#else
+uint32_t phyreadpanic = 0;
+uint32_t phywritepanic = 0;
+uint64_t tracephyreaddelayabs = 0;
+uint64_t tracephywritedelayabs = 0;
+#endif
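+
+/*
+ * Note on units (an assumption inferred from the defaults above): the
+ * *delayabs thresholds are compared against mach_absolute_time() deltas,
+ * and on x86 one absolute-time tick is one nanosecond, so the DEVELOPMENT
+ * defaults of 50 * NSEC_PER_USEC flag any physical access slower than
+ * 50us. A threshold of 0 disables the corresponding check.
+ */
+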
-static unsigned int
-ml_phys_read_data(pmap_paddr_t paddr, int size)
+__private_extern__ uint64_t
+ml_phys_read_data(uint64_t paddr, int size)
{
- unsigned int result;
+ uint64_t result = 0;
+ unsigned char s1;
+ unsigned short s2;
+ boolean_t istate = TRUE, timeread = FALSE;
+ uint64_t sabs = 0, eabs;
+
+ if (__improbable(!physmap_enclosed(paddr))) {
+ panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
+ }
- switch (size) {
- unsigned char s1;
- unsigned short s2;
- case 1:
- s1 = *(unsigned char *)PHYSMAP_PTOV(paddr);
- result = s1;
- break;
- case 2:
- s2 = *(unsigned short *)PHYSMAP_PTOV(paddr);
- result = s2;
- break;
- case 4:
- default:
- result = *(unsigned int *)PHYSMAP_PTOV(paddr);
- break;
- }
+ if (__improbable(reportphyreaddelayabs != 0)) {
+ istate = ml_set_interrupts_enabled(FALSE);
+ sabs = mach_absolute_time();
+ timeread = TRUE;
+ }
+#if DEVELOPMENT || DEBUG
+ if (__improbable(timeread && simulate_stretched_io)) {
+ sabs -= simulate_stretched_io;
+ }
+#endif /* x86_64 DEVELOPMENT || DEBUG */
- return result;
+ switch (size) {
+ case 1:
+ s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr);
+ result = s1;
+ break;
+ case 2:
+ s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr);
+ result = s2;
+ break;
+ case 4:
+ result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr);
+ break;
+ case 8:
+ result = *(volatile unsigned long long *)PHYSMAP_PTOV(paddr);
+ break;
+ default:
+ panic("Invalid size %d for ml_phys_read_data", size);
+ break;
+ }
+
+ if (__improbable(timeread == TRUE)) {
+ eabs = mach_absolute_time();
+
+#if DEVELOPMENT || DEBUG
+ iotrace(IOTRACE_PHYS_READ, 0, paddr, size, result, sabs, eabs - sabs);
+#endif
+
+ if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
+ (void)ml_set_interrupts_enabled(istate);
+
+ if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
+ panic_io_port_read();
+ panic("Read from physical addr 0x%llx took %llu ns, "
+ "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
+ paddr, (eabs - sabs), result, sabs, eabs,
+ reportphyreaddelayabs);
+ }
+
+ if (reportphyreadosbt) {
+ OSReportWithBacktrace("ml_phys_read_data took %lluus",
+ (eabs - sabs) / NSEC_PER_USEC);
+ }
+#if CONFIG_DTRACE
+ DTRACE_PHYSLAT4(physread, uint64_t, (eabs - sabs),
+ uint64_t, paddr, uint32_t, size, uint64_t, result);
+#endif /* CONFIG_DTRACE */
+ } else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
+ KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_READ),
+ (eabs - sabs), sabs, paddr, result);
+
+ (void)ml_set_interrupts_enabled(istate);
+ } else {
+ (void)ml_set_interrupts_enabled(istate);
+ }
+ }
+
+ return result;
}
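+
+/*
+ * Design note: when a report threshold is armed, ml_phys_read_data()
+ * keeps interrupts disabled across the access so the measured delta
+ * reflects the access itself rather than preemption; every exit path
+ * restores the saved state via ml_set_interrupts_enabled(istate).
+ */
+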
static unsigned long long
-ml_phys_read_long_long(pmap_paddr_t paddr )
+ml_phys_read_long_long(uint64_t paddr)
{
- return *(unsigned long long *)PHYSMAP_PTOV(paddr);
+ return ml_phys_read_data(paddr, 8);
}
-
-
-unsigned int ml_phys_read( vm_offset_t paddr)
+unsigned int
+ml_phys_read(vm_offset_t paddr)
{
- return ml_phys_read_data((pmap_paddr_t)paddr, 4);
+ return (unsigned int) ml_phys_read_data(paddr, 4);
}
-unsigned int ml_phys_read_word(vm_offset_t paddr) {
-
- return ml_phys_read_data((pmap_paddr_t)paddr, 4);
+unsigned int
+ml_phys_read_word(vm_offset_t paddr)
+{
+ return (unsigned int) ml_phys_read_data(paddr, 4);
}
-unsigned int ml_phys_read_64(addr64_t paddr64)
+unsigned int
+ml_phys_read_64(addr64_t paddr64)
{
- return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
+ return (unsigned int) ml_phys_read_data(paddr64, 4);
}
-unsigned int ml_phys_read_word_64(addr64_t paddr64)
+unsigned int
+ml_phys_read_word_64(addr64_t paddr64)
{
- return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
+ return (unsigned int) ml_phys_read_data(paddr64, 4);
}
-unsigned int ml_phys_read_half(vm_offset_t paddr)
+unsigned int
+ml_phys_read_half(vm_offset_t paddr)
{
- return ml_phys_read_data((pmap_paddr_t)paddr, 2);
+ return (unsigned int) ml_phys_read_data(paddr, 2);
}
-unsigned int ml_phys_read_half_64(addr64_t paddr64)
+unsigned int
+ml_phys_read_half_64(addr64_t paddr64)
{
- return ml_phys_read_data((pmap_paddr_t)paddr64, 2);
+ return (unsigned int) ml_phys_read_data(paddr64, 2);
}
-unsigned int ml_phys_read_byte(vm_offset_t paddr)
+unsigned int
+ml_phys_read_byte(vm_offset_t paddr)
{
- return ml_phys_read_data((pmap_paddr_t)paddr, 1);
+ return (unsigned int) ml_phys_read_data(paddr, 1);
}
-unsigned int ml_phys_read_byte_64(addr64_t paddr64)
+unsigned int
+ml_phys_read_byte_64(addr64_t paddr64)
{
- return ml_phys_read_data((pmap_paddr_t)paddr64, 1);
+ return (unsigned int) ml_phys_read_data(paddr64, 1);
}
-unsigned long long ml_phys_read_double(vm_offset_t paddr)
+unsigned long long
+ml_phys_read_double(vm_offset_t paddr)
{
- return ml_phys_read_long_long((pmap_paddr_t)paddr);
+ return ml_phys_read_long_long(paddr);
}
-unsigned long long ml_phys_read_double_64(addr64_t paddr64)
+unsigned long long
+ml_phys_read_double_64(addr64_t paddr64)
{
- return ml_phys_read_long_long((pmap_paddr_t)paddr64);
+ return ml_phys_read_long_long(paddr64);
}
/*
* Write data to a physical address. Memory should not be cache inhibited.
*/
-static void
-ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
+__private_extern__ void
+ml_phys_write_data(uint64_t paddr, unsigned long long data, int size)
{
- switch (size) {
- case 1:
- *(unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
- break;
- case 2:
- *(unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
- break;
- case 4:
- default:
- *(unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
- break;
- }
+ boolean_t istate = TRUE, timewrite = FALSE;
+ uint64_t sabs = 0, eabs;
+
+ if (__improbable(!physmap_enclosed(paddr))) {
+ panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
+ }
+
+ if (__improbable(reportphywritedelayabs != 0)) {
+ istate = ml_set_interrupts_enabled(FALSE);
+ sabs = mach_absolute_time();
+ timewrite = TRUE;
+ }
+#if DEVELOPMENT || DEBUG
+ if (__improbable(timewrite && simulate_stretched_io)) {
+ sabs -= simulate_stretched_io;
+ }
+#endif /* x86_64 DEVELOPMENT || DEBUG */
+
+ switch (size) {
+ case 1:
+ *(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
+ break;
+ case 2:
+ *(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
+ break;
+ case 4:
+ *(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
+ break;
+ case 8:
+ *(volatile unsigned long long *)PHYSMAP_PTOV(paddr) = data;
+ break;
+ default:
+ panic("Invalid size %d for ml_phys_write_data", size);
+ break;
+ }
+
+ if (__improbable(timewrite == TRUE)) {
+ eabs = mach_absolute_time();
+
+#if DEVELOPMENT || DEBUG
+ iotrace(IOTRACE_PHYS_WRITE, 0, paddr, size, data, sabs, eabs - sabs);
+#endif
+
+ if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
+ (void)ml_set_interrupts_enabled(istate);
+
+ if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
+ panic_io_port_read();
+ panic("Write to physical addr 0x%llx took %llu ns, "
+ "data: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
+ paddr, (eabs - sabs), data, sabs, eabs,
+ reportphywritedelayabs);
+ }
+
+ if (reportphywriteosbt) {
+ OSReportWithBacktrace("ml_phys_write_data(0x%llx, 0x%llx) "
+ "took %lluus",
+ paddr, data, (eabs - sabs) / NSEC_PER_USEC);
+ }
+#if CONFIG_DTRACE
+ DTRACE_PHYSLAT4(physwrite, uint64_t, (eabs - sabs),
+ uint64_t, paddr, uint32_t, size, uint64_t, data);
+#endif /* CONFIG_DTRACE */
+ } else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
+ KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_WRITE),
+ (eabs - sabs), sabs, paddr, data);
+
+ (void)ml_set_interrupts_enabled(istate);
+ } else {
+ (void)ml_set_interrupts_enabled(istate);
+ }
+ }
}
-static void
-ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
+void
+ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
- *(unsigned long long *)PHYSMAP_PTOV(paddr) = data;
+ ml_phys_write_data(paddr, data, 1);
}
+void
+ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
+{
+ ml_phys_write_data(paddr64, data, 1);
+}
+void
+ml_phys_write_half(vm_offset_t paddr, unsigned int data)
+{
+ ml_phys_write_data(paddr, data, 2);
+}
-void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
+void
+ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
- ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
+ ml_phys_write_data(paddr64, data, 2);
}
-void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
+void
+ml_phys_write(vm_offset_t paddr, unsigned int data)
{
- ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
+ ml_phys_write_data(paddr, data, 4);
}
-void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
+void
+ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
- ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
+ ml_phys_write_data(paddr64, data, 4);
}
-void ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
+void
+ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
- ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
+ ml_phys_write_data(paddr, data, 4);
}
-void ml_phys_write(vm_offset_t paddr, unsigned int data)
+void
+ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
- ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
+ ml_phys_write_data(paddr64, data, 4);
+}
+
+void
+ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
+{
+ ml_phys_write_data(paddr, data, 8);
+}
+
+void
+ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
+{
+ ml_phys_write_data(paddr64, data, 8);
+}
+
+uint32_t
+ml_port_io_read(uint16_t ioport, int size)
+{
+ uint32_t result = 0;
+
+ uint64_t sabs, eabs;
+ boolean_t istate, timeread = FALSE;
+
+ if (__improbable(reportphyreaddelayabs != 0)) {
+ istate = ml_set_interrupts_enabled(FALSE);
+ sabs = mach_absolute_time();
+ timeread = TRUE;
+ }
+
+#if DEVELOPMENT || DEBUG
+ if (__improbable(timeread && simulate_stretched_io)) {
+ sabs -= simulate_stretched_io;
+ }
+#endif /* x86_64 DEVELOPMENT || DEBUG */
+
+ switch (size) {
+ case 1:
+ result = inb(ioport);
+ break;
+ case 2:
+ result = inw(ioport);
+ break;
+ case 4:
+ result = inl(ioport);
+ break;
+ default:
+ panic("Invalid size %d for ml_port_io_read(0x%x)", size, (unsigned)ioport);
+ break;
+ }
+
+ if (__improbable(timeread == TRUE)) {
+ eabs = mach_absolute_time();
+
+#if DEVELOPMENT || DEBUG
+ iotrace(IOTRACE_PORTIO_READ, 0, ioport, size, result, sabs, eabs - sabs);
+#endif
+
+ if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
+ (void)ml_set_interrupts_enabled(istate);
+
+ if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
+ panic_io_port_read();
+ panic("Read from IO port 0x%x took %llu ns, "
+ "result: 0x%x (start: %llu, end: %llu), ceiling: %llu",
+ ioport, (eabs - sabs), result, sabs, eabs,
+ reportphyreaddelayabs);
+ }
+
+ if (reportphyreadosbt) {
+ OSReportWithBacktrace("ml_port_io_read(0x%x) took %lluus",
+ ioport, (eabs - sabs) / NSEC_PER_USEC);
+ }
+#if CONFIG_DTRACE
+ DTRACE_PHYSLAT3(portioread, uint64_t, (eabs - sabs),
+ uint16_t, ioport, uint32_t, size);
+#endif /* CONFIG_DTRACE */
+ } else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
+ KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_READ),
+ (eabs - sabs), sabs, ioport, result);
+
+ (void)ml_set_interrupts_enabled(istate);
+ } else {
+ (void)ml_set_interrupts_enabled(istate);
+ }
+ }
+
+ return result;
+}
+
+void
+ml_port_io_write(uint16_t ioport, uint32_t val, int size)
+{
+ uint64_t sabs, eabs;
+ boolean_t istate, timewrite = FALSE;
+
+ if (__improbable(reportphywritedelayabs != 0)) {
+ istate = ml_set_interrupts_enabled(FALSE);
+ sabs = mach_absolute_time();
+ timewrite = TRUE;
+ }
+#if DEVELOPMENT || DEBUG
+ if (__improbable(timewrite && simulate_stretched_io)) {
+ sabs -= simulate_stretched_io;
+ }
+#endif /* x86_64 DEVELOPMENT || DEBUG */
+
+ switch (size) {
+ case 1:
+ outb(ioport, (uint8_t)val);
+ break;
+ case 2:
+ outw(ioport, (uint16_t)val);
+ break;
+ case 4:
+ outl(ioport, (uint32_t)val);
+ break;
+ default:
+ panic("Invalid size %d for ml_port_io_write(0x%x)", size, (unsigned)ioport);
+ break;
+ }
+
+ if (__improbable(timewrite == TRUE)) {
+ eabs = mach_absolute_time();
+
+#if DEVELOPMENT || DEBUG
+ iotrace(IOTRACE_PORTIO_WRITE, 0, ioport, size, val, sabs, eabs - sabs);
+#endif
+
+ if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
+ (void)ml_set_interrupts_enabled(istate);
+
+ if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
+ panic_io_port_read();
+ panic("Write to IO port 0x%x took %llu ns, val: 0x%x"
+ " (start: %llu, end: %llu), ceiling: %llu",
+ ioport, (eabs - sabs), val, sabs, eabs,
+ reportphywritedelayabs);
+ }
+
+ if (reportphywriteosbt) {
+ OSReportWithBacktrace("ml_port_io_write(0x%x, %d, 0x%llx) "
+ "took %lluus",
+ ioport, size, (uint64_t)val, (eabs - sabs) / NSEC_PER_USEC);
+ }
+
+#if CONFIG_DTRACE
+ DTRACE_PHYSLAT4(portiowrite, uint64_t, (eabs - sabs),
+ uint16_t, ioport, uint32_t, size, uint64_t, val);
+#endif /* CONFIG_DTRACE */
+ } else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
+ KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_WRITE),
+ (eabs - sabs), sabs, ioport, val);
+
+ (void)ml_set_interrupts_enabled(istate);
+ } else {
+ (void)ml_set_interrupts_enabled(istate);
+ }
+ }
}
-void ml_phys_write_64(addr64_t paddr64, unsigned int data)
+uint8_t
+ml_port_io_read8(uint16_t ioport)
{
- ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
+ return ml_port_io_read(ioport, 1);
}
-void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
+uint16_t
+ml_port_io_read16(uint16_t ioport)
{
- ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
+ return ml_port_io_read(ioport, 2);
}
-void ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
+uint32_t
+ml_port_io_read32(uint16_t ioport)
{
- ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
+ return ml_port_io_read(ioport, 4);
}
-void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
+void
+ml_port_io_write8(uint16_t ioport, uint8_t val)
{
- ml_phys_write_long_long((pmap_paddr_t)paddr, data);
+ ml_port_io_write(ioport, val, 1);
}
-void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
+void
+ml_port_io_write16(uint16_t ioport, uint16_t val)
{
- ml_phys_write_long_long((pmap_paddr_t)paddr64, data);
+ ml_port_io_write(ioport, val, 2);
}
+void
+ml_port_io_write32(uint16_t ioport, uint32_t val)
+{
+ ml_port_io_write(ioport, val, 4);
+}
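+
+/*
+ * Usage sketch (illustrative only): legacy CMOS/RTC access uses the
+ * classic index/data port pair at 0x70/0x71:
+ *
+ *	ml_port_io_write8(0x70, 0x0A);		// select status register A
+ *	uint8_t sra = ml_port_io_read8(0x71);	// read it back
+ */
+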
/* PCI config cycle probing
*
*
* Read the memory location at physical address paddr.
- * This is a part of a device probe, so there is a good chance we will
- * have a machine check here. So we have to be able to handle that.
- * We assume that machine checks are enabled both in MSR and HIDs
+ * *Does not* recover from machine checks, unlike the PowerPC implementation.
+ * Should probably be deprecated.
*/
boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
- if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4)
- return FALSE;
+ if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4) {
+ return FALSE;
+ }
- *val = ml_phys_read((pmap_paddr_t)paddr);
+ *val = ml_phys_read(paddr);
- return TRUE;
+ return TRUE;
}
/*
* Read the memory location at physical address paddr.
* This is a part of a device probe, so there is a good chance we will
* have a machine check here. So we have to be able to handle that.
* We assume that machine checks are enabled both in MSR and HIDs
*/
-boolean_t
+boolean_t
ml_probe_read_64(addr64_t paddr64, unsigned int *val)
{
- if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4)
- return FALSE;
+ if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4) {
+ return FALSE;
+ }
- *val = ml_phys_read_64((pmap_paddr_t)paddr64);
- return TRUE;
+ *val = ml_phys_read_64(paddr64);
+ return TRUE;
}
-int bcmp(
- const void *pa,
- const void *pb,
- size_t len)
+#undef bcmp
+int
+bcmp(
+ const void *pa,
+ const void *pb,
+ size_t len)
{
const char *a = (const char *)pa;
const char *b = (const char *)pb;
- if (len == 0)
+ if (len == 0) {
return 0;
+ }
- do
- if (*a++ != *b++)
+ do {
+ if (*a++ != *b++) {
break;
- while (--len);
+ }
+ } while (--len);
return (int)len;
}
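+
+/*
+ * Note: unlike memcmp() below, bcmp() only distinguishes equal from
+ * unequal; the nonzero return is the residual length at the mismatch,
+ * not an ordering. For example:
+ *
+ *	bcmp("abc", "abc", 3) == 0	(equal)
+ *	bcmp("abc", "abd", 3) == 1	(mismatch with 1 byte left)
+ */
+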
+#undef memcmp
int
memcmp(const void *s1, const void *s2, size_t n)
{
if (n != 0) {
const unsigned char *p1 = s1, *p2 = s2;
do {
- if (*p1++ != *p2++)
- return (*--p1 - *--p2);
+ if (*p1++ != *p2++) {
+ return *--p1 - *--p2;
+ }
} while (--n != 0);
}
- return (0);
+ return 0;
+}
+
+#undef memmove
+void *
+memmove(void *dst, const void *src, size_t ulen)
+{
+ bcopy(src, dst, ulen);
+ return dst;
}
/*
* Abstract:
* strlen returns the number of characters in "string" preceding
* the terminating null character.
*/
+#undef strlen
size_t
strlen(
- register const char *string)
+ const char *string)
{
- register const char *ret = string;
+ const char *ret = string;
- while (*string++ != '\0')
+ while (*string++ != '\0') {
continue;
+ }
return string - 1 - ret;
}
-uint32_t
-hw_compare_and_store(uint32_t oldval, uint32_t newval, volatile uint32_t *dest)
-{
- return OSCompareAndSwap((UInt32)oldval,
- (UInt32)newval,
- (volatile UInt32 *)dest);
-}
-
-#if MACH_ASSERT
+#if MACH_ASSERT
/*
* Machine-dependent routine to fill in an array with up to callstack_max
* levels of return pc information.
*/
-void machine_callstack(
- __unused natural_t *buf,
- __unused vm_size_t callstack_max)
+void
+machine_callstack(
+ __unused uintptr_t *buf,
+ __unused vm_size_t callstack_max)
{
}
-#endif /* MACH_ASSERT */
+#endif /* MACH_ASSERT */
-void fillPage(ppnum_t pa, unsigned int fill)
+void
+fillPage(ppnum_t pa, unsigned int fill)
{
- pmap_paddr_t src;
- int i;
+ uint64_t src;
int cnt = PAGE_SIZE / sizeof(unsigned int);
- unsigned int *addr;
src = i386_ptob(pa);
- for (i = 0, addr = (unsigned int *)PHYSMAP_PTOV(src); i < cnt; i++)
- *addr++ = fill;
+ memset_word((int *)PHYSMAP_PTOV(src), fill, cnt);
}
-static inline void __sfence(void)
+static inline void
+__clflush(void *ptr)
{
- __asm__ volatile("sfence");
-}
-static inline void __mfence(void)
-{
- __asm__ volatile("mfence");
-}
-static inline void __wbinvd(void)
-{
- __asm__ volatile("wbinvd");
-}
-static inline void __clflush(void *ptr)
-{
- __asm__ volatile("clflush (%0)" : : "r" (ptr));
+ __asm__ volatile ("clflush (%0)" : : "r" (ptr));
}
-void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
+void
+dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
- uint32_t linesize = cpuid_info()->cache_linesize;
- addr64_t addr;
- boolean_t istate;
-
- __mfence();
+ addr64_t linesize = cpuid_info()->cache_linesize;
+ addr64_t bound = (pa + count + linesize - 1) & ~(linesize - 1);
- istate = ml_set_interrupts_enabled(FALSE);
+ mfence();
- for (addr = pa; addr < pa + count; addr += linesize)
- __clflush(PHYSMAP_PTOV(addr));
-
- (void) ml_set_interrupts_enabled(istate);
+ while (pa < bound) {
+ __clflush(PHYSMAP_PTOV(pa));
+ pa += linesize;
+ }
- __mfence();
+ mfence();
}
-void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
+void
+dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
- return(dcache_incoherent_io_store64(pa,count));
+ return dcache_incoherent_io_store64(pa, count);
}
void
-flush_dcache64(__unused addr64_t addr,
- __unused unsigned count,
- __unused int phys)
+flush_dcache64(addr64_t addr, unsigned count, int phys)
{
+ if (phys) {
+ dcache_incoherent_io_flush64(addr, count);
+ } else {
+ uint64_t linesize = cpuid_info()->cache_linesize;
+ addr64_t bound = (addr + count + linesize - 1) & ~(linesize - 1);
+ mfence();
+ while (addr < bound) {
+ __clflush((void *) (uintptr_t) addr);
+ addr += linesize;
+ }
+ mfence();
+ }
}
void
invalidate_icache64(__unused addr64_t addr,
- __unused unsigned count,
- __unused int phys)
+ __unused unsigned count,
+ __unused int phys)
{
}
void
mapping_set_mod(ppnum_t pn)
{
- pmap_set_modify(pn);
+ pmap_set_modify(pn);
}
void
mapping_set_ref(ppnum_t pn)
{
- pmap_set_reference(pn);
+ pmap_set_reference(pn);
}
+extern i386_cpu_info_t cpuid_cpu_info;
void
cache_flush_page_phys(ppnum_t pa)
{
- boolean_t istate;
- unsigned char *cacheline_addr;
- int cacheline_size = cpuid_info()->cache_linesize;
- int cachelines_to_flush = PAGE_SIZE/cacheline_size;
+ boolean_t istate;
+ unsigned char *cacheline_addr;
+ i386_cpu_info_t *cpuid_infop = cpuid_info();
+ int cacheline_size;
+ int cachelines_to_flush;
+
+ cacheline_size = cpuid_infop->cache_linesize;
+ if (cacheline_size == 0) {
+ panic("cacheline_size=0 cpuid_infop=%p\n", cpuid_infop);
+ }
+ cachelines_to_flush = PAGE_SIZE / cacheline_size;
- __mfence();
+ mfence();
istate = ml_set_interrupts_enabled(FALSE);
for (cacheline_addr = (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa));
- cachelines_to_flush > 0;
- cachelines_to_flush--, cacheline_addr += cacheline_size) {
+ cachelines_to_flush > 0;
+ cachelines_to_flush--, cacheline_addr += cacheline_size) {
__clflush((void *) cacheline_addr);
}
(void) ml_set_interrupts_enabled(istate);
- __mfence();
+ mfence();
}
-static int copyio(int, user_addr_t, char *, vm_size_t, vm_size_t *, int);
-static int copyio_phys(addr64_t, addr64_t, vm_size_t, int);
-
-/*
- * The copy engine has the following characteristics
- * - copyio() handles copies to/from user or kernel space
- * - copypv() deals with physical or virtual addresses
- *
- * Readers familiar with the 32-bit kernel will expect Joe's thesis at this
- * point describing the full glory of the copy window implementation. In K64,
- * however, there is no need for windowing. Thanks to the vast shared address
- * space, the kernel has direct access to userspace and to physical memory.
- *
- * User virtual addresses are accessible provided the user's cr3 is loaded.
- * Physical addresses are accessible via the direct map and the PHYSMAP_PTOV()
- * translation.
- *
- * Copyin/out variants all boil done to just these 2 routines in locore.s which
- * provide fault-recoverable copying:
- */
-extern int _bcopy(const void *, void *, vm_size_t);
-extern int _bcopystr(const void *, void *, vm_size_t, vm_size_t *);
-
-
-/*
- * Types of copies:
- */
-#define COPYIN 0 /* from user virtual to kernel virtual */
-#define COPYOUT 1 /* from kernel virtual to user virtual */
-#define COPYINSTR 2 /* string variant of copyout */
-#define COPYINPHYS 3 /* from user virtual to kernel physical */
-#define COPYOUTPHYS 4 /* from kernel physical to user virtual */
-
-
-static int
-copyio(int copy_type, user_addr_t user_addr, char *kernel_addr,
- vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map)
-{
- thread_t thread;
- pmap_t pmap;
- vm_size_t bytes_copied;
- int error = 0;
- boolean_t istate = FALSE;
- boolean_t recursive_CopyIOActive;
-#if KDEBUG
- int debug_type = 0xeff70010;
- debug_type += (copy_type << 2);
-#endif
-
- thread = current_thread();
-
- KERNEL_DEBUG(debug_type | DBG_FUNC_START,
- (unsigned)(user_addr >> 32), (unsigned)user_addr,
- nbytes, thread->machine.copyio_state, 0);
-
- if (nbytes == 0)
- goto out;
-
- pmap = thread->map->pmap;
-
- /* Sanity and security check for addresses to/from a user */
- if ((copy_type == COPYIN ||
- copy_type == COPYINSTR ||
- copy_type == COPYOUT) &&
- (pmap != kernel_pmap) &&
- ((vm_offset_t)kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS ||
- !IS_USERADDR64_CANONICAL(user_addr))) {
- error = EACCES;
- goto out;
- }
-
- /*
- * If the no_shared_cr3 boot-arg is set (true), the kernel runs on
- * its own pmap and cr3 rather than the user's -- so that wild accesses
- * from kernel or kexts can be trapped. So, during copyin and copyout,
- * we need to switch back to the user's map/cr3. The thread is flagged
- * "CopyIOActive" at this time so that if the thread is pre-empted,
- * we will later restore the correct cr3.
- */
- recursive_CopyIOActive = thread->machine.specFlags & CopyIOActive;
- thread->machine.specFlags |= CopyIOActive;
- if (no_shared_cr3) {
- istate = ml_set_interrupts_enabled(FALSE);
- if (get_cr3() != pmap->pm_cr3)
- set_cr3(pmap->pm_cr3);
- }
-
- /*
- * Ensure that we're running on the target thread's cr3.
- */
- if ((pmap != kernel_pmap) && !use_kernel_map &&
- (get_cr3() != pmap->pm_cr3)) {
- panic("copyio(%d,%p,%p,%ld,%p,%d) cr3 is %p expects %p",
- copy_type, (void *)user_addr, kernel_addr, nbytes, lencopied, use_kernel_map,
- (void *) get_cr3(), (void *) pmap->pm_cr3);
- }
- if (no_shared_cr3)
- (void) ml_set_interrupts_enabled(istate);
-
- KERNEL_DEBUG(0xeff70044 | DBG_FUNC_NONE, (unsigned)user_addr,
- (unsigned)kernel_addr, nbytes, 0, 0);
-
- switch (copy_type) {
-
- case COPYIN:
- error = _bcopy((const void *) user_addr,
- kernel_addr,
- nbytes);
- break;
-
- case COPYOUT:
- error = _bcopy(kernel_addr,
- (void *) user_addr,
- nbytes);
- break;
-
- case COPYINPHYS:
- error = _bcopy((const void *) user_addr,
- PHYSMAP_PTOV(kernel_addr),
- nbytes);
- break;
-
- case COPYOUTPHYS:
- error = _bcopy((const void *) PHYSMAP_PTOV(kernel_addr),
- (void *) user_addr,
- nbytes);
- break;
-
- case COPYINSTR:
- error = _bcopystr((const void *) user_addr,
- kernel_addr,
- (int) nbytes,
- &bytes_copied);
-
- /*
- * lencopied should be updated on success
- * or ENAMETOOLONG... but not EFAULT
- */
- if (error != EFAULT)
- *lencopied = bytes_copied;
-
- if (error) {
-#if KDEBUG
- nbytes = *lencopied;
-#endif
- break;
- }
- if (*(kernel_addr + bytes_copied - 1) == 0) {
- /*
- * we found a NULL terminator... we're done
- */
-#if KDEBUG
- nbytes = *lencopied;
-#endif
- break;
- } else {
- /*
- * no more room in the buffer and we haven't
- * yet come across a NULL terminator
- */
-#if KDEBUG
- nbytes = *lencopied;
-#endif
- error = ENAMETOOLONG;
- break;
- }
- break;
- }
-
- if (!recursive_CopyIOActive)
- thread->machine.specFlags &= ~CopyIOActive;
- if (no_shared_cr3) {
- istate = ml_set_interrupts_enabled(FALSE);
- if (get_cr3() != kernel_pmap->pm_cr3)
- set_cr3(kernel_pmap->pm_cr3);
- (void) ml_set_interrupts_enabled(istate);
- }
-
-out:
- KERNEL_DEBUG(debug_type | DBG_FUNC_END, (unsigned)user_addr,
- (unsigned)kernel_addr, (unsigned)nbytes, error, 0);
-
- return (error);
-}
-
-
-static int
-copyio_phys(addr64_t source, addr64_t sink, vm_size_t csize, int which)
-{
- char *paddr;
- user_addr_t vaddr;
- int ctype;
-
- if (which & cppvPsnk) {
- paddr = (char *)sink;
- vaddr = (user_addr_t)source;
- ctype = COPYINPHYS;
- } else {
- paddr = (char *)source;
- vaddr = (user_addr_t)sink;
- ctype = COPYOUTPHYS;
- }
- return copyio(ctype, vaddr, paddr, csize, NULL, which & cppvKmap);
-}
-
-int
-copyinmsg(const user_addr_t user_addr, char *kernel_addr, mach_msg_size_t nbytes)
-{
- return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
-}
-
-int
-copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
-{
- return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
-}
-
-int
-copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
-{
- *lencopied = 0;
-
- return copyio(COPYINSTR, user_addr, kernel_addr, nbytes, lencopied, 0);
-}
-
-int
-copyoutmsg(const char *kernel_addr, user_addr_t user_addr, mach_msg_size_t nbytes)
-{
- return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
-}
-
-int
-copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
-{
- return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
-}
-
-
-kern_return_t
-copypv(addr64_t src64, addr64_t snk64, unsigned int size, int which)
-{
- unsigned int lop, csize;
- int bothphys = 0;
-
- KERNEL_DEBUG(0xeff7004c | DBG_FUNC_START, (unsigned)src64,
- (unsigned)snk64, size, which, 0);
-
- if ((which & (cppvPsrc | cppvPsnk)) == 0 ) /* Make sure that only one is virtual */
- panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */
-
- if ((which & (cppvPsrc | cppvPsnk)) == (cppvPsrc | cppvPsnk))
- bothphys = 1; /* both are physical */
-
- while (size) {
-
- if (bothphys) {
- lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1))); /* Assume sink smallest */
-
- if (lop > (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1))))
- lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1))); /* No, source is smaller */
- } else {
- /*
- * only need to compute the resid for the physical page
- * address... we don't care about where we start/finish in
- * the virtual since we just call the normal copyin/copyout
- */
- if (which & cppvPsrc)
- lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));
- else
- lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));
- }
- csize = size; /* Assume we can copy it all */
- if (lop < size)
- csize = lop; /* Nope, we can't do it all */
-#if 0
- /*
- * flush_dcache64 is currently a nop on the i386...
- * it's used when copying to non-system memory such
- * as video capture cards... on PPC there was a need
- * to flush due to how we mapped this memory... not
- * sure if it's needed on i386.
- */
- if (which & cppvFsrc)
- flush_dcache64(src64, csize, 1); /* If requested, flush source before move */
- if (which & cppvFsnk)
- flush_dcache64(snk64, csize, 1); /* If requested, flush sink before move */
-#endif
- if (bothphys)
- bcopy_phys(src64, snk64, csize); /* Do a physical copy, virtually */
- else {
- if (copyio_phys(src64, snk64, csize, which))
- return (KERN_FAILURE);
- }
-#if 0
- if (which & cppvFsrc)
- flush_dcache64(src64, csize, 1); /* If requested, flush source after move */
- if (which & cppvFsnk)
- flush_dcache64(snk64, csize, 1); /* If requested, flush sink after move */
-#endif
- size -= csize; /* Calculate what is left */
- snk64 += csize; /* Bump sink to next physical address */
- src64 += csize; /* Bump source to next physical address */
- }
- KERNEL_DEBUG(0xeff7004c | DBG_FUNC_END, (unsigned)src64,
- (unsigned)snk64, size, which, 0);
-
- return KERN_SUCCESS;
-}
-
#if !MACH_KDP
void
-kdp_register_callout(void)
+kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
+#pragma unused(fn,arg)
}
#endif
#if !CONFIG_VMX
-int host_vmxon(boolean_t exclusive __unused)
+int
+host_vmxon(boolean_t exclusive __unused)
{
return VMX_UNSUPPORTED;
}
-void host_vmxoff(void)
+void
+host_vmxoff(void)
{
return;
}