/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
58 #include <mach_assert.h>
61 #include <mach/boolean.h>
62 #include <mach/i386/vm_types.h>
63 #include <mach/i386/vm_param.h>
64 #include <kern/kern_types.h>
65 #include <kern/misc_protos.h>
66 #include <sys/errno.h>
67 #include <i386/param.h>
68 #include <i386/misc_protos.h>
69 #include <i386/cpu_data.h>
70 #include <i386/machine_routines.h>
71 #include <i386/cpuid.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_kern.h>
76 #include <vm/vm_fault.h>
78 #include <libkern/OSAtomic.h>
79 #include <libkern/OSDebug.h>
80 #include <sys/kdebug.h>
83 #include <kdp/kdp_callout.h>
84 #endif /* !MACH_KDP */
86 #include <architecture/i386/pio.h>
88 #include <libkern/OSDebug.h>
96 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
101 /* prevent infinite recursion when memmove calls bcopy; in string.h, bcopy is defined to call memmove */
104 /* XXX - should be gone from here */
105 extern void invalidate_icache64(addr64_t addr
, unsigned cnt
, int phys
);
106 extern void flush_dcache64(addr64_t addr
, unsigned count
, int phys
);
107 extern boolean_t
phys_page_exists(ppnum_t
);
108 extern void bcopy_no_overwrite(const char *from
, char *to
, vm_size_t bytes
);
109 extern void pmap_set_reference(ppnum_t pn
);
110 extern void mapping_set_mod(ppnum_t pa
);
111 extern void mapping_set_ref(ppnum_t pn
);
113 extern void ovbcopy(const char *from
,
116 void machine_callstack(uintptr_t *buf
, vm_size_t callstack_max
);
119 #define value_64bit(value) ((value) & 0xFFFFFFFF00000000ULL)
120 #define low32(x) ((unsigned int)((x) & 0x00000000FFFFFFFFULL))
122 #define INT_SIZE (BYTE_SIZE * sizeof (int))
125 * Set indicated bit in bit string.
128 setbit(int bitno
, int *s
)
130 s
[bitno
/ INT_SIZE
] |= 1 << (bitno
% INT_SIZE
);
134 * Clear indicated bit in bit string.
137 clrbit(int bitno
, int *s
)
139 s
[bitno
/ INT_SIZE
] &= ~(1 << (bitno
% INT_SIZE
));
143 * Test if indicated bit is set in bit string.
146 testbit(int bitno
, int *s
)
148 return s
[bitno
/ INT_SIZE
] & (1 << (bitno
% INT_SIZE
));
152 * Find first bit set in bit string.
159 for (offset
= 0; !*s
; offset
+= (int)INT_SIZE
, ++s
) {
162 return offset
+ __builtin_ctz(*s
);
/*
 * Find first (least-significant) bit set; returns 1-origin
 * bit number, or 0 if no bits are set.
 */
int
ffs(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * ffs.  __builtin_ctz(0) is undefined, hence the guard above.
	 */
	return 1 + __builtin_ctz(mask);
}
/*
 * 64-bit variant of ffs(): 1-origin index of the lowest set bit,
 * or 0 when mask is 0.
 */
int
ffsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * ffsll.  __builtin_ctzll(0) is undefined, hence the guard above.
	 */
	return 1 + __builtin_ctzll(mask);
}
/*
 * Find last (most-significant) bit set; returns 1-origin bit
 * number, or 0 if no bits are set.
 */
int
fls(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	/* __builtin_clz(0) is undefined, hence the guard above. */
	return (sizeof(mask) << 3) - __builtin_clz(mask);
}
/*
 * 64-bit variant of fls(): 1-origin index of the highest set bit,
 * or 0 when mask is 0.
 */
int
flsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	/* __builtin_clzll(0) is undefined, hence the guard above. */
	return (sizeof(mask) << 3) - __builtin_clzll(mask);
}
221 bzero_phys(src64
, bytes
);
229 bzero(PHYSMAP_PTOV(src64
), bytes
);
234 * bcopy_phys - like bcopy but copies from/to physical addresses.
243 /* Not necessary for K64 - but ensure we stay within a page */
244 if (((((uint32_t)src64
& (NBPG
- 1)) + bytes
) > NBPG
) ||
245 ((((uint32_t)dst64
& (NBPG
- 1)) + bytes
) > NBPG
)) {
246 panic("bcopy_phys alignment");
248 bcopy(PHYSMAP_PTOV(src64
), PHYSMAP_PTOV(dst64
), bytes
);
252 * allow a function to get a quick virtual mapping of a physical page
259 int (*func
)(void * buffer
, vm_size_t bytes
, void * arg
),
262 /* Not necessary for K64 - but ensure we stay within a page */
263 if (((((uint32_t)dst64
& (NBPG
- 1)) + bytes
) > NBPG
)) {
264 panic("apply_func_phys alignment");
267 return func(PHYSMAP_PTOV(dst64
), bytes
, arg
);
271 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
279 vm_size_t bytes
) /* num bytes to copy */
281 /* Assume that bcopy copies left-to-right (low addr first). */
282 if (from
+ bytes
<= to
|| to
+ bytes
<= from
|| to
== from
) {
283 bcopy_no_overwrite(from
, to
, bytes
); /* non-overlapping or no-op*/
284 } else if (from
> to
) {
285 bcopy_no_overwrite(from
, to
, bytes
); /* overlapping but OK */
287 /* to > from: overlapping, and must copy right-to-left. */
290 while (bytes
-- > 0) {
298 * Read data from a physical address. Memory should not be cache inhibited.
301 uint64_t reportphyreaddelayabs
;
302 uint64_t reportphywritedelayabs
;
303 uint32_t reportphyreadosbt
;
304 uint32_t reportphywriteosbt
;
306 #if DEVELOPMENT || DEBUG
307 uint32_t phyreadpanic
= 1;
308 uint32_t phywritepanic
= 1;
309 uint64_t tracephyreaddelayabs
= 50 * NSEC_PER_USEC
;
310 uint64_t tracephywritedelayabs
= 50 * NSEC_PER_USEC
;
311 uint64_t simulate_stretched_io
= 0;
313 uint32_t phyreadpanic
= 0;
314 uint32_t phywritepanic
= 0;
315 uint64_t tracephyreaddelayabs
= 0;
316 uint64_t tracephywritedelayabs
= 0;
319 __private_extern__
uint64_t
320 ml_phys_read_data(uint64_t paddr
, int size
)
325 boolean_t istate
= TRUE
, timeread
= FALSE
;
326 uint64_t sabs
= 0, eabs
;
328 if (__improbable(!physmap_enclosed(paddr
))) {
329 panic("%s: 0x%llx out of bounds\n", __FUNCTION__
, paddr
);
332 if (__improbable(reportphyreaddelayabs
!= 0)) {
333 istate
= ml_set_interrupts_enabled(FALSE
);
334 sabs
= mach_absolute_time();
337 #if DEVELOPMENT || DEBUG
338 if (__improbable(timeread
&& simulate_stretched_io
)) {
339 sabs
-= simulate_stretched_io
;
341 #endif /* x86_64 DEVELOPMENT || DEBUG */
345 s1
= *(volatile unsigned char *)PHYSMAP_PTOV(paddr
);
349 s2
= *(volatile unsigned short *)PHYSMAP_PTOV(paddr
);
353 result
= *(volatile unsigned int *)PHYSMAP_PTOV(paddr
);
356 result
= *(volatile unsigned long long *)PHYSMAP_PTOV(paddr
);
359 panic("Invalid size %d for ml_phys_read_data", size
);
363 if (__improbable(timeread
== TRUE
)) {
364 eabs
= mach_absolute_time();
366 #if DEVELOPMENT || DEBUG
367 iotrace(IOTRACE_PHYS_READ
, 0, paddr
, size
, result
, sabs
, eabs
- sabs
);
370 if (__improbable((eabs
- sabs
) > reportphyreaddelayabs
)) {
371 (void)ml_set_interrupts_enabled(istate
);
373 if (phyreadpanic
&& (machine_timeout_suspended() == FALSE
)) {
374 panic_io_port_read();
375 panic("Read from physical addr 0x%llx took %llu ns, "
376 "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
377 paddr
, (eabs
- sabs
), result
, sabs
, eabs
,
378 reportphyreaddelayabs
);
381 if (reportphyreadosbt
) {
382 OSReportWithBacktrace("ml_phys_read_data took %lluus",
383 (eabs
- sabs
) / NSEC_PER_USEC
);
386 DTRACE_PHYSLAT4(physread
, uint64_t, (eabs
- sabs
),
387 uint64_t, paddr
, uint32_t, size
, uint64_t, result
);
388 #endif /* CONFIG_DTRACE */
389 } else if (__improbable(tracephyreaddelayabs
> 0 && (eabs
- sabs
) > tracephyreaddelayabs
)) {
390 KDBG(MACHDBG_CODE(DBG_MACH_IO
, DBC_MACH_IO_PHYS_READ
),
391 (eabs
- sabs
), sabs
, paddr
, result
);
393 (void)ml_set_interrupts_enabled(istate
);
395 (void)ml_set_interrupts_enabled(istate
);
402 static unsigned long long
403 ml_phys_read_long_long(uint64_t paddr
)
405 return ml_phys_read_data(paddr
, 8);
409 ml_phys_read(vm_offset_t paddr
)
411 return (unsigned int) ml_phys_read_data(paddr
, 4);
415 ml_phys_read_word(vm_offset_t paddr
)
417 return (unsigned int) ml_phys_read_data(paddr
, 4);
421 ml_phys_read_64(addr64_t paddr64
)
423 return (unsigned int) ml_phys_read_data(paddr64
, 4);
427 ml_phys_read_word_64(addr64_t paddr64
)
429 return (unsigned int) ml_phys_read_data(paddr64
, 4);
433 ml_phys_read_half(vm_offset_t paddr
)
435 return (unsigned int) ml_phys_read_data(paddr
, 2);
439 ml_phys_read_half_64(addr64_t paddr64
)
441 return (unsigned int) ml_phys_read_data(paddr64
, 2);
445 ml_phys_read_byte(vm_offset_t paddr
)
447 return (unsigned int) ml_phys_read_data(paddr
, 1);
451 ml_phys_read_byte_64(addr64_t paddr64
)
453 return (unsigned int) ml_phys_read_data(paddr64
, 1);
457 ml_phys_read_double(vm_offset_t paddr
)
459 return ml_phys_read_long_long(paddr
);
463 ml_phys_read_double_64(addr64_t paddr64
)
465 return ml_phys_read_long_long(paddr64
);
471 * Write data to a physical address. Memory should not be cache inhibited.
474 __private_extern__
void
475 ml_phys_write_data(uint64_t paddr
, unsigned long long data
, int size
)
477 boolean_t istate
= TRUE
, timewrite
= FALSE
;
478 uint64_t sabs
= 0, eabs
;
480 if (__improbable(!physmap_enclosed(paddr
))) {
481 panic("%s: 0x%llx out of bounds\n", __FUNCTION__
, paddr
);
484 if (__improbable(reportphywritedelayabs
!= 0)) {
485 istate
= ml_set_interrupts_enabled(FALSE
);
486 sabs
= mach_absolute_time();
489 #if DEVELOPMENT || DEBUG
490 if (__improbable(timewrite
&& simulate_stretched_io
)) {
491 sabs
-= simulate_stretched_io
;
493 #endif /* x86_64 DEVELOPMENT || DEBUG */
497 *(volatile unsigned char *)PHYSMAP_PTOV(paddr
) = (unsigned char)data
;
500 *(volatile unsigned short *)PHYSMAP_PTOV(paddr
) = (unsigned short)data
;
503 *(volatile unsigned int *)PHYSMAP_PTOV(paddr
) = (unsigned int)data
;
506 *(volatile unsigned long *)PHYSMAP_PTOV(paddr
) = data
;
509 panic("Invalid size %d for ml_phys_write_data", size
);
513 if (__improbable(timewrite
== TRUE
)) {
514 eabs
= mach_absolute_time();
516 #if DEVELOPMENT || DEBUG
517 iotrace(IOTRACE_PHYS_WRITE
, 0, paddr
, size
, data
, sabs
, eabs
- sabs
);
520 if (__improbable((eabs
- sabs
) > reportphywritedelayabs
)) {
521 (void)ml_set_interrupts_enabled(istate
);
523 if (phywritepanic
&& (machine_timeout_suspended() == FALSE
)) {
524 panic_io_port_read();
525 panic("Write to physical addr 0x%llx took %llu ns, "
526 "data: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
527 paddr
, (eabs
- sabs
), data
, sabs
, eabs
,
528 reportphywritedelayabs
);
531 if (reportphywriteosbt
) {
532 OSReportWithBacktrace("ml_phys_write_data (%p, 0x%llx) "
534 paddr
, data
, (eabs
- sabs
) / NSEC_PER_USEC
);
537 DTRACE_PHYSLAT4(physwrite
, uint64_t, (eabs
- sabs
),
538 uint64_t, paddr
, uint32_t, size
, uint64_t, data
);
539 #endif /* CONFIG_DTRACE */
540 } else if (__improbable(tracephywritedelayabs
> 0 && (eabs
- sabs
) > tracephywritedelayabs
)) {
541 KDBG(MACHDBG_CODE(DBG_MACH_IO
, DBC_MACH_IO_PHYS_WRITE
),
542 (eabs
- sabs
), sabs
, paddr
, data
);
544 (void)ml_set_interrupts_enabled(istate
);
546 (void)ml_set_interrupts_enabled(istate
);
552 ml_phys_write_byte(vm_offset_t paddr
, unsigned int data
)
554 ml_phys_write_data(paddr
, data
, 1);
558 ml_phys_write_byte_64(addr64_t paddr64
, unsigned int data
)
560 ml_phys_write_data(paddr64
, data
, 1);
564 ml_phys_write_half(vm_offset_t paddr
, unsigned int data
)
566 ml_phys_write_data(paddr
, data
, 2);
570 ml_phys_write_half_64(addr64_t paddr64
, unsigned int data
)
572 ml_phys_write_data(paddr64
, data
, 2);
576 ml_phys_write(vm_offset_t paddr
, unsigned int data
)
578 ml_phys_write_data(paddr
, data
, 4);
582 ml_phys_write_64(addr64_t paddr64
, unsigned int data
)
584 ml_phys_write_data(paddr64
, data
, 4);
588 ml_phys_write_word(vm_offset_t paddr
, unsigned int data
)
590 ml_phys_write_data(paddr
, data
, 4);
594 ml_phys_write_word_64(addr64_t paddr64
, unsigned int data
)
596 ml_phys_write_data(paddr64
, data
, 4);
600 ml_phys_write_double(vm_offset_t paddr
, unsigned long long data
)
602 ml_phys_write_data(paddr
, data
, 8);
606 ml_phys_write_double_64(addr64_t paddr64
, unsigned long long data
)
608 ml_phys_write_data(paddr64
, data
, 8);
612 ml_port_io_read(uint16_t ioport
, int size
)
617 boolean_t istate
, timeread
= FALSE
;
619 if (__improbable(reportphyreaddelayabs
!= 0)) {
620 istate
= ml_set_interrupts_enabled(FALSE
);
621 sabs
= mach_absolute_time();
625 #if DEVELOPMENT || DEBUG
626 if (__improbable(timeread
&& simulate_stretched_io
)) {
627 sabs
-= simulate_stretched_io
;
629 #endif /* x86_64 DEVELOPMENT || DEBUG */
633 result
= inb(ioport
);
636 result
= inw(ioport
);
639 result
= inl(ioport
);
642 panic("Invalid size %d for ml_port_io_read(0x%x)", size
, (unsigned)ioport
);
646 if (__improbable(timeread
== TRUE
)) {
647 eabs
= mach_absolute_time();
649 #if DEVELOPMENT || DEBUG
650 iotrace(IOTRACE_PORTIO_READ
, 0, ioport
, size
, result
, sabs
, eabs
- sabs
);
653 if (__improbable((eabs
- sabs
) > reportphyreaddelayabs
)) {
654 (void)ml_set_interrupts_enabled(istate
);
656 if (phyreadpanic
&& (machine_timeout_suspended() == FALSE
)) {
657 panic_io_port_read();
658 panic("Read from IO port 0x%x took %llu ns, "
659 "result: 0x%x (start: %llu, end: %llu), ceiling: %llu",
660 ioport
, (eabs
- sabs
), result
, sabs
, eabs
,
661 reportphyreaddelayabs
);
664 if (reportphyreadosbt
) {
665 OSReportWithBacktrace("ml_port_io_read(0x%x) took %lluus",
666 ioport
, (eabs
- sabs
) / NSEC_PER_USEC
);
669 DTRACE_PHYSLAT3(portioread
, uint64_t, (eabs
- sabs
),
670 uint16_t, ioport
, uint32_t, size
);
671 #endif /* CONFIG_DTRACE */
672 } else if (__improbable(tracephyreaddelayabs
> 0 && (eabs
- sabs
) > tracephyreaddelayabs
)) {
673 KDBG(MACHDBG_CODE(DBG_MACH_IO
, DBC_MACH_IO_PORTIO_READ
),
674 (eabs
- sabs
), sabs
, ioport
, result
);
676 (void)ml_set_interrupts_enabled(istate
);
678 (void)ml_set_interrupts_enabled(istate
);
686 ml_port_io_write(uint16_t ioport
, uint32_t val
, int size
)
689 boolean_t istate
, timewrite
= FALSE
;
691 if (__improbable(reportphywritedelayabs
!= 0)) {
692 istate
= ml_set_interrupts_enabled(FALSE
);
693 sabs
= mach_absolute_time();
696 #if DEVELOPMENT || DEBUG
697 if (__improbable(timewrite
&& simulate_stretched_io
)) {
698 sabs
-= simulate_stretched_io
;
700 #endif /* x86_64 DEVELOPMENT || DEBUG */
704 outb(ioport
, (uint8_t)val
);
707 outw(ioport
, (uint16_t)val
);
710 outl(ioport
, (uint32_t)val
);
713 panic("Invalid size %d for ml_port_io_write(0x%x)", size
, (unsigned)ioport
);
717 if (__improbable(timewrite
== TRUE
)) {
718 eabs
= mach_absolute_time();
720 #if DEVELOPMENT || DEBUG
721 iotrace(IOTRACE_PORTIO_WRITE
, 0, ioport
, size
, val
, sabs
, eabs
- sabs
);
724 if (__improbable((eabs
- sabs
) > reportphywritedelayabs
)) {
725 (void)ml_set_interrupts_enabled(istate
);
727 if (phywritepanic
&& (machine_timeout_suspended() == FALSE
)) {
728 panic_io_port_read();
729 panic("Write to IO port 0x%x took %llu ns, val: 0x%x"
730 " (start: %llu, end: %llu), ceiling: %llu",
731 ioport
, (eabs
- sabs
), val
, sabs
, eabs
,
732 reportphywritedelayabs
);
735 if (reportphywriteosbt
) {
736 OSReportWithBacktrace("ml_port_io_write(0x%x, %d, 0x%llx) "
738 ioport
, size
, val
, (eabs
- sabs
) / NSEC_PER_USEC
);
742 DTRACE_PHYSLAT4(portiowrite
, uint64_t, (eabs
- sabs
),
743 uint16_t, ioport
, uint32_t, size
, uint64_t, val
);
744 #endif /* CONFIG_DTRACE */
745 } else if (__improbable(tracephywritedelayabs
> 0 && (eabs
- sabs
) > tracephywritedelayabs
)) {
746 KDBG(MACHDBG_CODE(DBG_MACH_IO
, DBC_MACH_IO_PORTIO_WRITE
),
747 (eabs
- sabs
), sabs
, ioport
, val
);
749 (void)ml_set_interrupts_enabled(istate
);
751 (void)ml_set_interrupts_enabled(istate
);
757 ml_port_io_read8(uint16_t ioport
)
759 return ml_port_io_read(ioport
, 1);
763 ml_port_io_read16(uint16_t ioport
)
765 return ml_port_io_read(ioport
, 2);
769 ml_port_io_read32(uint16_t ioport
)
771 return ml_port_io_read(ioport
, 4);
775 ml_port_io_write8(uint16_t ioport
, uint8_t val
)
777 ml_port_io_write(ioport
, val
, 1);
781 ml_port_io_write16(uint16_t ioport
, uint16_t val
)
783 ml_port_io_write(ioport
, val
, 2);
787 ml_port_io_write32(uint16_t ioport
, uint32_t val
)
789 ml_port_io_write(ioport
, val
, 4);
792 /* PCI config cycle probing
795 * Read the memory location at physical address paddr.
796 * *Does not* recover from machine checks, unlike the PowerPC implementation.
797 * Should probably be deprecated.
801 ml_probe_read(vm_offset_t paddr
, unsigned int *val
)
803 if ((PAGE_SIZE
- (paddr
& PAGE_MASK
)) < 4) {
807 *val
= ml_phys_read(paddr
);
813 * Read the memory location at physical address paddr.
814 * This is a part of a device probe, so there is a good chance we will
815 * have a machine check here. So we have to be able to handle that.
816 * We assume that machine checks are enabled both in MSR and HIDs
819 ml_probe_read_64(addr64_t paddr64
, unsigned int *val
)
821 if ((PAGE_SIZE
- (paddr64
& PAGE_MASK
)) < 4) {
825 *val
= ml_phys_read_64(paddr64
);
/*
 * Compare two byte ranges; returns 0 when equal, non-zero otherwise
 * (classic BSD bcmp contract — only zero/non-zero is meaningful).
 */
int
bcmp(const void *pa, const void *pb, size_t len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0) {
		return 0;
	}

	do {
		if (*a++ != *b++) {
			break;
		}
	} while (--len);

	return (int)len;
}
/*
 * Lexicographic byte comparison of s1 and s2 over n bytes.
 * Returns <0, 0, or >0 following the standard memcmp contract.
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++) {
				/* Back up to the differing pair and return their difference. */
				return *--p1 - *--p2;
			}
		} while (--n != 0);
	}
	return 0;
}
/*
 * Standard memmove: copy ulen bytes from src to dst, overlap-safe.
 * Delegates to bcopy (which handles overlapping ranges) and returns dst.
 */
void *
memmove(void *dst, const void *src, size_t ulen)
{
	bcopy(src, dst, ulen);
	return dst;
}
/*
 * strlen returns the number of characters in "string" preceeding
 * the terminating null character.
 */
size_t
strlen(const char *string)
{
	const char *ret = string;

	while (*string++ != '\0') {
		continue;
	}
	return string - 1 - ret;
}
899 * Machine-dependent routine to fill in an array with up to callstack_max
900 * levels of return pc information.
904 __unused
uintptr_t *buf
,
905 __unused vm_size_t callstack_max
)
909 #endif /* MACH_ASSERT */
912 fillPage(ppnum_t pa
, unsigned int fill
)
916 int cnt
= PAGE_SIZE
/ sizeof(unsigned int);
920 for (i
= 0, addr
= (unsigned int *)PHYSMAP_PTOV(src
); i
< cnt
; i
++) {
928 __asm__
volatile ("clflush (%0)" : : "r" (ptr
));
932 dcache_incoherent_io_store64(addr64_t pa
, unsigned int count
)
934 addr64_t linesize
= cpuid_info()->cache_linesize
;
935 addr64_t bound
= (pa
+ count
+ linesize
- 1) & ~(linesize
- 1);
940 __clflush(PHYSMAP_PTOV(pa
));
948 dcache_incoherent_io_flush64(addr64_t pa
, unsigned int count
)
950 return dcache_incoherent_io_store64(pa
, count
);
954 flush_dcache64(addr64_t addr
, unsigned count
, int phys
)
957 dcache_incoherent_io_flush64(addr
, count
);
959 uint64_t linesize
= cpuid_info()->cache_linesize
;
960 addr64_t bound
= (addr
+ count
+ linesize
- 1) & ~(linesize
- 1);
962 while (addr
< bound
) {
963 __clflush((void *) (uintptr_t) addr
);
971 invalidate_icache64(__unused addr64_t addr
,
972 __unused
unsigned count
,
978 addr64_t vm_last_addr
;
981 mapping_set_mod(ppnum_t pn
)
987 mapping_set_ref(ppnum_t pn
)
989 pmap_set_reference(pn
);
992 extern i386_cpu_info_t cpuid_cpu_info
;
994 cache_flush_page_phys(ppnum_t pa
)
997 unsigned char *cacheline_addr
;
998 i386_cpu_info_t
*cpuid_infop
= cpuid_info();
1000 int cachelines_to_flush
;
1002 cacheline_size
= cpuid_infop
->cache_linesize
;
1003 if (cacheline_size
== 0) {
1004 panic("cacheline_size=0 cpuid_infop=%p\n", cpuid_infop
);
1006 cachelines_to_flush
= PAGE_SIZE
/ cacheline_size
;
1010 istate
= ml_set_interrupts_enabled(FALSE
);
1012 for (cacheline_addr
= (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa
));
1013 cachelines_to_flush
> 0;
1014 cachelines_to_flush
--, cacheline_addr
+= cacheline_size
) {
1015 __clflush((void *) cacheline_addr
);
1018 (void) ml_set_interrupts_enabled(istate
);
1026 kdp_register_callout(kdp_callout_fn_t fn
, void *arg
)
1028 #pragma unused(fn,arg)
1034 host_vmxon(boolean_t exclusive __unused
)
1036 return VMX_UNSUPPORTED
;