/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_assert.h>

#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>
/* XXX - should be gone from here */
extern void		invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void		flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t	phys_page_exists(ppnum_t);
extern pt_entry_t	*pmap_mapgetpte(vm_map_t, vm_offset_t);
extern void		bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void		pmap_set_reference(ppnum_t pn);
extern void		mapping_set_mod(ppnum_t pa);
extern void		mapping_set_ref(ppnum_t pn);
extern void		switch_to_serial_console(void);
extern kern_return_t	copyp2p(vm_offset_t source,
				vm_offset_t dest,
				vm_size_t size,
				unsigned int flush_action);
extern void		fillPage(ppnum_t pa, unsigned int fill);
extern void		ovbcopy(const char *from,
				char *to,
				vm_size_t nbytes);

void machine_callstack(natural_t *buf, vm_size_t callstack_max);
#define value_64bit(value)	((value) & 0xFFFFFFFF00000000LL)
#define low32(x)		((unsigned int)((x) & 0x00000000FFFFFFFFLL))
void
bzero_phys(
	   addr64_t src64,
	   vm_size_t bytes)
{
	vm_offset_t src = low32(src64);
	pt_entry_t save2;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM2)
		panic("bzero_phys: CMAP busy");

	*(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
	    INTEL_PTE_REF | INTEL_PTE_MOD;
	save2 = *(pt_entry_t *) CM2;

	bzero((void *)((unsigned int)CA2 | (src & INTEL_OFFMASK)), bytes);
	if (save2 != *(pt_entry_t *) CM2)
		panic("bzero_phys CMAP changed");
	*(pt_entry_t *) CM2 = 0;
	mp_enable_preemption();
}
/*
 * copyp2p(source, dest, size, flush_action)
 *
 * Copy 'size' bytes from one physical address to another.
 * The caller must validate both physical ranges.
 *
 *	if flush_action == 0, no cache flush necessary
 *	if flush_action == 1, flush the source
 *	if flush_action == 2, flush the dest
 *	if flush_action == 3, flush both source and dest
 */
kern_return_t
copyp2p(vm_offset_t source,
	vm_offset_t dest,
	vm_size_t size,
	unsigned int flush_action)
{

	switch (flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}

	bcopy_phys((addr64_t)source, (addr64_t)dest, (vm_size_t)size);	/* Do a physical copy */

	switch (flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}

	return KERN_SUCCESS;
}
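
/*
 * Example usage (a sketch, not compiled here; src_phys and dst_phys are
 * hypothetical caller-validated physical addresses):
 *
 *	kern_return_t kr;
 *	kr = copyp2p(src_phys, dst_phys, PAGE_SIZE, 3);	// flush source and dest around the move
 */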
/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */
void
bcopy_phys(
	   addr64_t src64,
	   addr64_t dst64,
	   vm_size_t bytes)
{
	vm_offset_t src = low32(src64);
	vm_offset_t dst = low32(dst64);
	pt_entry_t save1, save2;

	/* ensure we stay within a page */
	if ((((src & (NBPG - 1)) + bytes) > NBPG) ||
	    (((dst & (NBPG - 1)) + bytes) > NBPG))
		panic("bcopy_phys");

	mp_disable_preemption();
	if (*(pt_entry_t *) CM1 || *(pt_entry_t *) CM2)
		panic("bcopy_phys: CMAP busy");

	*(pt_entry_t *) CM1 = INTEL_PTE_VALID | (src & PG_FRAME) | INTEL_PTE_REF;
	*(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (dst & PG_FRAME) |
	    INTEL_PTE_REF | INTEL_PTE_MOD;
	save1 = *(pt_entry_t *) CM1;
	save2 = *(pt_entry_t *) CM2;

	bcopy((void *) ((uintptr_t)CA1 | (src & INTEL_OFFMASK)),
	      (void *) ((uintptr_t)CA2 | (dst & INTEL_OFFMASK)), bytes);
	if ((save1 != *(pt_entry_t *) CM1) || (save2 != *(pt_entry_t *) CM2))
		panic("bcopy_phys CMAP changed");
	*(pt_entry_t *) CM1 = 0;
	*(pt_entry_t *) CM2 = 0;
	mp_enable_preemption();
}
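
/*
 * Example usage (a sketch, not compiled here): bcopy_phys() panics if either
 * range crosses a page boundary, so a larger copy is chunked by the caller.
 * src, dst and len are hypothetical:
 *
 *	while (len) {
 *		vm_size_t chunk = NBPG - (src & (NBPG - 1));
 *		if (chunk > NBPG - (dst & (NBPG - 1)))
 *			chunk = NBPG - (dst & (NBPG - 1));
 *		if (chunk > len)
 *			chunk = len;
 *		bcopy_phys(src, dst, chunk);
 *		src += chunk; dst += chunk; len -= chunk;
 *	}
 */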
/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 *           them correctly.
 */
void
ovbcopy(
	const char	*from,
	char		*to,
	vm_size_t	bytes)		/* num bytes to copy */
{
	/* Assume that bcopy copies left-to-right (low addr first). */
	if (from + bytes <= to || to + bytes <= from || to == from)
		bcopy_no_overwrite(from, to, bytes);	/* non-overlapping or no-op */
	else if (from > to)
		bcopy_no_overwrite(from, to, bytes);	/* overlapping but OK */
	else {
		/* to > from: overlapping, and must copy right-to-left. */
		from += bytes - 1;
		to += bytes - 1;
		while (bytes-- > 0)
			*to-- = *from--;
	}
}
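
/*
 * Example (a sketch, not compiled here): shifting a buffer up by one byte
 * overlaps with to > from, so the tail is copied right-to-left:
 *
 *	char buf[9] = "abcdefgh";
 *	ovbcopy(buf, buf + 1, 7);	// buf now holds "aabcdefg"
 */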
/*
 * Read data from a physical address. Memory should not be cache inhibited.
 */
static unsigned int
ml_phys_read_data(vm_offset_t paddr, int size)
{
	unsigned int result;
	unsigned char s1;
	unsigned short s2;
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_read_data: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
	save = *(pt_entry_t *) CM3;

	switch (size) {
	case 1:
		s1 = *(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
		result = s1;
		break;
	case 2:
		s2 = *(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
		result = s2;
		break;
	case 4:
	default:
		result = *(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
		break;
	}

	if (save != *(pt_entry_t *) CM3)
		panic("ml_phys_read_data CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();

	return result;
}
static unsigned long long
ml_phys_read_long_long(vm_offset_t paddr)
{
	unsigned long long result;
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_read_data: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
	save = *(pt_entry_t *) CM3;

	result = *(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));

	if (save != *(pt_entry_t *) CM3)
		panic("ml_phys_read_data CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();

	return result;
}
unsigned int ml_phys_read(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long(paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr)
{
	return ml_phys_read_long_long(low32(paddr));
}
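
/*
 * Example usage (a sketch, not compiled here; phys_addr is a hypothetical
 * physical address):
 *
 *	unsigned int       word = ml_phys_read(phys_addr);		// 4 bytes
 *	unsigned int       byte = ml_phys_read_byte(phys_addr + 1);	// 1 byte
 *	unsigned long long dbl  = ml_phys_read_double(phys_addr);	// 8 bytes
 */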
/*
 * Write data to a physical address. Memory should not be cache inhibited.
 */
static void
ml_phys_write_data(vm_offset_t paddr, unsigned long data, int size)
{
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_write_data: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
	    INTEL_PTE_REF | INTEL_PTE_MOD;
	save = *(pt_entry_t *) CM3;

	switch (size) {
	case 1:
		*(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned char)data;
		break;
	case 2:
		*(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned short)data;
		break;
	case 4:
	default:
		*(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;
		break;
	}

	if (save != *(pt_entry_t *) CM3)
		panic("ml_phys_write_data CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();
}
static void
ml_phys_write_long_long(vm_offset_t paddr, unsigned long long data)
{
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_write_data: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
	    INTEL_PTE_REF | INTEL_PTE_MOD;
	save = *(pt_entry_t *) CM3;

	*(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;

	if (save != *(pt_entry_t *) CM3)
		panic("ml_phys_write_data CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();
}
void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr, unsigned int data)
{
	ml_phys_write_data(low32(paddr), data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr, unsigned int data)
{
	ml_phys_write_data(low32(paddr), data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr, unsigned int data)
{
	ml_phys_write_data(low32(paddr), data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr, unsigned int data)
{
	ml_phys_write_data(low32(paddr), data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long(paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr, unsigned long long data)
{
	ml_phys_write_long_long(low32(paddr), data);
}
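
/*
 * Example usage (a sketch, not compiled here; phys_addr and value are
 * hypothetical):
 *
 *	ml_phys_write(phys_addr, value);			// 32-bit store
 *	ml_phys_write_byte(phys_addr + 3, value & 0xff);	// single byte
 */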
/* PCI config cycle probing
 *
 *	Read the memory location at physical address paddr.
 *	This is a part of a device probe, so there is a good chance we will
 *	have a machine check here. So we have to be able to handle that.
 *	We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
	*val = ml_phys_read(paddr);
	return TRUE;
}

/*
 *	Read the memory location at physical address paddr.
 *	This is a part of a device probe, so there is a good chance we will
 *	have a machine check here. So we have to be able to handle that.
 *	We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read_64(addr64_t paddr, unsigned int *val)
{
	*val = ml_phys_read_64(paddr);
	return TRUE;
}
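
/*
 * Example usage (a sketch, not compiled here; probe_paddr is a hypothetical
 * address being probed during device discovery):
 *
 *	unsigned int val;
 *	if (ml_probe_read(probe_paddr, &val)) {
 *		// probe succeeded, val holds the data read
 *	}
 */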
int
bcmp(
	const void	*pa,
	const void	*pb,
	size_t		len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0)
		return 0;

	do
		if (*a++ != *b++)
			break;
	while (--len);

	return len;
}
int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n != 0);
	}
	return 0;
}
/*
 * strlen returns the number of characters in "string" preceding
 * the terminating null character.
 */
size_t
strlen(
	register const char *string)
{
	register const char *ret = string;

	while (*string++ != '\0')
		continue;
	return string - 1 - ret;
}
#include <libkern/OSAtomic.h>

uint32_t
hw_atomic_add(uint32_t *dest, uint32_t delt)
{
	uint32_t oldValue, newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue + delt);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_sub(uint32_t *dest, uint32_t delt)
{
	uint32_t oldValue, newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue - delt);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_or(uint32_t *dest, uint32_t mask)
{
	uint32_t oldValue, newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue | mask);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_and(uint32_t *dest, uint32_t mask)
{
	uint32_t oldValue, newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue & mask);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
{
	return OSCompareAndSwap((UInt32)oldval, (UInt32)newval, (UInt32 *)dest);
}
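
/*
 * Example usage (a sketch, not compiled here): each primitive retries its
 * compare-and-swap until it wins, so concurrent updaters cannot lose counts.
 * refcnt is hypothetical:
 *
 *	static uint32_t refcnt;
 *	(void) hw_atomic_add(&refcnt, 1);		// returns the new value
 *	if (hw_compare_and_store(1, 0, &refcnt)) {
 *		// we atomically transitioned refcnt from 1 to 0
 *	}
 */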
#ifdef	MACH_ASSERT
/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void machine_callstack(
	__unused natural_t	*buf,
	__unused vm_size_t	callstack_max)
{
}
#endif	/* MACH_ASSERT */
void fillPage(ppnum_t pa, unsigned int fill)
{
	pmap_paddr_t src;
	int i;
	int cnt = PAGE_SIZE / sizeof(unsigned int);
	unsigned int *addr;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM2)
		panic("fillPage: CMAP busy");
	src = (pmap_paddr_t)i386_ptob(pa);
	*(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
	    INTEL_PTE_REF | INTEL_PTE_MOD;

	for (i = 0, addr = (unsigned int *)CA2; i < cnt; i++)
		*addr++ = fill;

	*(pt_entry_t *) CM2 = 0;
	mp_enable_preemption();
}
static inline void __sfence(void)
{
	__asm__ volatile("sfence");
}
static inline void __mfence(void)
{
	__asm__ volatile("mfence");
}
static inline void __wbinvd(void)
{
	__asm__ volatile("wbinvd");
}
static inline void __clflush(void *ptr)
{
	__asm__ volatile(".byte 0x0F; .byte 0xae; .byte 0x38" : : "a" (ptr));
}
void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
	if (cpuid_features() & CPUID_FEATURE_CLFSH)
	{
		uint32_t  linesize = cpuid_info()->cache_linesize;
		addr64_t  addr;
		uint32_t  offset, chunk;
		boolean_t istate;

		istate = ml_set_interrupts_enabled(FALSE);

		if (*(pt_entry_t *) CM2)
			panic("cache_flush_page_phys: CMAP busy");

		/* align the start of the range to a cache line, then walk it a page at a time */
		offset = pa & (linesize - 1);
		count += offset;
		addr   = pa - offset;
		offset = addr & ((addr64_t) (page_size - 1));
		chunk  = page_size - offset;

		do
		{
			if (chunk > count)
				chunk = count;

			*(pt_entry_t *) CM2 = i386_ptob(atop_64(addr)) | INTEL_PTE_VALID;

			for (; offset < chunk; offset += linesize)
				__clflush((void *)(((u_int)CA2) + offset));

			count -= chunk;
			addr  += chunk;
			chunk  = page_size;
			offset = 0;
		}
		while (count);

		*(pt_entry_t *) CM2 = 0;

		(void) ml_set_interrupts_enabled(istate);
	}
	else
		__wbinvd();
	__mfence();
}
void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
	dcache_incoherent_io_store64(pa, count);
}
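
/*
 * Example usage (a sketch, not compiled here; buf_phys and buf_len describe
 * a hypothetical buffer a device is about to read via DMA):
 *
 *	dcache_incoherent_io_store64(buf_phys, buf_len);	// push dirty lines to memory
 */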
void
flush_dcache64(__unused addr64_t addr,
	       __unused unsigned count,
	       __unused int phys)
{
}

void
invalidate_icache64(__unused addr64_t addr,
		    __unused unsigned count,
		    __unused int phys)
{
}
kern_return_t copypv(addr64_t src64,
		     addr64_t snk64,
		     unsigned int size,
		     int which)
{
	vm_map_t map;
	kern_return_t ret;
	vm_offset_t source, sink;
	vm_offset_t vaddr;
	vm_offset_t paddr;
	spl_t s;
	unsigned int lop, csize;
	int needtran, bothphys;
	vm_prot_t prot;
	pt_entry_t *ptep;

	map = (which & cppvKmap) ? kernel_map : current_map_fast();

	source = low32(src64);
	sink = low32(snk64);

	if((which & (cppvPsrc | cppvPsnk)) == 0 ) {	/* Make sure that only one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");	/* Not allowed */
	}

	bothphys = 1;					/* Assume both are physical */

	if(!(which & cppvPsnk)) {			/* Is there a virtual page here? */
		vaddr = sink;				/* Sink side is virtual */
		bothphys = 0;				/* Show both aren't physical */
		prot = VM_PROT_READ | VM_PROT_WRITE;	/* Sink always must be read/write */
	} else /* if(!(which & cppvPsrc)) */ {		/* Source side is virtual */
		vaddr = source;				/* Source side is virtual */
		bothphys = 0;				/* Show both aren't physical */
		prot = VM_PROT_READ;			/* Virtual source is always read only */
	}

	needtran = 1;					/* Show we need to map the virtual the first time */
	s = splhigh();					/* Don't bother me */
	while(size) {

		if(!bothphys && (needtran || !(vaddr & 4095LL))) {	/* If first time or we stepped onto a new page, we need to translate */

			needtran = 0;

			while(1) {
				ptep = pmap_mapgetpte(map, vaddr);
				if((0 == ptep) || ((*ptep & INTEL_PTE_VALID) == 0)) {
					splx(s);		/* Restore the interrupt level */
					ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0);	/* Didn't find it, try to fault it in... */

					if(ret != KERN_SUCCESS) return KERN_FAILURE;	/* Didn't find any, return no good... */

					s = splhigh();		/* Don't bother me */
					continue;		/* Go try for the map again... */
				}

				/* Note that we have to have the destination writable.  So, if we already have it, or we are mapping the source,
				   we can just leave.
				*/
				if((which & cppvPsnk) || (*ptep & INTEL_PTE_WRITE)) break;	/* We got it mapped R/W or the source is not virtual, leave... */
				splx(s);			/* Restore the interrupt level */

				ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0);	/* check for a COW area */
				if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* We couldn't get it R/W, leave in disgrace... */
				s = splhigh();			/* Don't bother me */
			}

			paddr = pte_to_pa(*ptep) | (vaddr & 4095);

			if(which & cppvPsrc) sink = paddr;	/* If source is physical, then the sink is virtual */
			else source = paddr;			/* Otherwise the source is */
		}

		lop = (unsigned int)(4096LL - (sink & 4095LL));		/* Assume sink smallest */
		if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL));	/* No, source is smaller */
		csize = size;						/* Assume we can copy it all */
		if(lop < size) csize = lop;				/* Nope, we can't do it all */

		if(which & cppvFsrc) flush_dcache64((addr64_t)source, csize, 1);	/* If requested, flush source before move */
		if(which & cppvFsnk) flush_dcache64((addr64_t)sink, csize, 1);		/* If requested, flush sink before move */

		bcopy_phys((addr64_t)source, (addr64_t)sink, csize);	/* Do a physical copy, virtually */

		if(which & cppvFsrc) flush_dcache64((addr64_t)source, csize, 1);	/* If requested, flush source after move */
		if(which & cppvFsnk) flush_dcache64((addr64_t)sink, csize, 1);		/* If requested, flush sink after move */

/*
 *		Note that for certain ram disk flavors, we may be copying outside of known memory.
 *		Therefore, before we try to mark it modified, we check if it exists.
 */

		if( !(which & cppvNoModSnk)) {
			if (phys_page_exists((ppnum_t)sink >> 12))
				mapping_set_mod((ppnum_t)(sink >> 12));		/* Make sure we know that it is modified */
		}
		if( !(which & cppvNoRefSrc)) {
			if (phys_page_exists((ppnum_t)source >> 12))
				mapping_set_ref((ppnum_t)(source >> 12));	/* Make sure we know that it is referenced */
		}

		size = size - csize;					/* Calculate what is left */
		vaddr = vaddr + csize;					/* Move to next sink address */
		source = source + csize;				/* Bump source to next physical address */
		sink = sink + csize;					/* Bump sink to next physical address */
	}

	splx(s);						/* Open up for interrupts */

	return KERN_SUCCESS;
}
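
/*
 * Example usage (a sketch, not compiled here): copy nbytes from a physical
 * source page into a virtual sink in the current task's map, flushing the
 * sink around the move.  src_phys, usr_vaddr and nbytes are hypothetical:
 *
 *	kern_return_t kr;
 *	kr = copypv(src_phys, usr_vaddr, nbytes, cppvPsrc | cppvFsnk);
 */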
void switch_to_serial_console(void)
{
}

addr64_t	vm_last_addr;

void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}

void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}
void
cache_flush_page_phys(ppnum_t pa)
{
	boolean_t	istate;
	int		i;
	unsigned int	*cacheline_addr;
	int		cacheline_size = cpuid_info()->cache_linesize;
	int		cachelines_in_page = PAGE_SIZE/cacheline_size;

	/*
	 * If there's no clflush instruction, we're sadly forced to use wbinvd.
	 */
	if (!(cpuid_features() & CPUID_FEATURE_CLFSH)) {
		asm volatile("wbinvd" : : : "memory");
		return;
	}

	istate = ml_set_interrupts_enabled(FALSE);

	if (*(pt_entry_t *) CM2)
		panic("cache_flush_page_phys: CMAP busy");

	*(pt_entry_t *) CM2 = i386_ptob(pa) | INTEL_PTE_VALID;

	/* Flush the mapped page one cache line at a time */
	for (i = 0, cacheline_addr = (unsigned int *)CA2;
	     i < cachelines_in_page;
	     i++, cacheline_addr += cacheline_size/sizeof(unsigned int)) {
		asm volatile("clflush %0" : : "m" (*cacheline_addr));
	}

	*(pt_entry_t *) CM2 = 0;

	(void) ml_set_interrupts_enabled(istate);
}