/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_assert.h>

#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

/* XXX - should be gone from here */
extern void         invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void         flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t    phys_page_exists(ppnum_t);
extern pt_entry_t   *pmap_mapgetpte(vm_map_t, vm_offset_t);
extern void         bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void         pmap_set_reference(ppnum_t pn);
extern void         mapping_set_mod(ppnum_t pa);
extern void         mapping_set_ref(ppnum_t pn);
extern void         switch_to_serial_console(void);
extern kern_return_t copyp2p(vm_offset_t source,
                             vm_offset_t dest,
                             unsigned int size,
                             unsigned int flush_action);
extern void         fillPage(ppnum_t pa, unsigned int fill);
extern void         ovbcopy(const char *from,
                            char *to,
                            vm_size_t bytes);
void machine_callstack(natural_t *buf, vm_size_t callstack_max);

#define value_64bit(value)  ((value) & 0xFFFFFFFF00000000LL)
#define low32(x)            ((unsigned int)((x) & 0x00000000FFFFFFFFLL))
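
/*
 * Illustrative only (not part of the original source): value_64bit() tests
 * whether any bits above the low 32 are set in a 64-bit quantity, and
 * low32() truncates to the 32-bit part this code can actually map.  With a
 * hypothetical physical address:
 *
 *	addr64_t pa = 0x000000012345A000ULL;
 *	value_64bit(pa)	-> nonzero (bit 32 set, the address is above 4GB)
 *	low32(pa)	-> 0x2345A000 (upper 32 bits discarded)
 */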

void
bzero_phys(
	   addr64_t src64,
	   vm_size_t bytes)
{
  vm_offset_t src = low32(src64);
  pt_entry_t save2;

  mp_disable_preemption();
  if (*(pt_entry_t *) CM2)
    panic("bzero_phys: CMAP busy");

  *(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
    INTEL_PTE_REF | INTEL_PTE_MOD;
  save2 = *(pt_entry_t *)CM2;
  invlpg((u_int)CA2);		/* flush the stale TLB entry for the mapping window */

  bzero((void *)((unsigned int)CA2 | (src & INTEL_OFFMASK)), bytes);
  if (save2 != *(pt_entry_t *)CM2) panic("bzero_phys CMAP changed");
  *(pt_entry_t *) CM2 = 0;
  mp_enable_preemption();
}

/*
 * copy 'size' bytes from physical to physical address
 * the caller must validate the physical ranges
 *
 * if flush_action == 0, no cache flush necessary
 * if flush_action == 1, flush the source
 * if flush_action == 2, flush the dest
 * if flush_action == 3, flush both source and dest
 */

kern_return_t
copyp2p(vm_offset_t source,
	vm_offset_t dest,
	unsigned int size,
	unsigned int flush_action)
{

	switch (flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}

	bcopy_phys((addr64_t)source, (addr64_t)dest, (vm_size_t)size);	/* Do a physical copy */

	switch (flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}

	return KERN_SUCCESS;
}
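
/*
 * Illustrative only (not part of the original source): a caller that has
 * already validated both physical ranges could copy one page and flush
 * only the destination like this:
 *
 *	kern_return_t kr;
 *	kr = copyp2p(src_phys, dst_phys, PAGE_SIZE, 2);	-- 2 == flush the dest
 *	assert(kr == KERN_SUCCESS);
 *
 * src_phys and dst_phys are hypothetical, previously validated physical
 * addresses, each contained within a single page.
 */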

/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */

void
bcopy_phys(
	   addr64_t src64,
	   addr64_t dst64,
	   vm_size_t bytes)
{
  vm_offset_t src = low32(src64);
  vm_offset_t dst = low32(dst64);
  pt_entry_t save1, save2;

  /* ensure we stay within a page */
  if ( (((src & (NBPG-1)) + bytes) > NBPG) ||
       (((dst & (NBPG-1)) + bytes) > NBPG) ) panic("bcopy_phys");

  mp_disable_preemption();
  if (*(pt_entry_t *) CM1 || *(pt_entry_t *) CM2)
    panic("bcopy_phys: CMAP busy");

  *(pt_entry_t *) CM1 = INTEL_PTE_VALID | (src & PG_FRAME) | INTEL_PTE_REF;
  *(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (dst & PG_FRAME) |
    INTEL_PTE_REF | INTEL_PTE_MOD;
  save1 = *(pt_entry_t *)CM1; save2 = *(pt_entry_t *)CM2;
  invlpg((u_int)CA1);		/* flush stale TLB entries for both mapping windows */
  invlpg((u_int)CA2);

  bcopy((void *) ((uintptr_t)CA1 | (src & INTEL_OFFMASK)),
	(void *) ((uintptr_t)CA2 | (dst & INTEL_OFFMASK)), bytes);
  if ( (save1 != *(pt_entry_t *)CM1) || (save2 != *(pt_entry_t *)CM2)) panic("bcopy_phys CMAP changed");
  *(pt_entry_t *) CM1 = 0;
  *(pt_entry_t *) CM2 = 0;
  mp_enable_preemption();
}

/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 *           them correctly.
 */

void
ovbcopy(
	const char	*from,
	char		*to,
	vm_size_t	bytes)		/* num bytes to copy */
{
	/* Assume that bcopy copies left-to-right (low addr first). */
	if (from + bytes <= to || to + bytes <= from || to == from)
		bcopy_no_overwrite(from, to, bytes);	/* non-overlapping or no-op */
	else if (from > to)
		bcopy_no_overwrite(from, to, bytes);	/* overlapping but OK */
	else {
		/* to > from: overlapping, and must copy right-to-left. */
		from += bytes - 1;
		to += bytes - 1;
		while (bytes-- > 0)
			*to-- = *from--;
	}
}
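
/*
 * Illustrative only (not part of the original source): with a buffer
 * containing "abcdef", ovbcopy(buf, buf + 2, 4) must copy right-to-left so
 * the source bytes are not clobbered before they are read, yielding
 * "ababcd"; a naive left-to-right copy over the same overlap would keep
 * re-reading already-overwritten bytes and produce "ababab" instead.
 */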

/*
 *  Read data from a physical address. Memory should not be cache inhibited.
 */

static unsigned int
ml_phys_read_data( vm_offset_t paddr, int size )
{
    unsigned int result;
    pt_entry_t save;

    mp_disable_preemption();
    if (*(pt_entry_t *) CM3)
      panic("ml_phys_read_data: CMAP busy");

    *(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
    save = *(pt_entry_t *)CM3;
    invlpg((u_int)CA3);		/* flush the stale TLB entry for the mapping window */

    switch (size) {
        unsigned char  s1;
        unsigned short s2;

    case 1:
        s1 = *(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
        result = s1;
        break;
    case 2:
        s2 = *(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
        result = s2;
        break;
    case 4:
    default:
        result = *(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
        break;
    }

    if (save != *(pt_entry_t *)CM3) panic("ml_phys_read_data CMAP changed");
    *(pt_entry_t *) CM3 = 0;
    mp_enable_preemption();

    return result;
}

static unsigned long long
ml_phys_read_long_long( vm_offset_t paddr )
{
    unsigned long long result;
    pt_entry_t save;

    mp_disable_preemption();
    if (*(pt_entry_t *) CM3)
      panic("ml_phys_read_data: CMAP busy");

    *(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
    save = *(pt_entry_t *)CM3;
    invlpg((u_int)CA3);		/* flush the stale TLB entry for the mapping window */

    result = *(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));

    if (save != *(pt_entry_t *)CM3) panic("ml_phys_read_data CMAP changed");
    *(pt_entry_t *) CM3 = 0;
    mp_enable_preemption();

    return result;
}

unsigned int ml_phys_read(vm_offset_t paddr)
{
    return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
    return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
    return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
    return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
    return ml_phys_read_data(paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
    return ml_phys_read_data(low32(paddr64), 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
    return ml_phys_read_data(paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
    return ml_phys_read_data(low32(paddr64), 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
    return ml_phys_read_long_long(paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr)
{
    return ml_phys_read_long_long(low32(paddr));
}
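
/*
 * Illustrative only (not part of the original source): the accessors above
 * differ only in operand width and in whether they take a 64-bit physical
 * address (the _64 variants truncate with low32()).  For a hypothetical
 * physical address probe_pa:
 *
 *	unsigned int b = ml_phys_read_byte(probe_pa);	-- 8-bit read
 *	unsigned int w = ml_phys_read(probe_pa);	-- 32-bit read
 */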

/*
 *  Write data to a physical address. Memory should not be cache inhibited.
 */

static void
ml_phys_write_data( vm_offset_t paddr, unsigned long data, int size )
{
    pt_entry_t save;

    mp_disable_preemption();
    if (*(pt_entry_t *) CM3)
      panic("ml_phys_write_data: CMAP busy");

    *(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
      INTEL_PTE_REF | INTEL_PTE_MOD;
    save = *(pt_entry_t *)CM3;
    invlpg((u_int)CA3);		/* flush the stale TLB entry for the mapping window */

    switch (size) {
    case 1:
	*(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned char)data;
	break;
    case 2:
	*(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned short)data;
	break;
    case 4:
    default:
	*(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;
	break;
    }

    if (save != *(pt_entry_t *)CM3) panic("ml_phys_write_data CMAP changed");
    *(pt_entry_t *) CM3 = 0;
    mp_enable_preemption();
}

static void
ml_phys_write_long_long( vm_offset_t paddr, unsigned long long data )
{
    pt_entry_t save;

    mp_disable_preemption();
    if (*(pt_entry_t *) CM3)
      panic("ml_phys_write_data: CMAP busy");

    *(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
      INTEL_PTE_REF | INTEL_PTE_MOD;
    save = *(pt_entry_t *)CM3;
    invlpg((u_int)CA3);		/* flush the stale TLB entry for the mapping window */

    *(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;

    if (save != *(pt_entry_t *)CM3) panic("ml_phys_write_data CMAP changed");
    *(pt_entry_t *) CM3 = 0;
    mp_enable_preemption();
}

void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data(paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr, unsigned int data)
{
    ml_phys_write_data(low32(paddr), data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data(paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr, unsigned int data)
{
    ml_phys_write_data(low32(paddr), data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr, unsigned int data)
{
    ml_phys_write_data(low32(paddr), data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr, unsigned int data)
{
    ml_phys_write_data(low32(paddr), data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
    ml_phys_write_long_long(paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr, unsigned long long data)
{
    ml_phys_write_long_long(low32(paddr), data);
}
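
/*
 * Illustrative only (not part of the original source): the write accessors
 * mirror the read accessors above.  A read-modify-write of a 32-bit word at
 * a hypothetical physical address reg_pa could look like:
 *
 *	unsigned int v = ml_phys_read(reg_pa);
 *	ml_phys_write(reg_pa, v | 0x1);
 *
 * As the comments above note, the target memory should not be cache
 * inhibited; these routines go through a normal cached mapping window.
 */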

/* PCI config cycle probing
 *
 *
 *      Read the memory location at physical address paddr.
 *  This is a part of a device probe, so there is a good chance we will
 *  have a machine check here. So we have to be able to handle that.
 *  We assume that machine checks are enabled both in MSR and HIDs
 */

boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
    *val = ml_phys_read(paddr);
    return TRUE;
}

/*
 *  Read the memory location at physical address paddr.
 *  This is a part of a device probe, so there is a good chance we will
 *  have a machine check here. So we have to be able to handle that.
 *  We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read_64(addr64_t paddr, unsigned int *val)
{
    *val = ml_phys_read_64(paddr);
    return TRUE;
}
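
/*
 * Illustrative only (not part of the original source): a device probe would
 * call the wrapper rather than ml_phys_read() directly and check the boolean
 * result; note that this particular implementation always reports success.
 *
 *	unsigned int val;
 *	if (ml_probe_read(config_pa, &val))	-- config_pa is hypothetical
 *		... use val ...
 */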

int
bcmp(const void *pa, const void *pb, size_t len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0)
		return 0;
	do
		if (*a++ != *b++)
			break;
	while (--len);

	return len;	/* 0 if the ranges matched, nonzero otherwise */
}

int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n != 0);
	}
	return 0;
}

/*
 * strlen returns the number of characters in "string" preceding
 * the terminating null character.
 */

size_t
strlen(
	register const char *string)
{
	register const char *ret = string;

	while (*string++ != '\0')
		continue;
	return string - 1 - ret;
}

#include <libkern/OSAtomic.h>

uint32_t
hw_atomic_add(
	uint32_t	*dest,
	uint32_t	delt)
{
	uint32_t	oldValue;
	uint32_t	newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue + delt);
	} while (!OSCompareAndSwap((UInt32)oldValue,
					(UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_sub(
	uint32_t	*dest,
	uint32_t	delt)
{
	uint32_t	oldValue;
	uint32_t	newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue - delt);
	} while (!OSCompareAndSwap((UInt32)oldValue,
					(UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_or(
	uint32_t	*dest,
	uint32_t	mask)
{
	uint32_t	oldValue;
	uint32_t	newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue | mask);
	} while (!OSCompareAndSwap((UInt32)oldValue,
					(UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_and(
	uint32_t	*dest,
	uint32_t	mask)
{
	uint32_t	oldValue;
	uint32_t	newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue & mask);
	} while (!OSCompareAndSwap((UInt32)oldValue,
					(UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_compare_and_store(
	uint32_t	oldval,
	uint32_t	newval,
	uint32_t	*dest)
{
	return OSCompareAndSwap((UInt32)oldval, (UInt32)newval, (UInt32 *)dest);
}
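
/*
 * Illustrative only (not part of the original source): hw_compare_and_store
 * is the compare-and-swap primitive the retry loops above are built on.  A
 * lock-free "set a flag bit" update over a hypothetical word could be:
 *
 *	uint32_t old;
 *	do {
 *		old = *flags;
 *	} while (!hw_compare_and_store(old, old | FLAG_BIT, flags));
 *
 * flags and FLAG_BIT are hypothetical names used only for this sketch.
 */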

#if	MACH_ASSERT

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void machine_callstack(
	__unused natural_t	*buf,
	__unused vm_size_t	callstack_max)
{
}

#endif	/* MACH_ASSERT */

void fillPage(ppnum_t pa, unsigned int fill)
{
  pmap_paddr_t src;
  int i;
  int cnt = PAGE_SIZE/sizeof(unsigned int);
  unsigned int *addr;

  mp_disable_preemption();
  if (*(pt_entry_t *) CM2)
    panic("fillPage: CMAP busy");

  src = (pmap_paddr_t)i386_ptob(pa);
  *(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
    INTEL_PTE_REF | INTEL_PTE_MOD;
  invlpg((u_int)CA2);		/* flush the stale TLB entry for the mapping window */

  for (i = 0, addr = (unsigned int *)CA2; i < cnt; i++)
    *addr++ = fill;

  *(pt_entry_t *) CM2 = 0;
  mp_enable_preemption();
}

static inline void __sfence(void)
{
    __asm__ volatile("sfence");
}
static inline void __mfence(void)
{
    __asm__ volatile("mfence");
}
static inline void __wbinvd(void)
{
    __asm__ volatile("wbinvd");
}
static inline void __clflush(void *ptr)
{
    /* 0F AE /7 with a (%eax) memory operand, i.e. clflush (%eax) */
    __asm__ volatile(".byte 0x0F; .byte 0xae; .byte 0x38" : : "a" (ptr));
}

void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
    if (cpuid_features() & CPUID_FEATURE_CLFSH)
    {
        uint32_t  linesize = cpuid_info()->cache_linesize;
        addr64_t  addr;
        uint32_t  offset, chunk;
        boolean_t istate;

        istate = ml_set_interrupts_enabled(FALSE);

        if (*(pt_entry_t *) CM2)
            panic("cache_flush_page_phys: CMAP busy");

        offset = pa & (linesize - 1);
        count += offset;
        addr   = pa - offset;
        offset = addr & ((addr64_t) (page_size - 1));
        chunk  = page_size - offset;

        do
        {
            if (chunk > count)
                chunk = count;

            *(pt_entry_t *) CM2 = i386_ptob(atop_64(addr)) | INTEL_PTE_VALID;
            invlpg((u_int)CA2);		/* flush the stale TLB entry for the mapping window */

            for (; offset < chunk; offset += linesize)
                __clflush((void *)(((u_int)CA2) + offset));

            count -= chunk;
            addr  += chunk;
            chunk  = page_size;
            offset = 0;
        }
        while (count);

        *(pt_entry_t *) CM2 = 0;

        (void) ml_set_interrupts_enabled(istate);
    }
    else
        __wbinvd();	/* no clflush: fall back to flushing the whole cache */
    __mfence();
}

void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
    return(dcache_incoherent_io_store64(pa, count));
}

void
flush_dcache64(__unused addr64_t addr,
	       __unused unsigned count,
	       __unused int phys)
{
}

void
invalidate_icache64(__unused addr64_t addr,
		    __unused unsigned count,
		    __unused int phys)
{
}

kern_return_t copypv(addr64_t src64,
		     addr64_t snk64,
		     unsigned int size,
		     int which)
{
	vm_map_t map;
	kern_return_t ret;
	vm_offset_t source, sink;
	vm_offset_t vaddr;
	vm_offset_t paddr;
	spl_t s;
	unsigned int lop, csize;
	int needtran, bothphys;
	vm_prot_t prot;
	pt_entry_t *ptep;

	map = (which & cppvKmap) ? kernel_map : current_map_fast();

	source = low32(src64);
	sink = low32(snk64);

	if((which & (cppvPsrc | cppvPsnk)) == 0 ) {	/* Make sure that only one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");	/* Not allowed */
	}

	bothphys = 1;	/* Assume both are physical */

	if(!(which & cppvPsnk)) {	/* Is there a virtual page here? */
		vaddr = sink;	/* Sink side is virtual */
		bothphys = 0;	/* Show both aren't physical */
		prot = VM_PROT_READ | VM_PROT_WRITE;	/* Sink always must be read/write */
	} else /* if(!(which & cppvPsrc)) */ {	/* Source side is virtual */
		vaddr = source;	/* Source side is virtual */
		bothphys = 0;	/* Show both aren't physical */
		prot = VM_PROT_READ;	/* Virtual source is always read only */
	}

	needtran = 1;	/* Show we need to map the virtual the first time */
	s = splhigh();	/* Don't bother me */

	while(size) {

		if(!bothphys && (needtran || !(vaddr & 4095LL))) {	/* If first time or we stepped onto a new page, we need to translate */
			needtran = 0;
			while(1) {
				ptep = pmap_mapgetpte(map, vaddr);
				if((0 == ptep) || ((*ptep & INTEL_PTE_VALID) == 0)) {
					splx(s);	/* Restore the interrupt level */
					ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0);	/* Didn't find it, try to fault it in... */

					if(ret != KERN_SUCCESS) return KERN_FAILURE;	/* Didn't find any, return no good... */

					s = splhigh();	/* Don't bother me */
					continue;	/* Go try for the map again... */
				}

				/* Note that we have to have the destination writable.  So, if we already have it,
				   or we are mapping the source, we can just leave. */
				if((which & cppvPsnk) || (*ptep & INTEL_PTE_WRITE)) break;	/* We got it mapped R/W or the source is not virtual, leave... */
				splx(s);	/* Restore the interrupt level */

				ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0);	/* check for a COW area */
				if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* We couldn't get it R/W, leave in disgrace... */
				s = splhigh();	/* Don't bother me */
			}

			paddr = pte_to_pa(*ptep) | (vaddr & 4095);

			if(which & cppvPsrc) sink = paddr;	/* If source is physical, then the sink is virtual */
			else source = paddr;	/* Otherwise the source is */
		}

		lop = (unsigned int)(4096LL - (sink & 4095LL));	/* Assume sink smallest */
		if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL));	/* No, source is smaller */

		csize = size;	/* Assume we can copy it all */
		if(lop < size) csize = lop;	/* Nope, we can't do it all */

		if(which & cppvFsrc) flush_dcache64((addr64_t)source, csize, 1);	/* If requested, flush source before move */
		if(which & cppvFsnk) flush_dcache64((addr64_t)sink, csize, 1);	/* If requested, flush sink before move */

		bcopy_phys((addr64_t)source, (addr64_t)sink, csize);	/* Do a physical copy, virtually */

		if(which & cppvFsrc) flush_dcache64((addr64_t)source, csize, 1);	/* If requested, flush source after move */
		if(which & cppvFsnk) flush_dcache64((addr64_t)sink, csize, 1);	/* If requested, flush sink after move */

/*
 *		Note that for certain ram disk flavors, we may be copying outside of known memory.
 *		Therefore, before we try to mark it modified, we check if it exists.
 */

		if( !(which & cppvNoModSnk)) {
			if (phys_page_exists((ppnum_t)sink >> 12))
				mapping_set_mod((ppnum_t)(sink >> 12));	/* Make sure we know that it is modified */
		}
		if( !(which & cppvNoRefSrc)) {
			if (phys_page_exists((ppnum_t)source >> 12))
				mapping_set_ref((ppnum_t)(source >> 12));	/* Make sure we know that it is referenced */
		}

		size = size - csize;	/* Calculate what is left */
		vaddr = vaddr + csize;	/* Move to next sink address */
		source = source + csize;	/* Bump source to next physical address */
		sink = sink + csize;	/* Bump sink to next physical address */
	}

	splx(s);	/* Open up for interrupts */

	return KERN_SUCCESS;
}
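
/*
 * Illustrative only (not part of the original source): copying 'len' bytes
 * from a physical source into a kernel virtual sink, letting the routine
 * mark the sink pages modified, could look like:
 *
 *	kern_return_t kr;
 *	kr = copypv(src_pa, (addr64_t)(uintptr_t)dst_va, len,
 *		    cppvPsrc | cppvKmap);	-- only the source is physical
 *	if (kr != KERN_SUCCESS)
 *		... the sink page could not be faulted in writable ...
 *
 * src_pa, dst_va and len are hypothetical; the cppv* flags are the ones
 * tested in the routine above.
 */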

void switch_to_serial_console(void)
{
}

addr64_t	vm_last_addr;

void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);	/* assumption: mirrors mapping_set_ref() below */
}

void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}

void
cache_flush_page_phys(ppnum_t pa)
{
	boolean_t	istate;
	int		i;
	unsigned int	*cacheline_addr;
	int		cacheline_size = cpuid_info()->cache_linesize;
	int		cachelines_in_page = PAGE_SIZE/cacheline_size;

	/*
	 * If there's no clflush instruction, we're sadly forced to use wbinvd.
	 */
	if (!(cpuid_features() & CPUID_FEATURE_CLFSH)) {
		asm volatile("wbinvd" : : : "memory");
		return;
	}

	istate = ml_set_interrupts_enabled(FALSE);

	if (*(pt_entry_t *) CM2)
		panic("cache_flush_page_phys: CMAP busy");

	*(pt_entry_t *) CM2 = i386_ptob(pa) | INTEL_PTE_VALID;
	invlpg((u_int)CA2);	/* flush the stale TLB entry for the mapping window */

	for (i = 0, cacheline_addr = (unsigned int *)CA2;
	     i < cachelines_in_page;
	     i++, cacheline_addr += cacheline_size/sizeof(unsigned int)) {
		/* flush the cache line itself (pointer stride is in unsigned ints, one line per pass) */
		asm volatile("clflush %0" : : "m" (*cacheline_addr));
	}

	*(pt_entry_t *) CM2 = 0;

	(void) ml_set_interrupts_enabled(istate);
}