/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_assert.h>

#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>
/* XXX - should be gone from here */
extern void		invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void		flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t	phys_page_exists(ppnum_t);
extern pt_entry_t	*pmap_mapgetpte(vm_map_t, vm_offset_t);
extern void		bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void		pmap_set_reference(ppnum_t pn);
extern void		mapping_set_mod(ppnum_t pa);
extern void		mapping_set_ref(ppnum_t pn);
extern void		switch_to_serial_console(void);
extern kern_return_t	copyp2p(vm_offset_t source, vm_offset_t dest,
				unsigned int size, unsigned int flush_action);
extern void		fillPage(ppnum_t pa, unsigned int fill);
extern void		ovbcopy(const char *from, char *to, vm_size_t bytes);
void machine_callstack(natural_t *buf, vm_size_t callstack_max);
#define value_64bit(value)	((value) & 0xFFFFFFFF00000000LL)
#define low32(x)		((unsigned int)((x) & 0x00000000FFFFFFFFLL))
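
/*
 * Example (illustrative only): how the two helpers above carve up a 64-bit
 * physical address.  The constant used here is an arbitrary, hypothetical
 * value chosen for the sketch.
 */
static inline void
example_split_addr64(void)
{
	addr64_t example = 0x0000000180003004ULL;	/* hypothetical address */

	if (value_64bit(example)) {
		/* upper 32 bits are non-zero: the address lies above 4GB and
		 * cannot be reached through the 32-bit low32() paths */
	}
	(void) low32(example);				/* yields 0x80003004 */
}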
void
bzero_phys(addr64_t src64, vm_size_t bytes)
{
	vm_offset_t src = low32(src64);
	pt_entry_t save2;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM2)
		panic("bzero_phys: CMAP busy");

	*(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
		INTEL_PTE_REF | INTEL_PTE_MOD;
	save2 = *(pt_entry_t *)CM2;

	bzero((void *)((unsigned int)CA2 | (src & INTEL_OFFMASK)), bytes);
	if (save2 != *(pt_entry_t *)CM2)
		panic("bzero_phys CMAP changed");
	*(pt_entry_t *) CM2 = 0;
	mp_enable_preemption();
}
/*
 * copy 'size' bytes from physical to physical address
 * the caller must validate the physical ranges
 *
 * if flush_action == 0, no cache flush necessary
 * if flush_action == 1, flush the source
 * if flush_action == 2, flush the dest
 * if flush_action == 3, flush both source and dest
 */
kern_return_t
copyp2p(vm_offset_t source,
	vm_offset_t dest,
	unsigned int size,
	unsigned int flush_action)
{
	switch (flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}

	bcopy_phys((addr64_t)source, (addr64_t)dest, (vm_size_t)size);	/* Do a physical copy */

	switch (flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}

	return KERN_SUCCESS;
}
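
/*
 * Example (illustrative only): a caller using the flush_action convention
 * documented above.  The physical addresses are hypothetical values.
 */
static inline kern_return_t
example_copyp2p_flush_both(void)
{
	vm_offset_t src_phys  = 0x100000;	/* hypothetical source page */
	vm_offset_t dest_phys = 0x200000;	/* hypothetical destination page */

	/* flush_action == 3: flush both source and destination around the copy */
	return copyp2p(src_phys, dest_phys, PAGE_SIZE, 3);
}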
/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */
void
bcopy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes)
{
	vm_offset_t src = low32(src64);
	vm_offset_t dst = low32(dst64);
	pt_entry_t save1, save2;

	/* ensure we stay within a page */
	if ((((src & (NBPG - 1)) + bytes) > NBPG) ||
	    (((dst & (NBPG - 1)) + bytes) > NBPG))
		panic("bcopy_phys");

	mp_disable_preemption();
	if (*(pt_entry_t *) CM1 || *(pt_entry_t *) CM2)
		panic("bcopy_phys: CMAP busy");

	*(pt_entry_t *) CM1 = INTEL_PTE_VALID | (src & PG_FRAME) | INTEL_PTE_REF;
	*(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (dst & PG_FRAME) |
		INTEL_PTE_REF | INTEL_PTE_MOD;
	save1 = *(pt_entry_t *)CM1;
	save2 = *(pt_entry_t *)CM2;

	bcopy((void *) ((uintptr_t)CA1 | (src & INTEL_OFFMASK)),
	      (void *) ((uintptr_t)CA2 | (dst & INTEL_OFFMASK)), bytes);
	if ((save1 != *(pt_entry_t *)CM1) || (save2 != *(pt_entry_t *)CM2))
		panic("bcopy_phys CMAP changed");
	*(pt_entry_t *) CM1 = 0;
	*(pt_entry_t *) CM2 = 0;
	mp_enable_preemption();
}
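
/*
 * Example (illustrative only): bcopy_phys takes full 64-bit physical
 * addresses; both ranges must stay within their pages.  The addresses are
 * hypothetical values.
 */
static inline void
example_bcopy_phys(void)
{
	addr64_t src_phys = 0x100000ULL;	/* hypothetical source */
	addr64_t dst_phys = 0x200000ULL;	/* hypothetical destination */

	bcopy_phys(src_phys, dst_phys, 64);	/* 64 bytes, well inside one page */
}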
/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 *           them correctly.
 */
void
ovbcopy(
	const char	*from,
	char		*to,
	vm_size_t	bytes)		/* num bytes to copy */
{
	/* Assume that bcopy copies left-to-right (low addr first). */
	if (from + bytes <= to || to + bytes <= from || to == from)
		bcopy_no_overwrite(from, to, bytes);	/* non-overlapping or no-op*/
	else if (from > to)
		bcopy_no_overwrite(from, to, bytes);	/* overlapping but OK */
	else {
		/* to > from: overlapping, and must copy right-to-left. */
		from += bytes - 1;
		to += bytes - 1;
		while (bytes-- > 0)
			*to-- = *from--;
	}
}
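
/*
 * Example (illustrative only): ovbcopy remains correct when the ranges
 * overlap and the destination is above the source, which forces the
 * right-to-left path.
 */
static inline void
example_ovbcopy_overlap(void)
{
	char buf[8] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', '\0' };

	/* shift the first four bytes right by two; to > from, so the
	 * copy must run right-to-left to avoid clobbering the source */
	ovbcopy(&buf[0], &buf[2], 4);		/* buf becomes "ababcdg" */
}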
/*
 *  Read data from a physical address. Memory should not be cache inhibited.
 */
static unsigned int
ml_phys_read_data(vm_offset_t paddr, int size)
{
	unsigned int result;
	unsigned char s1;
	unsigned short s2;
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_read_data: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
	save = *(pt_entry_t *)CM3;

	switch (size) {
	case 1:
		s1 = *(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
		result = s1;
		break;
	case 2:
		s2 = *(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
		result = s2;
		break;
	case 4:
	default:
		result = *(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
		break;
	}

	if (save != *(pt_entry_t *)CM3)
		panic("ml_phys_read_data CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();

	return result;
}
static unsigned long long
ml_phys_read_long_long(vm_offset_t paddr)
{
	unsigned long long result;
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_read_data: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
	save = *(pt_entry_t *)CM3;

	result = *(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));

	if (save != *(pt_entry_t *)CM3)
		panic("ml_phys_read_data CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();

	return result;
}
unsigned int ml_phys_read(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long(paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr)
{
	return ml_phys_read_long_long(low32(paddr));
}
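
/*
 * Example (illustrative only): the accessor widths map onto the size
 * argument of ml_phys_read_data() (1, 2 or 4 bytes), with the *_double
 * variants routed through ml_phys_read_long_long().
 */
static inline void
example_phys_read_widths(vm_offset_t example_paddr)
{
	unsigned int       b = ml_phys_read_byte(example_paddr);	/* 1 byte  */
	unsigned int       h = ml_phys_read_half(example_paddr);	/* 2 bytes */
	unsigned int       w = ml_phys_read_word(example_paddr);	/* 4 bytes */
	unsigned long long d = ml_phys_read_double(example_paddr);	/* 8 bytes */

	(void)b; (void)h; (void)w; (void)d;
}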
/*
 *  Write data to a physical address. Memory should not be cache inhibited.
 */
static void
ml_phys_write_data(vm_offset_t paddr, unsigned long data, int size)
{
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_write_data: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
		INTEL_PTE_REF | INTEL_PTE_MOD;
	save = *(pt_entry_t *)CM3;

	switch (size) {
	case 1:
		*(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned char)data;
		break;
	case 2:
		*(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned short)data;
		break;
	case 4:
	default:
		*(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;
		break;
	}

	if (save != *(pt_entry_t *)CM3)
		panic("ml_phys_write_data CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();
}
static void
ml_phys_write_long_long(vm_offset_t paddr, unsigned long long data)
{
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_write_data: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
		INTEL_PTE_REF | INTEL_PTE_MOD;
	save = *(pt_entry_t *)CM3;

	*(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;

	if (save != *(pt_entry_t *)CM3)
		panic("ml_phys_write_data CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();
}
void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr, unsigned int data)
{
	ml_phys_write_data(low32(paddr), data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr, unsigned int data)
{
	ml_phys_write_data(low32(paddr), data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr, unsigned int data)
{
	ml_phys_write_data(low32(paddr), data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr, unsigned int data)
{
	ml_phys_write_data(low32(paddr), data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long(paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr, unsigned long long data)
{
	ml_phys_write_long_long(low32(paddr), data);
}
/* PCI config cycle probing
 *
 *      Read the memory location at physical address paddr.
 *  This is a part of a device probe, so there is a good chance we will
 *  have a machine check here. So we have to be able to handle that.
 *  We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
	*val = ml_phys_read(paddr);
	return TRUE;
}

/*
 *  Read the memory location at physical address paddr.
 *  This is a part of a device probe, so there is a good chance we will
 *  have a machine check here. So we have to be able to handle that.
 *  We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read_64(addr64_t paddr, unsigned int *val)
{
	*val = ml_phys_read_64(paddr);
	return TRUE;
}
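
/*
 * Example (illustrative only): how a device probe might use ml_probe_read().
 * The physical address is a hypothetical value.
 */
static inline boolean_t
example_probe_device(void)
{
	unsigned int val;

	if (!ml_probe_read(0xE0000000, &val))	/* hypothetical device address */
		return FALSE;			/* the read did not complete */
	return (val != 0xFFFFFFFF);		/* all-ones typically means no device */
}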
int
bcmp(const void *pa, const void *pb, size_t len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0)
		return 0;
	do {
		if (*a++ != *b++)
			break;
	} while (--len);
	return len;
}

int
memcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *p1 = s1, *p2 = s2;

	while (n--)
		if (*p1++ != *p2++)
			return (*--p1 - *--p2);
	return 0;
}
/*
 * Abstract:
 *	strlen returns the number of characters in "string" preceding
 *	the terminating null character.
 */
size_t
strlen(
	register const char *string)
{
	register const char *ret = string;

	while (*string++ != '\0')
		continue;
	return string - 1 - ret;
}
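
/*
 * Example (illustrative only): strlen counts the bytes before, but not
 * including, the terminating NUL.
 */
static inline void
example_strlen(void)
{
	size_t n = strlen("mach");	/* n == 4; the '\0' is not counted */
	(void)n;
}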
#include <libkern/OSAtomic.h>

uint32_t
hw_atomic_add(
	uint32_t	*dest,
	uint32_t	delt)
{
	uint32_t	oldValue;
	uint32_t	newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue + delt);
	} while (!OSCompareAndSwap((UInt32)oldValue,
					(UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_sub(
	uint32_t	*dest,
	uint32_t	delt)
{
	uint32_t	oldValue;
	uint32_t	newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue - delt);
	} while (!OSCompareAndSwap((UInt32)oldValue,
					(UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_or(
	uint32_t	*dest,
	uint32_t	mask)
{
	uint32_t	oldValue;
	uint32_t	newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue | mask);
	} while (!OSCompareAndSwap((UInt32)oldValue,
					(UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_and(
	uint32_t	*dest,
	uint32_t	mask)
{
	uint32_t	oldValue;
	uint32_t	newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue & mask);
	} while (!OSCompareAndSwap((UInt32)oldValue,
					(UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_compare_and_store(
	uint32_t	oldval,
	uint32_t	newval,
	uint32_t	*dest)
{
	return OSCompareAndSwap((UInt32)oldval, (UInt32)newval, (UInt32 *)dest);
}
#if	MACH_ASSERT

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void machine_callstack(
	__unused natural_t	*buf,
	__unused vm_size_t	callstack_max)
{
}

#endif	/* MACH_ASSERT */
void fillPage(ppnum_t pa, unsigned int fill)
{
	pmap_paddr_t src;
	int i;
	int cnt = PAGE_SIZE / sizeof(unsigned int);
	unsigned int *addr;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM2)
		panic("fillPage: CMAP busy");
	src = (pmap_paddr_t)i386_ptob(pa);
	*(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
		INTEL_PTE_REF | INTEL_PTE_MOD;

	for (i = 0, addr = (unsigned int *)CA2; i < cnt; i++)
		*addr++ = fill;

	*(pt_entry_t *) CM2 = 0;
	mp_enable_preemption();
}
static inline void __sfence(void)
{
	__asm__ volatile("sfence");
}
static inline void __mfence(void)
{
	__asm__ volatile("mfence");
}
static inline void __wbinvd(void)
{
	__asm__ volatile("wbinvd");
}
static inline void __clflush(void *ptr)
{
	__asm__ volatile(".byte 0x0F; .byte 0xae; .byte 0x38" : : "a" (ptr));
}
void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
	if (cpuid_features() & CPUID_FEATURE_CLFSH)
	{
		uint32_t  linesize = cpuid_info()->cache_linesize;
		addr64_t  addr;
		uint32_t  offset, chunk;
		boolean_t istate;

		istate = ml_set_interrupts_enabled(FALSE);

		if (*(pt_entry_t *) CM2)
			panic("cache_flush_page_phys: CMAP busy");

		offset = pa & (linesize - 1);
		count += offset;
		addr   = pa - offset;
		offset = addr & ((addr64_t) (page_size - 1));
		chunk  = page_size - offset;

		do
		{
			if (chunk > count)
				chunk = count;

			*(pt_entry_t *) CM2 = i386_ptob(atop_64(addr)) | INTEL_PTE_VALID;

			for (; offset < chunk; offset += linesize)
				__clflush((void *)(((u_int)CA2) + offset));

			count -= chunk;
			addr  += chunk;
			chunk  = page_size;
			offset = 0;
		} while (count);

		*(pt_entry_t *) CM2 = 0;

		(void) ml_set_interrupts_enabled(istate);
	}
	else
	{
		__wbinvd();
	}
	__mfence();
}

void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
	return(dcache_incoherent_io_store64(pa, count));
}
void
flush_dcache64(__unused addr64_t addr,
	       __unused unsigned count,
	       __unused int phys)
{
}

void
invalidate_icache64(__unused addr64_t addr,
		    __unused unsigned count,
		    __unused int phys)
{
}
kern_return_t copypv(addr64_t src64,
		     addr64_t snk64,
		     unsigned int size,
		     int which)
{
	vm_map_t map;
	kern_return_t ret;
	vm_offset_t source, sink;
	vm_offset_t vaddr;
	vm_offset_t paddr;
	spl_t s;
	unsigned int lop, csize;
	int needtran, bothphys;
	vm_prot_t prot;
	pt_entry_t *ptep;

	map = (which & cppvKmap) ? kernel_map : current_map_fast();

	source = low32(src64);
	sink = low32(snk64);

	if((which & (cppvPsrc | cppvPsnk)) == 0 ) {	/* Make sure that only one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");	/* Not allowed */
	}

	bothphys = 1;					/* Assume both are physical */

	if(!(which & cppvPsnk)) {			/* Is there a virtual page here? */
		vaddr = sink;				/* Sink side is virtual */
		bothphys = 0;				/* Show both aren't physical */
		prot = VM_PROT_READ | VM_PROT_WRITE;	/* Sink always must be read/write */
	} else /* if(!(which & cppvPsrc)) */ {		/* Source side is virtual */
		vaddr = source;				/* Source side is virtual */
		bothphys = 0;				/* Show both aren't physical */
		prot = VM_PROT_READ;			/* Virtual source is always read only */
	}

	needtran = 1;					/* Show we need to map the virtual the first time */
	s = splhigh();					/* Don't bother me */

	while(size) {

		if(!bothphys && (needtran || !(vaddr & 4095LL))) {	/* If first time or we stepped onto a new page, we need to translate */
			needtran = 0;
			while(1) {
				ptep = pmap_mapgetpte(map, vaddr);
				if((0 == ptep) || ((*ptep & INTEL_PTE_VALID) == 0)) {
					splx(s);	/* Restore the interrupt level */
					ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0);	/* Didn't find it, try to fault it in... */

					if(ret != KERN_SUCCESS) return KERN_FAILURE;	/* Didn't find any, return no good... */

					s = splhigh();	/* Don't bother me */
					continue;	/* Go try for the map again... */
				}

				/* Note that we have to have the destination writable.  So, if we already have it,
				   or we are mapping the source, we can just leave. */
				if((which & cppvPsnk) || (*ptep & INTEL_PTE_WRITE)) break;	/* We got it mapped R/W or the source is not virtual, leave... */
				splx(s);		/* Restore the interrupt level */

				ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0);	/* check for a COW area */
				if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* We couldn't get it R/W, leave in disgrace... */
				s = splhigh();		/* Don't bother me */
			}

			paddr = pte_to_pa(*ptep) | (vaddr & 4095);

			if(which & cppvPsrc) sink = paddr;	/* If source is physical, then the sink is virtual */
			else source = paddr;			/* Otherwise the source is */
		}

		lop = (unsigned int)(4096LL - (sink & 4095LL));		/* Assume sink smallest */
		if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL));	/* No, source is smaller */

		csize = size;				/* Assume we can copy it all */
		if(lop < size) csize = lop;		/* Nope, we can't do it all */

		if(which & cppvFsrc) flush_dcache64((addr64_t)source, csize, 1);	/* If requested, flush source before move */
		if(which & cppvFsnk) flush_dcache64((addr64_t)sink, csize, 1);		/* If requested, flush sink before move */

		bcopy_phys((addr64_t)source, (addr64_t)sink, csize);			/* Do a physical copy, virtually */

		if(which & cppvFsrc) flush_dcache64((addr64_t)source, csize, 1);	/* If requested, flush source after move */
		if(which & cppvFsnk) flush_dcache64((addr64_t)sink, csize, 1);		/* If requested, flush sink after move */

/*
 *		Note that for certain ram disk flavors, we may be copying outside of known memory.
 *		Therefore, before we try to mark it modified, we check if it exists.
 */

		if( !(which & cppvNoModSnk)) {
			if (phys_page_exists((ppnum_t)sink >> 12))
				mapping_set_mod((ppnum_t)(sink >> 12));		/* Make sure we know that it is modified */
		}
		if( !(which & cppvNoRefSrc)) {
			if (phys_page_exists((ppnum_t)source >> 12))
				mapping_set_ref((ppnum_t)(source >> 12));	/* Make sure we know that it is referenced */
		}

		size = size - csize;			/* Calculate what is left */
		vaddr = vaddr + csize;			/* Move to next sink address */
		source = source + csize;		/* Bump source to next physical address */
		sink = sink + csize;			/* Bump sink to next physical address */
	}

	splx(s);					/* Open up for interrupts */

	return KERN_SUCCESS;
}
void switch_to_serial_console(void)
{
}

addr64_t	vm_last_addr;

void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}

void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}
void
cache_flush_page_phys(ppnum_t pa)
{
	boolean_t	istate;
	int		i;
	unsigned int	*cacheline_addr;
	int		cacheline_size = cpuid_info()->cache_linesize;
	int		cachelines_in_page = PAGE_SIZE / cacheline_size;

	/*
	 * If there's no clflush instruction, we're sadly forced to use wbinvd.
	 */
	if (!(cpuid_features() & CPUID_FEATURE_CLFSH)) {
		asm volatile("wbinvd" : : : "memory");
		return;
	}

	istate = ml_set_interrupts_enabled(FALSE);

	if (*(pt_entry_t *) CM2)
		panic("cache_flush_page_phys: CMAP busy");

	*(pt_entry_t *) CM2 = i386_ptob(pa) | INTEL_PTE_VALID;

	for (i = 0, cacheline_addr = (unsigned int *)CA2;
	     i < cachelines_in_page;
	     i++, cacheline_addr += cacheline_size) {
		asm volatile("clflush %0" : : "m" (*cacheline_addr));
	}

	*(pt_entry_t *) CM2 = 0;

	(void) ml_set_interrupts_enabled(istate);
}