+
+
+
+
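+/*
+ * fillPage: fill the physical page 'pa' with the 32-bit pattern 'fill'.
+ *
+ * CM2 points at a reserved kernel PTE and CA2 is the virtual page that
+ * PTE maps; the pair is used here as a temporary mapping window onto
+ * the physical page. Preemption is disabled while the window is in use.
+ */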
+void fillPage(ppnum_t pa, unsigned int fill)
+{
+ pmap_paddr_t src;
+ int i;
+ int cnt = PAGE_SIZE/sizeof(unsigned int);
+ unsigned int *addr;
+ mp_disable_preemption();
+ if (*(pt_entry_t *) CM2)
+ panic("fillPage: CMAP busy");
+ src = (pmap_paddr_t)i386_ptob(pa);
+ *(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
+ INTEL_PTE_REF | INTEL_PTE_MOD;
+ invlpg((u_int)CA2);
+
+ for (i = 0, addr = (unsigned int *)CA2; i < cnt ; i++ )
+ *addr++ = fill;
+
+ *(pt_entry_t *) CM2 = 0;
+ mp_enable_preemption();
+}
+
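+/*
+ * Inline wrappers around the store fence, memory fence, cache
+ * write-back-and-invalidate and cache-line-flush instructions.
+ * __clflush hand-encodes the instruction (0F AE /7 with a modrm byte of
+ * 0x38, i.e. clflush (%eax)), presumably for assemblers that do not
+ * know the mnemonic; the "a" constraint puts the target address in
+ * %eax to match that encoding.
+ */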
+static inline void __sfence(void)
+{
+ __asm__ volatile("sfence");
+}
+static inline void __mfence(void)
+{
+ __asm__ volatile("mfence");
+}
+static inline void __wbinvd(void)
+{
+ __asm__ volatile("wbinvd");
+}
+static inline void __clflush(void *ptr)
+{
+ __asm__ volatile(".byte 0x0F; .byte 0xae; .byte 0x38" : : "a" (ptr));
+}
+
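+/*
+ * dcache_incoherent_io_store64: write back and invalidate the cache
+ * lines covering 'count' bytes starting at physical address 'pa'.
+ * When CLFLUSH is available the range is walked a page at a time
+ * through the CM2/CA2 mapping window, one cache line per flush;
+ * otherwise the whole cache is written back with WBINVD. The trailing
+ * SFENCE orders the flushes against later stores.
+ */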
+void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
+{
+ if (cpuid_features() & CPUID_FEATURE_CLFSH)
+ {
+ uint32_t linesize = cpuid_info()->cache_linesize;
+ addr64_t addr;
+ uint32_t offset, chunk;
+ boolean_t istate;
+
+ istate = ml_set_interrupts_enabled(FALSE);
+
+ if (*(pt_entry_t *) CM2)
+ panic("cache_flush_page_phys: CMAP busy");
+
+ offset = pa & (linesize - 1);
+ count += offset;
+ addr = pa - offset;
+ offset = addr & ((addr64_t) (page_size - 1));
+ chunk = page_size - offset;
+
+ do
+ {
+ if (chunk > count)
+ chunk = count;
+
+ *(pt_entry_t *) CM2 = i386_ptob(atop_64(addr)) | INTEL_PTE_VALID;
+ invlpg((u_int)CA2);
+
+ for (; offset < chunk; offset += linesize)
+ __clflush((void *)(((u_int)CA2) + offset));
+
+ count -= chunk;
+ addr += chunk;
+ chunk = page_size;
+ offset = 0;
+ }
+ while (count);
+
+ *(pt_entry_t *) CM2 = 0;
+
+ (void) ml_set_interrupts_enabled(istate);
+ }
+ else
+ __wbinvd();
+ __sfence();
+}
+
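+/*
+ * CLFLUSH both writes back and invalidates a line, so the flush
+ * variant can simply reuse the store path.
+ */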
+void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
+{
+ dcache_incoherent_io_store64(pa, count);
+}
+
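+/*
+ * Stub implementations: these calls are part of the machine-independent
+ * interface but require no work on i386, so they are no-ops.
+ */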
+void
+flush_dcache64(__unused addr64_t addr,
+ __unused unsigned count,
+ __unused int phys)
+{
+}
+
+void
+invalidate_icache64(__unused addr64_t addr,
+ __unused unsigned count,
+ __unused int phys)
+{
+}
+
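+/*
+ * copypv: copy 'size' bytes from source to sink, either of which may be
+ * given as a physical or a virtual address. The 'which' flags select
+ * the behavior:
+ *   cppvPsrc / cppvPsnk - the source / sink address is physical
+ *   cppvKmap            - virtual addresses are in the kernel map
+ *   cppvFsrc / cppvFsnk - flush the source / sink around the copy
+ *   cppvNoModSnk        - do not mark the sink page modified
+ *   cppvNoRefSrc        - do not mark the source page referenced
+ * At most one side may be virtual; the virtual side is faulted in (and,
+ * for the sink, made writable) before each chunk is copied with
+ * bcopy_phys().
+ */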
+kern_return_t copypv(addr64_t src64,
+ addr64_t snk64,
+ unsigned int size,
+ int which)
+{
+
+ vm_map_t map;
+ kern_return_t ret;
+ vm_offset_t source, sink;
+ vm_offset_t vaddr;
+ vm_offset_t paddr;
+ spl_t s;
+ unsigned int lop, csize;
+ int needtran, bothphys;
+ vm_prot_t prot;
+ pt_entry_t *ptep;
+
+ map = (which & cppvKmap) ? kernel_map : current_map_fast();
+
+ source = low32(src64);
+ sink = low32(snk64);
+
+ if((which & (cppvPsrc | cppvPsnk)) == 0 ) { /* Make sure that only one is virtual */
+ panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */
+ }
+
+ bothphys = 1; /* Assume both are physical */
+
+ if(!(which & cppvPsnk)) { /* Is there a virtual page here? */
+ vaddr = sink; /* Sink side is virtual */
+ bothphys = 0; /* Show both aren't physical */
+ prot = VM_PROT_READ | VM_PROT_WRITE; /* Sink always must be read/write */
+ } else /* if(!(which & cppvPsrc)) */ { /* Source side is virtual */
+ vaddr = source; /* Source side is virtual */
+ bothphys = 0; /* Show both aren't physical */
+ prot = VM_PROT_READ; /* Virtual source is always read only */
+ }
+
+ needtran = 1; /* Show we need to map the virtual the first time */
+ s = splhigh(); /* Don't bother me */
+
+ while(size) {
+
+ if(!bothphys && (needtran || !(vaddr & 4095LL))) { /* If first time or we stepped onto a new page, we need to translate */
+ needtran = 0;
+ while(1) {
+ ptep = pmap_mapgetpte(map, vaddr);
+ if((0 == ptep) || ((*ptep & INTEL_PTE_VALID) == 0)) {
+ splx(s); /* Restore the interrupt level */
+ ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0); /* Didn't find it, try to fault it in... */
+
+ if(ret != KERN_SUCCESS)return KERN_FAILURE; /* Didn't find any, return no good... */
+
+ s = splhigh(); /* Don't bother me */
+ continue; /* Go try for the map again... */
+
+ }
+
+ /* Note that we have to have the destination writable. So, if we already have it, or we are mapping the source,
+ we can just leave.
+ */
+ if((which & cppvPsnk) || (*ptep & INTEL_PTE_WRITE)) break; /* We got it mapped R/W or the source is not virtual, leave... */
+ splx(s); /* Restore the interrupt level */
+
+ ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0); /* check for a COW area */
+ if (ret != KERN_SUCCESS) return KERN_FAILURE; /* We couldn't get it R/W, leave in disgrace... */
+ s = splhigh(); /* Don't bother me */
+ }
+
+ paddr = pte_to_pa(*ptep) | (vaddr & 4095);
+
+ if(which & cppvPsrc) sink = paddr; /* If source is physical, then the sink is virtual */
+ else source = paddr; /* Otherwise the source is */
+ }
+
+ lop = (unsigned int)(4096LL - (sink & 4095LL)); /* Assume sink smallest */
+ if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL)); /* No, source is smaller */
+
+ csize = size; /* Assume we can copy it all */
+ if(lop < size) csize = lop; /* Nope, we can't do it all */
+
+ if(which & cppvFsrc) flush_dcache64((addr64_t)source, csize, 1); /* If requested, flush source before move */
+ if(which & cppvFsnk) flush_dcache64((addr64_t)sink, csize, 1); /* If requested, flush sink before move */
+
+ bcopy_phys((addr64_t)source, (addr64_t)sink, csize); /* Do a physical copy, virtually */
+
+ if(which & cppvFsrc) flush_dcache64((addr64_t)source, csize, 1); /* If requested, flush source after move */
+ if(which & cppvFsnk) flush_dcache64((addr64_t)sink, csize, 1); /* If requested, flush sink after move */
+
+
+/*
+ * Note that for certain ram disk flavors, we may be copying outside of known memory.
+ * Therefore, before we try to mark a page modified or referenced, we check that it exists.
+ */
+
+ if( !(which & cppvNoModSnk)) {
+ if (phys_page_exists((ppnum_t)(sink >> 12)))
+ mapping_set_mod((ppnum_t)(sink >> 12)); /* Make sure we know that it is modified */
+ }
+ if( !(which & cppvNoRefSrc)) {
+ if (phys_page_exists((ppnum_t)(source >> 12)))
+ mapping_set_ref((ppnum_t)(source >> 12)); /* Make sure we know that it is referenced */
+ }
+
+
+ size = size - csize; /* Calculate what is left */
+ vaddr = vaddr + csize; /* Move to next sink address */
+ source = source + csize; /* Bump source to next physical address */
+ sink = sink + csize; /* Bump sink to next physical address */
+ }
+
+ splx(s); /* Open up for interrupts */
+
+ return KERN_SUCCESS;
+}
+
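+/* switch_to_serial_console is a no-op in this implementation. */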
+void switch_to_serial_console(void)
+{
+}
+
+addr64_t vm_last_addr;
+
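+/*
+ * Thin wrappers forwarding the machine-independent mod/ref calls to the
+ * i386 pmap layer.
+ */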
+void
+mapping_set_mod(ppnum_t pn)
+{
+ pmap_set_modify(pn);
+}
+
+void
+mapping_set_ref(ppnum_t pn)
+{
+ pmap_set_reference(pn);
+}
+
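+/*
+ * cache_flush_page_phys: flush every cache line of the physical page
+ * 'pa'. Falls back to WBINVD when CLFLUSH is unavailable; otherwise the
+ * page is mapped through the CM2/CA2 window and flushed one cache line
+ * at a time with interrupts disabled.
+ */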
+void
+cache_flush_page_phys(ppnum_t pa)
+{
+ boolean_t istate;
+ int i;
+ unsigned int *cacheline_addr;
+ int cacheline_size = cpuid_info()->cache_linesize;
+ int cachelines_in_page = PAGE_SIZE/cacheline_size;
+
+ /*
+ * If there's no clflush instruction, we're sadly forced to use wbinvd.
+ */
+ if (!(cpuid_features() & CPUID_FEATURE_CLFSH)) {
+ asm volatile("wbinvd" : : : "memory");
+ return;
+ }
+
+ istate = ml_set_interrupts_enabled(FALSE);
+
+ if (*(pt_entry_t *) CM2)
+ panic("cache_flush_page_phys: CMAP busy");
+
+ *(pt_entry_t *) CM2 = i386_ptob(pa) | INTEL_PTE_VALID;
+ invlpg((u_int)CA2);
+
+ for (i = 0, cacheline_addr = (unsigned int *)CA2;
+ i < cachelines_in_page;
+ i++, cacheline_addr += cacheline_size/sizeof(unsigned int)) {
+ /* flush the cache line that cacheline_addr points to */
+ asm volatile("clflush %0" : : "m" (*cacheline_addr));
+ }
+
+ *(pt_entry_t *) CM2 = 0;
+
+ (void) ml_set_interrupts_enabled(istate);
+
+}
+