#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
-#include <i386/iopb_entries.h>
#include <i386/misc_protos.h>
/*
*/
void
pmap_zero_page(
- ppnum_t pn)
+ ppnum_t pn)
{
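+ /* Fictitious and guard pseudo-pages have no backing physical frame */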
assert(pn != vm_page_fictitious_addr);
+ assert(pn != vm_page_guard_addr);
bzero_phys((addr64_t)i386_ptob(pn), PAGE_SIZE);
}
*/
void
pmap_zero_part_page(
- ppnum_t pn,
+ ppnum_t pn,
vm_offset_t offset,
vm_size_t len)
{
assert(pn != vm_page_fictitious_addr);
+ assert(pn != vm_page_guard_addr);
assert(offset + len <= PAGE_SIZE);
- bzero_phys((addr64_t)(i386_ptob(pn) + offset), len);
+ bzero_phys((addr64_t)(i386_ptob(pn) + offset), (uint32_t)len);
}
/*
assert(psrc != vm_page_fictitious_addr);
assert(pdst != vm_page_fictitious_addr);
+ assert(psrc != vm_page_guard_addr);
+ assert(pdst != vm_page_guard_addr);
src = i386_ptob(psrc);
dst = i386_ptob(pdst);
- assert((((uint32_t)dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
- assert((((uint32_t)src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);
+ assert((((uintptr_t)dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
+ assert((((uintptr_t)src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);
bcopy_phys((addr64_t)src + (src_offset & INTEL_OFFMASK),
(addr64_t)dst + (dst_offset & INTEL_OFFMASK),
*/
void
pmap_copy_part_lpage(
- vm_offset_t src,
- ppnum_t pdst,
- vm_offset_t dst_offset,
- vm_size_t len)
+ __unused vm_offset_t src,
+ __unused ppnum_t pdst,
+ __unused vm_offset_t dst_offset,
+ __unused vm_size_t len)
{
+#ifdef __i386__
mapwindow_t *map;
+#endif
assert(pdst != vm_page_fictitious_addr);
+ assert(pdst != vm_page_guard_addr);
assert((dst_offset + len) <= PAGE_SIZE);
+#ifdef __i386__
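+ /* On i386, reach the destination frame through a temporary copy window */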
mp_disable_preemption();
map = pmap_get_mapwindow(INTEL_PTE_VALID | INTEL_PTE_RW | (i386_ptob(pdst) & PG_FRAME) |
INTEL_PTE_REF | INTEL_PTE_MOD);
- if (map == 0) {
- panic("pmap_copy_part_lpage");
- }
- invlpg((uintptr_t)map->prv_CADDR);
memcpy((void *) (map->prv_CADDR + (dst_offset & INTEL_OFFMASK)), (void *) src, len);
- *map->prv_CMAP = 0;
+
+ pmap_put_mapwindow(map);
mp_enable_preemption();
+#endif
}
/*
*/
void
pmap_copy_part_rpage(
- ppnum_t psrc,
- vm_offset_t src_offset,
- vm_offset_t dst,
- vm_size_t len)
+ __unused ppnum_t psrc,
+ __unused vm_offset_t src_offset,
+ __unused vm_offset_t dst,
+ __unused vm_size_t len)
{
+#ifdef __i386__
mapwindow_t *map;
+#endif
assert(psrc != vm_page_fictitious_addr);
+ assert(psrc != vm_page_guard_addr);
assert((src_offset + len) <= PAGE_SIZE);
+#ifdef __i386__
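+ /* On i386, reach the source frame through a temporary copy window */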
mp_disable_preemption();
map = pmap_get_mapwindow(INTEL_PTE_VALID | INTEL_PTE_RW | (i386_ptob(psrc) & PG_FRAME) |
INTEL_PTE_REF);
- if (map == 0) {
- panic("pmap_copy_part_rpage");
- }
- invlpg((uintptr_t) map->prv_CADDR);
memcpy((void *) dst, (void *) (map->prv_CADDR + (src_offset & INTEL_OFFMASK)), len);
- *map->prv_CMAP = 0;
+
+ pmap_put_mapwindow(map);
mp_enable_preemption();
+#endif
}
/*
kvtophys(
vm_offset_t addr)
{
- pt_entry_t *ptep;
pmap_paddr_t pa;
-
- mp_disable_preemption();
- if ((ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)addr)) == PT_ENTRY_NULL) {
- pa = 0;
- } else {
- pa = pte_to_pa(*ptep) | (addr & INTEL_OFFMASK);
- }
- mp_enable_preemption_no_check();
+
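+ /* pmap_find_phys() returns the physical page number mapped at addr,
+ * or 0 if the address is not mapped.
+ */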
+ pa = ((pmap_paddr_t)pmap_find_phys(kernel_pmap, addr)) << INTEL_PGSHIFT;
+ if (pa)
+ pa |= (addr & INTEL_OFFMASK);
+
return ((addr64_t)pa);
}
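+/* Debugger-only mapping window used by ml_copy_phys() when a frame lies outside the physmap */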
+extern pt_entry_t *debugger_ptep;
+extern vm_map_offset_t debugger_window_kva;
+
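+/*
+ * ml_copy_phys: copy 'bytes' bytes of physical memory from src64 to dst64.
+ * The copy must not cross a page boundary on either side.  On i386
+ * (NCOPY_WINDOWS > 0) both frames are reached through temporary per-CPU
+ * mapping windows; on x86_64 they are reached through the physmap, with a
+ * debugger-only fallback window for frames outside it.
+ */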
+__private_extern__ void ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) {
+ void *src, *dst;
+
+ mp_disable_preemption();
+#if NCOPY_WINDOWS > 0
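+ /* Legacy path: map both frames through temporary copy windows */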
+ mapwindow_t *src_map, *dst_map;
+ /* We rely on MTRRs here */
+ src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF));
+ dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD));
+ src = (void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK));
+ dst = (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK));
+#elif defined(__x86_64__)
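+ /* The physmap provides a direct kernel virtual mapping of physical memory */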
+ src = PHYSMAP_PTOV(src64);
+ dst = PHYSMAP_PTOV(dst64);
+
+ addr64_t debug_pa = 0;
+
+ /* If either the destination or the source is outside the
+ * physical map, establish a physical window onto the target frame.
+ */
+ assert(physmap_enclosed(src64) || physmap_enclosed(dst64));
+
+ if (physmap_enclosed(src64) == FALSE) {
+ src = (void *)(debugger_window_kva | (src64 & INTEL_OFFMASK));
+ debug_pa = src64 & PG_FRAME;
+ } else if (physmap_enclosed(dst64) == FALSE) {
+ dst = (void *)(debugger_window_kva | (dst64 & INTEL_OFFMASK));
+ debug_pa = dst64 & PG_FRAME;
+ }
+ /* DRK: debugger-only routine; we don't bother checking for an
+ * identical mapping.
+ */
+ if (debug_pa) {
+ if (debugger_window_kva == 0)
+ panic("%s: invoked in non-debug mode", __FUNCTION__);
+ /* Establish a cache-inhibited physical window; some platforms
+ * may not cover arbitrary ranges with MTRRs
+ */
+ pmap_store_pte(debugger_ptep, debug_pa | INTEL_PTE_NCACHE | INTEL_PTE_RW | INTEL_PTE_REF| INTEL_PTE_MOD | INTEL_PTE_VALID);
+ flush_tlb_raw();
+#if DEBUG
+ kprintf("Remapping debugger physical window at %p to 0x%llx\n", (void *)debugger_window_kva, debug_pa);
+#endif
+ }
+#endif
+ /* Ensure the copy does not span a page on either the source or destination side */
+ if (((((uint32_t)src64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) || ((((uint32_t)dst64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) ) {
+ panic("ml_copy_phys spans pages, src: 0x%llx, dst: 0x%llx", src64, dst64);
+ }
+
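+ /* 1-, 2-, 4- and 8-byte copies are issued as a single access of the
+ * requested width; anything else falls back to bcopy().
+ */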
+ switch (bytes) {
+ case 1:
+ *((uint8_t *) dst) = *((volatile uint8_t *) src);
+ break;
+ case 2:
+ *((uint16_t *) dst) = *((volatile uint16_t *) src);
+ break;
+ case 4:
+ *((uint32_t *) dst) = *((volatile uint32_t *) src);
+ break;
+ /* Should perform two 32-bit reads */
+ case 8:
+ *((uint64_t *) dst) = *((volatile uint64_t *) src);
+ break;
+ default:
+ bcopy(src, dst, bytes);
+ break;
+ }
+#if NCOPY_WINDOWS > 0
+ pmap_put_mapwindow(src_map);
+ pmap_put_mapwindow(dst_map);
+#endif
+ mp_enable_preemption();
+}