X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/b0d623f7f2ae71ed96e60569f61f9a9a27016e80..d26ffc64f583ab2d29df48f13518685602bc8832:/osfmk/i386/phys.c

diff --git a/osfmk/i386/phys.c b/osfmk/i386/phys.c
index c8a2f5206..49147fc2a 100644
--- a/osfmk/i386/phys.c
+++ b/osfmk/i386/phys.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -54,7 +54,6 @@
  * the rights to redistribute these changes.
  */

-#include 
 #include 
 #include 

@@ -156,26 +155,11 @@ pmap_copy_part_lpage(
 	__unused vm_offset_t	dst_offset,
 	__unused vm_size_t	len)
 {
-#ifdef __i386__
-	mapwindow_t *map;
-#endif

 	assert(pdst != vm_page_fictitious_addr);
 	assert(pdst != vm_page_guard_addr);
 	assert((dst_offset + len) <= PAGE_SIZE);

-#ifdef __i386__
-	mp_disable_preemption();
-
-	map = pmap_get_mapwindow(INTEL_PTE_VALID | INTEL_PTE_RW | (i386_ptob(pdst) & PG_FRAME) |
-				 INTEL_PTE_REF | INTEL_PTE_MOD);
-
-	memcpy((void *) (map->prv_CADDR + (dst_offset & INTEL_OFFMASK)), (void *) src, len);
-
-	pmap_put_mapwindow(map);
-
-	mp_enable_preemption();
-#endif
 }

 /*
@@ -189,26 +173,11 @@ pmap_copy_part_rpage(
 	__unused vm_offset_t	dst,
 	__unused vm_size_t	len)
 {
-#ifdef __i386__
-	mapwindow_t *map;
-#endif

 	assert(psrc != vm_page_fictitious_addr);
 	assert(psrc != vm_page_guard_addr);
 	assert((src_offset + len) <= PAGE_SIZE);

-#ifdef __i386__
-	mp_disable_preemption();
-
-	map = pmap_get_mapwindow(INTEL_PTE_VALID | INTEL_PTE_RW | (i386_ptob(psrc) & PG_FRAME) |
-				 INTEL_PTE_REF);
-
-	memcpy((void *) dst, (void *) (map->prv_CADDR + (src_offset & INTEL_OFFMASK)), len);
-
-	pmap_put_mapwindow(map);
-
-	mp_enable_preemption();
-#endif
 }

 /*
@@ -229,8 +198,16 @@ kvtophys(
 	return ((addr64_t)pa);
 }

-__private_extern__ void ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) {
+extern pt_entry_t *debugger_ptep;
+extern vm_map_offset_t debugger_window_kva;
+extern int _bcopy(const void *, void *, vm_size_t);
+extern int _bcopy2(const void *, void *);
+extern int _bcopy4(const void *, void *);
+extern int _bcopy8(const void *, void *);
+
+__private_extern__ int ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) {
 	void *src, *dst;
+	int err = 0;

 	mp_disable_preemption();
 #if NCOPY_WINDOWS > 0
@@ -241,35 +218,73 @@ __private_extern__ void ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t b
 	src = (void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK));
 	dst = (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK));
 #elif defined(__x86_64__)
-	src = PHYSMAP_PTOV(src64);
-	dst = PHYSMAP_PTOV(dst64);
+	addr64_t debug_pa = 0;
+
+	/* If either destination or source are outside the
+	 * physical map, establish a physical window onto the target frame.
+	 */
+	assert(physmap_enclosed(src64) || physmap_enclosed(dst64));
+
+	if (physmap_enclosed(src64) == FALSE) {
+		src = (void *)(debugger_window_kva | (src64 & INTEL_OFFMASK));
+		dst = PHYSMAP_PTOV(dst64);
+		debug_pa = src64 & PG_FRAME;
+	} else if (physmap_enclosed(dst64) == FALSE) {
+		src = PHYSMAP_PTOV(src64);
+		dst = (void *)(debugger_window_kva | (dst64 & INTEL_OFFMASK));
+		debug_pa = dst64 & PG_FRAME;
+	} else {
+		src = PHYSMAP_PTOV(src64);
+		dst = PHYSMAP_PTOV(dst64);
+	}
+	/* DRK: debugger only routine, we don't bother checking for an
+	 * identical mapping.
+	 */
+	if (debug_pa) {
+		if (debugger_window_kva == 0)
+			panic("%s: invoked in non-debug mode", __FUNCTION__);
+		/* Establish a cache-inhibited physical window; some platforms
+		 * may not cover arbitrary ranges with MTRRs
+		 */
+		pmap_store_pte(debugger_ptep, debug_pa | INTEL_PTE_NCACHE | INTEL_PTE_RW | INTEL_PTE_REF| INTEL_PTE_MOD | INTEL_PTE_VALID);
+		flush_tlb_raw();
+#if DEBUG
+		kprintf("Remapping debugger physical window at %p to 0x%llx\n", (void *)debugger_window_kva, debug_pa);
+#endif
+	}
 #endif
 	/* ensure we stay within a page */
 	if (((((uint32_t)src64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) || ((((uint32_t)dst64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) ) {
 		panic("ml_copy_phys spans pages, src: 0x%llx, dst: 0x%llx", src64, dst64);
 	}

+	/*
+	 * For device register access from the debugger,
+	 * 2-byte/16-bit, 4-byte/32-bit and 8-byte/64-bit copies are handled
+	 * by assembly routines ensuring the required access widths.
+	 * 1-byte and other copies are handled by the regular _bcopy.
+	 */
 	switch (bytes) {
-	case 1:
-		*((uint8_t *) dst) = *((uint8_t *) src);
-		break;
 	case 2:
-		*((uint16_t *) dst) = *((uint16_t *) src);
+		err = _bcopy2(src, dst);
 		break;
 	case 4:
-		*((uint32_t *) dst) = *((uint32_t *) src);
+		err = _bcopy4(src, dst);
 		break;
-	/* Should perform two 32-bit reads */
 	case 8:
-		*((uint64_t *) dst) = *((uint64_t *) src);
+		err = _bcopy8(src, dst);
 		break;
+	case 1:
 	default:
-		bcopy(src, dst, bytes);
+		err = _bcopy(src, dst, bytes);
 		break;
 	}

 #if NCOPY_WINDOWS > 0
 	pmap_put_mapwindow(src_map);
 	pmap_put_mapwindow(dst_map);
 #endif
 	mp_enable_preemption();
+
+	return err;
 }
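The rewritten switch routes 2-, 4- and 8-byte copies through the _bcopy2/_bcopy4/_bcopy8 assembly helpers so that debugger accesses to device registers are issued with exactly the requested width, and ml_copy_phys() now returns the helper's status instead of void. As a rough illustration only (debug_read_reg32 and the extern prototypes below are assumptions for this sketch, not part of the diff), a debugger-context caller of the new interface might look like this:

#include <mach/mach_types.h>    /* addr64_t, vm_offset_t, vm_size_t, boolean_t */

extern int      ml_copy_phys(addr64_t, addr64_t, vm_size_t); /* new int-returning signature from this diff */
extern addr64_t kvtophys(vm_offset_t);

/*
 * Hypothetical helper, illustration only: read a 32-bit device register
 * given its physical address while in debugger context.  The 4-byte
 * length selects the _bcopy4() case in ml_copy_phys(), i.e. a single
 * 32-bit-wide access; a non-zero return is treated here as a failed copy.
 */
static uint32_t
debug_read_reg32(addr64_t reg_pa, boolean_t *ok)
{
	uint32_t val = 0;

	/* Both arguments are physical addresses, hence kvtophys() on the buffer */
	*ok = (ml_copy_phys(reg_pa, kvtophys((vm_offset_t)&val), sizeof(val)) == 0);
	return val;
}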