/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
* @OSF_COPYRIGHT@
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
#include <string.h>

#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>
/*
* pmap_zero_page zeros the specified (machine independent) page.
*/
void
pmap_zero_page(
- ppnum_t pn)
+ ppnum_t pn)
{
- vm_offset_t p;
assert(pn != vm_page_fictitious_addr);
- p = (vm_offset_t)i386_ptob(pn);
- bzero((char *)phystokv(p), PAGE_SIZE);
+ assert(pn != vm_page_guard_addr);
+ bzero_phys((addr64_t)i386_ptob(pn), PAGE_SIZE);
}
/*
 *	pmap_zero_part_page
 *	zeros the specified (machine independent) part of a page.
 */
void
pmap_zero_part_page(
- ppnum_t pn,
+ ppnum_t pn,
vm_offset_t offset,
vm_size_t len)
{
assert(pn != vm_page_fictitious_addr);
+ assert(pn != vm_page_guard_addr);
assert(offset + len <= PAGE_SIZE);
- bzero((char *)phystokv(i386_ptob(pn)) + offset, len);
-}
-
-/*
- * pmap_copy_page copies the specified (machine independent) pages.
- */
-void
-pmap_copy_page(
- ppnum_t psrc,
- ppnum_t pdst)
-
-{
- vm_offset_t src,dst;
-
- assert(psrc != vm_page_fictitious_addr);
- assert(pdst != vm_page_fictitious_addr);
- src = (vm_offset_t)i386_ptob(psrc);
- dst = (vm_offset_t)i386_ptob(pdst);
-
- memcpy((void *)phystokv(dst), (void *)phystokv(src), PAGE_SIZE);
+ bzero_phys((addr64_t)(i386_ptob(pn) + offset), (uint32_t)len);
}
/*
vm_offset_t dst_offset,
vm_size_t len)
{
- vm_offset_t src, dst;
+ pmap_paddr_t src, dst;
assert(psrc != vm_page_fictitious_addr);
assert(pdst != vm_page_fictitious_addr);
- src = (vm_offset_t)i386_ptob(psrc);
- dst = (vm_offset_t)i386_ptob(pdst);
- assert(((dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
- assert(((src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);
+ assert(psrc != vm_page_guard_addr);
+ assert(pdst != vm_page_guard_addr);
+
+ src = i386_ptob(psrc);
+ dst = i386_ptob(pdst);
+
+ assert((((uintptr_t)dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
+ assert((((uintptr_t)src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);
- memcpy((void *)(phystokv(dst) + dst_offset),
- (void *)(phystokv(src) + src_offset), len);
+ bcopy_phys((addr64_t)src + (src_offset & INTEL_OFFMASK),
+ (addr64_t)dst + (dst_offset & INTEL_OFFMASK),
+ len);
}
/*
 *	pmap_copy_part_lpage
 *	copy part of a virtually addressed page
 *	to a physically addressed page.
 */
void
pmap_copy_part_lpage(
- vm_offset_t src,
- ppnum_t pdst,
- vm_offset_t dst_offset,
- vm_size_t len)
+ __unused vm_offset_t src,
+ __unused ppnum_t pdst,
+ __unused vm_offset_t dst_offset,
+ __unused vm_size_t len)
{
- vm_offset_t dst;
- assert(src != vm_page_fictitious_addr);
assert(pdst != vm_page_fictitious_addr);
- dst = (vm_offset_t)i386_ptob(pdst);
- assert(((dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
+ assert(pdst != vm_page_guard_addr);
+ assert((dst_offset + len) <= PAGE_SIZE);
- memcpy((void *)(phystokv(dst) + dst_offset), (void *)src, len);
}
/*
 *	pmap_copy_part_rpage
 *	copy part of a physically addressed page
 *	to a virtually addressed page.
 */
void
pmap_copy_part_rpage(
- ppnum_t psrc,
- vm_offset_t src_offset,
- vm_offset_t dst,
- vm_size_t len)
+ __unused ppnum_t psrc,
+ __unused vm_offset_t src_offset,
+ __unused vm_offset_t dst,
+ __unused vm_size_t len)
{
- vm_offset_t src;
assert(psrc != vm_page_fictitious_addr);
- assert(dst != vm_page_fictitious_addr);
- src = (vm_offset_t)i386_ptob(psrc);
- assert(((src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);
+ assert(psrc != vm_page_guard_addr);
+ assert((src_offset + len) <= PAGE_SIZE);
- memcpy((void *)dst, (void *)(phystokv(src) + src_offset), len);
}
/*
*
* Convert a kernel virtual address to a physical address
*/
-vm_offset_t
+addr64_t
kvtophys(
vm_offset_t addr)
{
- pt_entry_t *pte;
+ pmap_paddr_t pa;
+
+ pa = ((pmap_paddr_t)pmap_find_phys(kernel_pmap, addr)) << INTEL_PGSHIFT;
+ if (pa)
+ pa |= (addr & INTEL_OFFMASK);
+
+ return ((addr64_t)pa);
+}
+
+extern pt_entry_t *debugger_ptep;
+extern vm_map_offset_t debugger_window_kva;
+extern int _bcopy(const void *, void *, vm_size_t);
+extern int _bcopy2(const void *, void *);
+extern int _bcopy4(const void *, void *);
+extern int _bcopy8(const void *, void *);
+
+__private_extern__ int ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) {
+ void *src, *dst;
+ int err = 0;
+
+ mp_disable_preemption();
+#if NCOPY_WINDOWS > 0
+ mapwindow_t *src_map, *dst_map;
+ /* We rely on MTRRs here */
+ src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF));
+ dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD));
+ src = (void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK));
+ dst = (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK));
+#elif defined(__x86_64__)
+ addr64_t debug_pa = 0;
+
+ /* If either destination or source are outside the
+ * physical map, establish a physical window onto the target frame.
+ */
+ assert(physmap_enclosed(src64) || physmap_enclosed(dst64));
+
+ if (physmap_enclosed(src64) == FALSE) {
+ src = (void *)(debugger_window_kva | (src64 & INTEL_OFFMASK));
+ dst = PHYSMAP_PTOV(dst64);
+ debug_pa = src64 & PG_FRAME;
+ } else if (physmap_enclosed(dst64) == FALSE) {
+ src = PHYSMAP_PTOV(src64);
+ dst = (void *)(debugger_window_kva | (dst64 & INTEL_OFFMASK));
+ debug_pa = dst64 & PG_FRAME;
+ } else {
+ src = PHYSMAP_PTOV(src64);
+ dst = PHYSMAP_PTOV(dst64);
+ }
+ /* DRK: debugger only routine, we don't bother checking for an
+ * identical mapping.
+ */
+ if (debug_pa) {
+ if (debugger_window_kva == 0)
+ panic("%s: invoked in non-debug mode", __FUNCTION__);
+ /* Establish a cache-inhibited physical window; some platforms
+ * may not cover arbitrary ranges with MTRRs
+ */
+ pmap_store_pte(debugger_ptep, debug_pa | INTEL_PTE_NCACHE | INTEL_PTE_RW | INTEL_PTE_REF| INTEL_PTE_MOD | INTEL_PTE_VALID);
+ flush_tlb_raw();
+#if DEBUG
+ kprintf("Remapping debugger physical window at %p to 0x%llx\n", (void *)debugger_window_kva, debug_pa);
+#endif
+ }
+#endif
+ /* ensure we stay within a page */
+ if (((((uint32_t)src64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) || ((((uint32_t)dst64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) ) {
+ panic("ml_copy_phys spans pages, src: 0x%llx, dst: 0x%llx", src64, dst64);
+ }
+
+ /*
+ * For device register access from the debugger,
+ * 2-byte/16-bit, 4-byte/32-bit and 8-byte/64-bit copies are handled
+ * by assembly routines ensuring the required access widths.
+ * 1-byte and other copies are handled by the regular _bcopy.
+ */
+ switch (bytes) {
+ case 2:
+ err = _bcopy2(src, dst);
+ break;
+ case 4:
+ err = _bcopy4(src, dst);
+ break;
+ case 8:
+ err = _bcopy8(src, dst);
+ break;
+ case 1:
+ default:
+ err = _bcopy(src, dst, bytes);
+ break;
+ }
+
+#if NCOPY_WINDOWS > 0
+ pmap_put_mapwindow(src_map);
+ pmap_put_mapwindow(dst_map);
+#endif
+ mp_enable_preemption();
- if ((pte = pmap_pte(kernel_pmap, addr)) == PT_ENTRY_NULL)
- return 0;
- return i386_trunc_page(*pte) | (addr & INTEL_OFFMASK);
+ return err;
}