/*
* Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* Please see the License for the specific language governing rights and
* limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
-#include <i386/iopb_entries.h>
#include <i386/misc_protos.h>
/*
*/
/*
 *	pmap_zero_page:
 *
 *	Zero an entire physical page, given its (machine-independent)
 *	physical page number `pn`.  The page must be a real, non-guard
 *	page; both conditions are asserted.
 */
void
pmap_zero_page(
	       ppnum_t pn)
{
	assert(pn != vm_page_fictitious_addr);
	assert(pn != vm_page_guard_addr);
	/* i386_ptob converts the page number to a physical byte address */
	bzero_phys((addr64_t)i386_ptob(pn), PAGE_SIZE);
}
*/
/*
 *	pmap_zero_part_page:
 *
 *	Zero `len` bytes of physical page `pn`, starting at byte
 *	`offset` within the page.  The range [offset, offset+len)
 *	must not cross the page boundary (asserted below).
 */
void
pmap_zero_part_page(
	ppnum_t         pn,
	vm_offset_t     offset,
	vm_size_t       len)
{
	assert(pn != vm_page_fictitious_addr);
	assert(pn != vm_page_guard_addr);
	assert(offset + len <= PAGE_SIZE);	/* must stay within one page */

	/*
	 * The assert above bounds len by PAGE_SIZE, so narrowing to
	 * uint32_t for bzero_phys cannot truncate a meaningful value.
	 */
	bzero_phys((addr64_t)(i386_ptob(pn) + offset), (uint32_t)len);
}
/*
vm_offset_t dst_offset,
vm_size_t len)
{
- vm_offset_t src, dst;
+ pmap_paddr_t src, dst;
+
assert(psrc != vm_page_fictitious_addr);
assert(pdst != vm_page_fictitious_addr);
- src = (vm_offset_t)i386_ptob(psrc);
- dst = (vm_offset_t)i386_ptob(pdst);
- assert(((dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
- assert(((src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);
+ assert(psrc != vm_page_guard_addr);
+ assert(pdst != vm_page_guard_addr);
+
+ src = i386_ptob(psrc);
+ dst = i386_ptob(pdst);
+
+ assert((((uintptr_t)dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
+ assert((((uintptr_t)src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);
+
bcopy_phys((addr64_t)src + (src_offset & INTEL_OFFMASK),
(addr64_t)dst + (dst_offset & INTEL_OFFMASK),
len);
*/
/*
 *	pmap_copy_part_lpage:
 *
 *	Copy `len` bytes from the virtual address `src` into physical
 *	page `pdst` at byte offset `dst_offset`.
 *
 *	Only the __i386__ configuration has a body: the destination frame
 *	is reached through a temporary per-cpu mapping window.  On other
 *	configurations the body compiles away, hence every parameter is
 *	marked __unused.
 */
void
pmap_copy_part_lpage(
	__unused vm_offset_t 	src,
	__unused ppnum_t 	pdst,
	__unused vm_offset_t	dst_offset,
	__unused vm_size_t	len)
{
#ifdef __i386__
	mapwindow_t *map;
#endif

	assert(pdst != vm_page_fictitious_addr);
	assert(pdst != vm_page_guard_addr);
	assert((dst_offset + len) <= PAGE_SIZE);	/* copy must not span pages */

#ifdef __i386__
	/*
	 * Preemption stays disabled for the whole window lifetime:
	 * the mapping window is per-cpu state, so migrating to another
	 * cpu mid-copy would leave us using the wrong window.
	 */
	mp_disable_preemption();

	/*
	 * Map the destination frame valid+writable and pre-set the
	 * referenced/modified bits, since we are about to dirty it.
	 */
	map = pmap_get_mapwindow(INTEL_PTE_VALID | INTEL_PTE_RW | (i386_ptob(pdst) & PG_FRAME) |
	                         INTEL_PTE_REF | INTEL_PTE_MOD);

	memcpy((void *) (map->prv_CADDR + (dst_offset & INTEL_OFFMASK)), (void *) src, len);

	pmap_put_mapwindow(map);

	mp_enable_preemption();
#endif
}
/*
*/
/*
 *	pmap_copy_part_rpage:
 *
 *	Copy `len` bytes out of physical page `psrc`, starting at byte
 *	offset `src_offset`, into the virtual address `dst`.
 *
 *	Mirror image of pmap_copy_part_lpage: only implemented for
 *	__i386__, where the source frame is reached through a temporary
 *	per-cpu mapping window; parameters are __unused elsewhere.
 */
void
pmap_copy_part_rpage(
	__unused ppnum_t	        psrc,
	__unused vm_offset_t	src_offset,
	__unused vm_offset_t	dst,
	__unused vm_size_t	len)
{
#ifdef __i386__
	mapwindow_t *map;
#endif

	assert(psrc != vm_page_fictitious_addr);
	assert(psrc != vm_page_guard_addr);
	assert((src_offset + len) <= PAGE_SIZE);	/* copy must not span pages */

#ifdef __i386__
	/* per-cpu mapping window — keep preemption off while it is in use */
	mp_disable_preemption();

	/*
	 * Map the source frame; REF is set but not MOD, since the page
	 * is only read through this window.
	 */
	map = pmap_get_mapwindow(INTEL_PTE_VALID | INTEL_PTE_RW | (i386_ptob(psrc) & PG_FRAME) |
	                         INTEL_PTE_REF);

	memcpy((void *) dst, (void *) (map->prv_CADDR + (src_offset & INTEL_OFFMASK)), len);

	pmap_put_mapwindow(map);

	mp_enable_preemption();
#endif
}
/*
*
* Convert a kernel virtual address to a physical address
*/
/*
 *	kvtophys:
 *
 *	Convert a kernel virtual address to a 64-bit physical address.
 *	Returns 0 when the address has no translation in the kernel pmap
 *	(pmap_find_phys returning page number 0 is treated as "unmapped").
 */
addr64_t
kvtophys(
	vm_offset_t addr)
{
	pmap_paddr_t pa;

	/* look up the physical page number and shift it to a byte address */
	pa = ((pmap_paddr_t)pmap_find_phys(kernel_pmap, addr)) << INTEL_PGSHIFT;
	if (pa)
		pa |= (addr & INTEL_OFFMASK);	/* re-attach the in-page offset */

	return ((addr64_t)pa);
}
+
/*
 *	ml_copy_phys:
 *
 *	Copy `bytes` bytes of physical memory from src64 to dst64.
 *	Both the source and destination ranges must lie entirely within
 *	a single page each; spanning a page boundary is a panic.
 *
 *	Small power-of-two sizes (1/2/4/8) are performed as a single
 *	load/store of that width rather than a byte copy — presumably so
 *	accesses to device/register-backed physical memory keep their
 *	natural width (NOTE(review): inferred from the sized-switch
 *	structure; confirm against callers).
 */
__private_extern__ void ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) {
	void *src, *dst;

	/* window / physmap access must not migrate cpus mid-copy */
	mp_disable_preemption();
#if NCOPY_WINDOWS > 0
	mapwindow_t *src_map, *dst_map;

	/* We rely on MTRRs here */
	src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF));
	dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD));

	src = (void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK));
	dst = (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK));
#elif defined(__x86_64__)
	/* x86_64 maps all of physical memory directly; no window needed */
	src = PHYSMAP_PTOV(src64);
	dst = PHYSMAP_PTOV(dst64);
#endif

	/* ensure we stay within a page */
	if (((((uint32_t)src64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) || ((((uint32_t)dst64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) ) {
		panic("ml_copy_phys spans pages, src: 0x%llx, dst: 0x%llx", src64, dst64);
	}

	switch (bytes) {
	case 1:
		*((uint8_t *) dst) = *((uint8_t *) src);
		break;
	case 2:
		*((uint16_t *) dst) = *((uint16_t *) src);
		break;
	case 4:
		*((uint32_t *) dst) = *((uint32_t *) src);
		break;
	/* Should perform two 32-bit reads */
	case 8:
		*((uint64_t *) dst) = *((uint64_t *) src);
		break;
	default:
		/* arbitrary sizes fall back to a plain byte copy */
		bcopy(src, dst, bytes);
		break;
	}
#if NCOPY_WINDOWS > 0
	pmap_put_mapwindow(src_map);
	pmap_put_mapwindow(dst_map);
#endif
	mp_enable_preemption();
}