/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* This file is used to maintain the virtual to real mappings for a PowerPC machine.
void mapping_verify(void);
void mapping_phys_unused(ppnum_t pa);
+int nx_enabled = 1; /* enable no-execute protection */
+int allow_data_exec = VM_ABI_32; /* 32-bit apps may execute data by default, 64-bit apps may not */
+int allow_stack_exec = VM_ABI_32; /* 32-bit apps may execute from the stack by default, 64-bit apps may not */
+
/*
* ppc_prot translates Mach's representation of protections to that of the PPC hardware.
* For Virtual Machines (VMM), we also provide direct translation entries, where the
* output is simply the low three bits of the key; these occupy entries 8..15, the
* second half of the table.
*
- * ***NOTE*** I've commented out the Mach->PPC translations that would set page-level
- * no-execute, pending updates to the VM layer that will properly enable its
- * use. Bob Abeles 08.02.04
*/
-//unsigned char ppc_prot[16] = { 4, 7, 6, 6, 3, 3, 2, 2, /* Mach -> PPC translations */
-unsigned char ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2, /* Mach -> PPC translations */
+unsigned char ppc_prot[16] = { 4, 7, 6, 6, 3, 3, 2, 2, /* Mach -> PPC translations */
0, 1, 2, 3, 4, 5, 6, 7 }; /* VMM direct translations */
+
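+/*
+ *	For example, a Mach protection of VM_PROT_READ (key 1) yields entry 7, the
+ *	read-only encoding with the page-level no-execute bit (mpN) set, while
+ *	VM_PROT_READ|VM_PROT_EXECUTE (key 5) yields entry 3, the same access with
+ *	execute allowed.  Keys 8..15 select the second half and translate directly.
+ */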
+
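+/*
+ *	getProtPPC - look up the PPC hardware protection for a Mach/VMM key,
+ *	optionally clearing the page-level no-execute bit (mpN).  The strip is
+ *	applied only to Mach keys (0..7), never to VMM direct keys.
+ */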
+vm_prot_t getProtPPC(int key, boolean_t disable_NX) {
+ vm_prot_t prot;
+
+ prot = ppc_prot[key & 0xF];
+
+ if (key <= 7 && disable_NX == TRUE)
+ prot &= ~mpN;
+
+ return (prot);
+}
+
+
/*
* About PPC VSID generation:
*
case mapRtNotFnd:
return (nextva | 1); /* Nothing found to unmap */
default:
- panic("mapping_remove: hw_rem_map failed - pmap = %08X, va = %016llX, code = %08X\n",
+ panic("mapping_remove: hw_rem_map failed - pmap = %p, va = %016llX, code = %p\n",
pmap, va, mp);
break;
}
case mapRtEmpty: /* No guest mappings left to scrub */
break;
default:
- panic("mapping_remove: hw_scrub_guest failed - physent = %08X, code = %08X\n",
+ panic("mapping_remove: hw_scrub_guest failed - physent = %p, code = %p\n",
physent, mp); /* Cry havoc, cry wrack,
at least we die with harness on our backs */
break;
unsigned int pindex, mflags, pattr, wimg, rc;
phys_entry_t *physent;
int nlists, pcf;
+ boolean_t disable_NX = FALSE;
pindex = 0;
pcf = (flags & mmFlgPcfg) >> 24; /* Get the physical page config index */
if(!(pPcfg[pcf].pcfFlags)) { /* Validate requested physical page configuration */
- panic("mapping_make: invalid physical page configuration request - pmap = %08X, va = %016llX, cfg = %d\n",
+ panic("mapping_make: invalid physical page configuration request - pmap = %p, va = %016llX, cfg = %d\n",
pmap, va, pcf);
}
psmask = (1ULL << pPcfg[pcf].pcfPSize) - 1; /* Mask to isolate any offset into a page */
if(va & psmask) { /* Make sure we are page aligned on virtual */
- panic("mapping_make: attempt to map unaligned vaddr - pmap = %08X, va = %016llX, cfg = %d\n",
+ panic("mapping_make: attempt to map unaligned vaddr - pmap = %p, va = %016llX, cfg = %d\n",
pmap, va, pcf);
}
if(((addr64_t)pa << 12) & psmask) { /* Make sure we are page aligned on physical */
- panic("mapping_make: attempt to map unaligned paddr - pmap = %08X, pa = %016llX, cfg = %d\n",
+ panic("mapping_make: attempt to map unaligned paddr - pmap = %p, pa = %08X, cfg = %d\n",
pmap, pa, pcf);
}
mp->u.mpBSize = size; /* Set the size */
mp->mpPte = 0; /* Set the PTE invalid */
mp->mpPAddr = pa; /* Set the physical page number */
- mp->mpVAddr = (va & ~mpHWFlags) | (wimg << 3) /* Add the protection and attributes to the field */
- | ((PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit)?
- getProtPPC(prot) : (getProtPPC(prot) & 0x3)); /* Mask off no-execute control for 32-bit machines */
-
+
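+	/* Strip page-level no-execute if NX is disabled globally or for this pmap */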
+ if ( !nx_enabled || (pmap->pmapFlags & pmapNXdisabled) )
+ disable_NX = TRUE;
+
+ mp->mpVAddr = (va & ~mpHWFlags) | (wimg << 3) | getProtPPC(prot, disable_NX); /* Add the protection and attributes to the field */
+
while(1) { /* Keep trying... */
colladdr = hw_add_map(pmap, mp); /* Go add the mapping to the pmap */
rc = colladdr & mapRetCode; /* Separate return code */
return (colladdr | mapRtSmash); /* Return colliding address, with some dirt added to avoid
confusion if effective address is 0 */
default:
- panic("mapping_make: hw_add_map failed - collision addr = %016llX, code = %02X, pmap = %08X, va = %016llX, mapping = %08X\n",
+ panic("mapping_make: hw_add_map failed - collision addr = %016llX, code = %02X, pmap = %p, va = %016llX, mapping = %p\n",
colladdr, rc, pmap, va, mp); /* Die dead */
}
mp = hw_find_map(curpmap, curva, nextva); /* Find the mapping for this address */
if((unsigned int)mp == mapRtBadLk) { /* Did we lock up ok? */
- panic("mapping_find: pmap lock failure - rc = %08X, pmap = %08X\n", mp, curpmap); /* Die... */
+ panic("mapping_find: pmap lock failure - rc = %p, pmap = %p\n", mp, curpmap); /* Die... */
}
if(!mp || ((mp->mpFlags & mpType) < mpMinSpecial) || !full) break; /* Are we done looking? */
if((mp->mpFlags & mpType) != mpNest) { /* Don't chain through anything other than a nested pmap */
mapping_drop_busy(mp); /* We have everything we need from the mapping */
- mp = 0; /* Set not found */
+ mp = NULL; /* Set not found */
break;
}
if(nestdepth++ > 64) { /* Have we nested too far down? */
- panic("mapping_find: too many nested pmaps - va = %016llX, curva = %016llX, pmap = %08X, curpmap = %08X\n",
+ panic("mapping_find: too many nested pmaps - va = %016llX, curva = %016llX, pmap = %p, curpmap = %p\n",
va, curva, pmap, curpmap);
}
mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) { /* Change protection of a virtual page */
int ret;
-
- ret = hw_protect(pmap, va, getProtPPC(prot), nextva); /* Try to change the protect here */
+ boolean_t disable_NX = FALSE;
+
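+	/* As in mapping_make, honor the global and per-pmap no-execute disables */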
+ if ( !nx_enabled || (pmap->pmapFlags & pmapNXdisabled) )
+ disable_NX = TRUE;
+
+ ret = hw_protect(pmap, va, getProtPPC(prot, disable_NX), nextva); /* Try to change the protect here */
switch (ret) { /* Decode return code */
break;
default:
- panic("mapping_protect: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, va);
+ panic("mapping_protect: hw_protect failed - rc = %d, pmap = %p, va = %016llX\n", ret, pmap, va);
}
*
* This routine takes a physical entry and runs through all mappings attached to it and changes
* the protection. If there are PTEs associated with the mappings, they will be invalidated before
- * the protection is changed. There is no limitation on changes, e.g.,
- * higher to lower, lower to higher.
+ * the protection is changed. There is no limitation on changes, e.g., higher to lower, lower to
+ * higher; however, changes to execute protection are ignored.
*
* Any mapping that is marked permanent is not changed
*
unsigned int pindex;
phys_entry_t *physent;
-
+
physent = mapping_phys_lookup(pa, &pindex); /* Get physical entry */
if(!physent) { /* Did we find the physical page? */
panic("mapping_protect_phys: invalid physical page %08X\n", pa);
}
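+	/* Per the note above, this path leaves execute protection unchanged */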
hw_walk_phys(physent, hwpNoop, hwpSPrtMap, hwpNoop,
- getProtPPC(prot), hwpPurgePTE); /* Set the new protection for page and mappings */
+ getProtPPC(prot, FALSE), hwpPurgePTE); /* Set the new protection for page and mappings */
- return; /* Leave... */
+ return; /* Leave... */
}
* the reference bit.
*/
-phys_entry_t *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) { /* Finds the physical entry for the page */
-
- int i;
+phys_entry_t *
+mapping_phys_lookup(ppnum_t pp, unsigned int *pindex)
+{ /* Finds the physical entry for the page */
+ unsigned int i;
for(i = 0; i < pmap_mem_regions_count; i++) { /* Walk through the list */
if(!(unsigned int)pmap_mem_regions[i].mrPhysTab) continue; /* Skip any empty lists */
}
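+/*
+ *	pmap_valid_page - return TRUE if the page number lies within a known
+ *	physical memory region, i.e., mapping_phys_lookup finds a physical
+ *	entry for it.
+ */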
+boolean_t
+pmap_valid_page(ppnum_t pn) {
+ unsigned int tmp;
+ return (mapping_phys_lookup(pn, &tmp) != 0);
+}
/*
splx(s); /* Restore 'rupts */
for(; allocsize > 0; allocsize >>= 1) { /* Try allocating in descending halves */
- retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize); /* Find a virtual address to use */
+ retr = kmem_alloc_kobject(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize); /* Find a virtual address to use */
if((retr != KERN_SUCCESS) && (allocsize == 1)) { /* Did we find any memory at all? */
break;
}
}
mbn = mapCtl.mapcrel; /* Get first pending release block */
- mapCtl.mapcrel = 0; /* Dequeue them */
+ mapCtl.mapcrel = NULL; /* Dequeue them */
mapCtl.mapcreln = 0; /* Set count to 0 */
hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */
if(mapCtl.mapcnext == mb) { /* Are we first on the list? */
mapCtl.mapcnext = mb->nextblok; /* Unchain us */
- if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* If last, remove last */
+ if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = NULL; /* If last, remove last */
}
else { /* We're not first */
for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) { /* Search for our block */
if(mbn->nextblok == mb) break; /* Is the next one our's? */
}
- if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp);
+ if(!mbn) panic("mapping_free: attempt to release mapping block (%p) not on list\n", mp);
mbn->nextblok = mb->nextblok; /* Dequeue us */
if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn; /* If last, make our predecessor last */
}
case mapRtNotFnd:
break;
default:
- panic("mapping_alloc: hw_purge_map failed - pmap = %08X, va = %16llX, code = %08X\n", ckpmap, va, mp);
+ panic("mapping_alloc: hw_purge_map failed - pmap = %p, va = %16llX, code = %p\n", ckpmap, va, mp);
break;
}
- if (mapRtNotFnd == ((unsigned int)mp & mapRetCode))
+ if (mapRtNotFnd == ((unsigned int)mp & mapRetCode)) {
if (do_rescan)
do_rescan = FALSE;
else
break;
+ }
va = nextva;
}
if ( !big ) { /* if we need a small (64-byte) mapping */
if(!(mindx = mapalc1(mb))) /* Allocate a 1-bit slot */
- panic("mapping_alloc - empty mapping block detected at %08X\n", mb);
+ panic("mapping_alloc - empty mapping block detected at %p\n", mb);
}
if(mindx < 0) { /* Did we just take the last one */
mindx = -mindx; /* Make positive */
mapCtl.mapcnext = mb->nextblok; /* Remove us from the list */
- if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* Removed the last one */
+ if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = NULL; /* Removed the last one */
}
mapCtl.mapcfree--; /* Decrement free count */
}
else { /* Add to the free list */
- mb->nextblok = 0; /* We always add to the end */
+ mb->nextblok = NULL; /* We always add to the end */
mapCtl.mapcfree += MAPPERBLOK; /* Bump count */
if(!((unsigned int)mapCtl.mapcnext)) { /* First entry on list? */
splx(s); /* Restore 'rupts */
for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
- retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
+ retr = kmem_alloc_kobject(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
if(retr != KERN_SUCCESS) /* Did we get some memory? */
break;
mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize on to the release queue */
#endif
for(i = 0; i < nmapb; i++) { /* Allocate 'em all */
- retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
+ retr = kmem_alloc_kobject(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */
if(retr != KERN_SUCCESS) { /* Did we get some memory? */
panic("Whoops... Not a bit of wired memory left for anyone\n");
}
}
-/*
- * phystokv(addr)
- *
- * Convert a physical address to a kernel virtual address if
- * there is a mapping, otherwise return NULL
- */
-
-vm_offset_t phystokv(vm_offset_t pa) {
-
- addr64_t va;
- ppnum_t pp;
-
- pp = pa >> 12; /* Convert to a page number */
-
- if(!(va = mapping_p2v(kernel_pmap, pp))) {
- return 0; /* Can't find it, return 0... */
- }
-
- return (va | (pa & (PAGE_SIZE - 1))); /* Build and return VADDR... */
-
-}
/*
* kvtophys(addr)
*
* Convert a kernel virtual address to a physical address
*/
-vm_offset_t kvtophys(vm_offset_t va) {
+addr64_t kvtophys(vm_offset_t va) {
return pmap_extract(kernel_pmap, va); /* Find mapping and lock the physical entry for this mapping */
* Convert a kernel virtual address to a 64-bit physical address
*/
vm_map_offset_t kvtophys64(vm_map_offset_t va) {
+
ppnum_t pa = pmap_find_phys(kernel_pmap, (addr64_t)va);
if (!pa)
- return (vm_map_offset_t)0;
+ return 0;
return (((vm_map_offset_t)pa) << 12) | (va & 0xfff);
+
}
/*
return; /* Return the result or 0... */
}
+/*
+ * no-op in current ppc implementation
+ */
+void inval_copy_windows(__unused thread_t th)
+{
+}
+
/*
* Copies data between a physical page and a virtual page, or 2 physical. This is used to
*
*/
-kern_return_t hw_copypv_32(addr64_t source, addr64_t sink, unsigned int size, int which) {
-
+kern_return_t
+hw_copypv_32(addr64_t source, addr64_t sink, unsigned int size, int which)
+{
vm_map_t map;
kern_return_t ret;
- addr64_t nextva, vaddr, paddr;
- register mapping_t *mp;
+ addr64_t nextva, vaddr = 0, paddr;
+ mapping_t *mp = NULL;
spl_t s;
unsigned int lop, csize;
int needtran, bothphys;
unsigned int pindex;
phys_entry_t *physent;
- vm_prot_t prot;
+ vm_prot_t prot = 0;
int orig_which;
orig_which = which;
mp = mapping_find(map->pmap, vaddr, &nextva, 1); /* Find and busy the mapping */
if(!mp) { /* Was it there? */
if(getPerProc()->istackptr == 0)
- panic("copypv: No vaild mapping on memory %s %x", "RD", vaddr);
+					panic("copypv: No valid mapping on memory %s %16llx", "RD", vaddr);
splx(s); /* Restore the interrupt level */
ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0); /* Didn't find it, try to fault it in... */
mapping_drop_busy(mp); /* Go ahead and release the mapping for now */
if(getPerProc()->istackptr == 0)
- panic("copypv: No vaild mapping on memory %s %x", "RDWR", vaddr);
+					panic("copypv: No valid mapping on memory %s %16llx", "RDWR", vaddr);
splx(s); /* Restore the interrupt level */
ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0); /* check for a COW area */
s = splhigh(); /* Don't bother from now on */
- mbn = 0; /* Start with none */
+ mbn = NULL; /* Start with none */
for(mb = mapCtl.mapcnext; mb; mb = mb->nextblok) { /* Walk the free chain */
if((mappingblok_t *)(mb->mapblokflags & 0x7FFFFFFF) != mb) { /* Is tag ok? */
- panic("mapping_verify: flags tag bad, free chain; mb = %08X, tag = %08X\n", mb, mb->mapblokflags);
+ panic("mapping_verify: flags tag bad, free chain; mb = %p, tag = %08X\n", mb, mb->mapblokflags);
}
mbn = mb; /* Remember the last one */
}
if(mapCtl.mapcnext && (mapCtl.mapclast != mbn)) { /* Do we point to the last one? */
- panic("mapping_verify: last pointer bad; mb = %08X, mapclast = %08X\n", mb, mapCtl.mapclast);
+ panic("mapping_verify: last pointer bad; mb = %p, mapclast = %p\n", mb, mapCtl.mapclast);
}
relncnt = 0; /* Clear count */
if(!(physent->ppLink & ~(ppLock | ppFlags))) return; /* No one else is here */
- panic("mapping_phys_unused: physical page (%08X) in use, physent = %08X\n", pa, physent);
+ panic("mapping_phys_unused: physical page (%08X) in use, physent = %p\n", pa, physent);
}
-void mapping_hibernate_flush(void)
+void
+mapping_hibernate_flush(void)
{
- int bank;
- unsigned int page;
+ unsigned int page, bank;
struct phys_entry * entry;
for (bank = 0; bank < pmap_mem_regions_count; bank++)