[apple/xnu.git] / osfmk / ppc / mappings.c
index 237e2bc12e55b1b51277592f0640ea9be24db6f8..add0e0eb4069648cdca0cc8fd0c8117e02166ccf 100644 (file)
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  *     This file is used to maintain the virtual to real mappings for a PowerPC machine.
  *
  */
 
-#include <cpus.h>
 #include <debug.h>
 #include <mach_kgdb.h>
 #include <mach_vm_debug.h>
 #include <db_machine_commands.h>
 
-#include <kern/thread.h>
-#include <kern/thread_act.h>
+#include <mach/mach_types.h>
 #include <mach/vm_attributes.h>
 #include <mach/vm_param.h>
+
+#include <kern/kern_types.h>
+#include <kern/thread.h>
+#include <kern/spl.h>
+#include <kern/misc_protos.h>
+
+#include <vm/vm_fault.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_map.h>
 #include <vm/vm_page.h>
-#include <kern/spl.h>
+#include <vm/pmap.h>
 
 
-#include <kern/misc_protos.h>
+#include <ppc/exception.h>
 #include <ppc/misc_protos.h>
 #include <ppc/proc_reg.h>
-
-#include <vm/pmap.h>
 #include <ppc/pmap.h>
-#include <ppc/pmap_internals.h>
 #include <ppc/mem.h>
-
 #include <ppc/new_screen.h>
 #include <ppc/Firmware.h>
 #include <ppc/mappings.h>
 #include <ddb/db_output.h>
 
-#include <ppc/POWERMAC/video_console.h>                /* (TEST/DEBUG) */
+#include <console/video_console.h>             /* (TEST/DEBUG) */
 
 #define PERFTIMES 0
 
 
-#if PERFTIMES && DEBUG
-#define debugLog2(a, b, c) dbgLog2(a, b, c)
-#else
-#define debugLog2(a, b, c)
-#endif
-
 vm_map_t        mapping_map = VM_MAP_NULL;
-#define                MAPPING_MAP_SIZE        33554432        /* 32MB address space */
 
 
-unsigned int   incrVSID = 0;                                                                   /* VSID increment value */
+unsigned int   incrVSID = 0;                                           /* VSID increment value */
 unsigned int   mappingdeb0 = 0;                                                
-unsigned int   mappingdeb1 = 0;                                                
-extern unsigned int    hash_table_size;                                                
-extern vm_offset_t mem_size;
+unsigned int   mappingdeb1 = 0;
+int ppc_max_adrsp;                                                                     /* Maximum address spaces */                    
+                               
+addr64_t               *mapdebug;                                                      /* (BRINGUP) */
+extern unsigned int DebugWork;                                         /* (BRINGUP) */
+                                               
+void mapping_verify(void);
+void mapping_phys_unused(ppnum_t pa);
+
+int nx_enabled = 0;                    /* enable no-execute protection */
+
 /*
- *     ppc_prot translates from the mach representation of protections to the PPC version.
- *  We also allow for a direct setting of the protection bits. This extends the mach
- *     concepts to allow the greater control we need for Virtual Machines (VMM).
- *     Calculation of it like this saves a memory reference - and maybe a couple of microseconds.
- *     It eliminates the used of this table.
- *     unsigned char ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2, 0, 1, 2, 3, 0, 1, 2, 3 };
+ *  ppc_prot translates Mach's representation of protections to that of the PPC hardware.
+ *  For Virtual Machines (VMM), we also provide translation entries where the output is
+ *  the same as the input, allowing direct specification of PPC protections. Mach's 
+ *     representations are always in the range 0..7, so they always fall into the first
+ *	8 table entries; direct translations are placed in the range 8..15, so they fall into
+ *  the second half of the table.
+ *
  */
+unsigned char ppc_prot[16] = { 4, 7, 6, 6, 3, 3, 2, 2,         /* Mach -> PPC translations */
+                               0, 1, 2, 3, 4, 5, 6, 7 };       /* VMM direct  translations */
+
+
+
+vm_prot_t getProtPPC(int key, boolean_t disable_NX) {
+        vm_prot_t prot;
+
+       prot = ppc_prot[key & 0xF];
+
+       if (key <= 7 && disable_NX == TRUE)
+               prot &= ~mpN;
+
+       return (prot);
+}
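
Editorial aside, not part of the diff: the lookup above means a Mach protection key (0..7) picks up a hardware encoding from the first half of the table, a VMM key (8..15) passes through unchanged, and disable_NX only strips the no-execute bit for Mach keys. A minimal sketch of that behaviour, assuming only getProtPPC() and the standard Mach VM_PROT_* bits already visible in this file (the helper name is hypothetical):

static void getprot_example(void)                       /* Illustrative only */
{
	vm_prot_t rw, direct, rw_nx_off;

	rw = getProtPPC(VM_PROT_READ | VM_PROT_WRITE, FALSE);        /* key 3 -> ppc_prot[3] (== 6) */
	direct = getProtPPC(8 + 2, FALSE);                            /* VMM direct key -> 2, unchanged */
	rw_nx_off = getProtPPC(VM_PROT_READ | VM_PROT_WRITE, TRUE);   /* same as rw, but with mpN cleared */

	(void)rw; (void)direct; (void)rw_nx_off;
}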
 
 
-#define ppc_prot(p) ((0xE4E4AFAC >> (p << 1)) & 3)
 
 /*
  *                     About PPC VSID generation:
 
@@ -148,893 +172,602 @@ extern vm_offset_t mem_size;
 
 void mapping_init(void) {
 
 
-       unsigned int tmp;
+       unsigned int tmp, maxeff, rwidth;
+       
+       ppc_max_adrsp = maxAdrSp;                                                                       /* Set maximum address spaces */                        
+       
+       maxeff = 32;                                                                                            /* Assume 32-bit */
+       if(PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit) maxeff = 64;      /* Is this a 64-bit machine? */
        
        
-       __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */
+       rwidth = PerProcTable[0].ppe_vaddr->pf.pfMaxVAddr - maxAdrSpb;          /* Reduce address width by width of address space ID */
+       if(rwidth > maxeff) rwidth = maxeff;                                            /* If we still have more virtual than effective, clamp at effective */
+       
+       vm_max_address = 0xFFFFFFFFFFFFFFFFULL >> (64 - rwidth);                /* Get maximum effective address supported */
+       vm_max_physical = 0xFFFFFFFFFFFFFFFFULL >> (64 - PerProcTable[0].ppe_vaddr->pf.pfMaxPAddr);     /* Get maximum physical address supported */
+       
+       if(PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit) {                         /* Are we 64 bit? */
+               tmp = 12;                                                                                               /* Size of hash space */
+       }
+       else {
+               __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */
+               tmp = 32 - tmp;                                                                                 /* Size of hash space */
+       }
 
 
-       incrVSID = 1 << ((32 - tmp + 1) >> 1);                                          /* Get ceiling of sqrt of table size */
-       incrVSID |= 1 << ((32 - tmp + 1) >> 2);                                         /* Get ceiling of quadroot of table size */
+       incrVSID = 1 << ((tmp + 1) >> 1);                                                       /* Get ceiling of sqrt of table size */
+       incrVSID |= 1 << ((tmp + 1) >> 2);                                                      /* Get ceiling of quadroot of table size */
        incrVSID |= 1;                                                                                          /* Set bit and add 1 */
+
        return;
 
 }
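
Editorial note: a worked example of the incrVSID arithmetic above (illustrative, not part of the diff):

/*
 * Worked example: on a 64-bit machine the code above fixes tmp at 12, so
 *     incrVSID = (1 << ((12 + 1) >> 1)) | (1 << ((12 + 1) >> 2)) | 1
 *              = (1 << 6) | (1 << 3) | 1 = 0x49.
 * On a 32-bit machine with a hypothetical 4MB hash table, cntlzw returns 9,
 * tmp = 32 - 9 = 23, and incrVSID = (1 << 12) | (1 << 6) | 1 = 0x1041.
 */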
 
 
 /*
- *             mapping_remove(pmap_t pmap, vm_offset_t va);
- *                     Given a pmap and virtual address, this routine finds the mapping and removes it from
- *                     both its PTEG hash list and the physical entry list.  The mapping block will be added to
+ *             mapping_remove(pmap_t pmap, addr64_t va);
+ *                     Given a pmap and virtual address, this routine finds the mapping and unmaps it.
+ *                     The mapping block will be added to
  *                     the free list.  If the free list threshold is reached, garbage collection will happen.
- *                     We also kick back a return code to say whether or not we had one to remove.
- *
- *                     We have a strict ordering here:  the mapping must be removed from the PTEG hash list before
- *                     it can be removed from the physical entry list.  This allows us to get by with only the PTEG
- *                     hash lock at page fault time. The physical entry lock must be held while we remove the mapping 
- *                     from both lists. The PTEG lock is one of the lowest level locks.  No PTE fault, interruptions,
- *                     losing control, getting other locks, etc., are allowed when you hold it. You do, and you die.
- *                     It's just that simple!
- *
- *                     When the phys_entry lock is held, the mappings chained to that one are guaranteed to stay around.
- *                     However, a mapping's order on the PTEG hash chain is not.  The interrupt handler uses the PTEG
- *                     lock to control the hash cahin and may move the position of the mapping for MRU calculations.
- *
- *                     Note that mappings do not need to point to a physical entry. When they don't, it indicates 
- *                     the mapping is outside of physical memory and usually refers to a memory mapped device of
- *                     some sort.  Naturally, we can't lock what we don't have, so the phys entry lock and unlock
- *                     routines return normally, but don't do anything.
- */
-
-boolean_t mapping_remove(pmap_t pmap, vm_offset_t va) {                        /* Remove a single mapping for this VADDR 
-                                                                                                                                  Returns TRUE if a mapping was found to remove */
-
-       mapping         *mp, *mpv;
-       register blokmap *blm;
-       spl_t           s;
-       unsigned int *useadd, *useaddr;
-       int i;
-       
-       debugLog2(1, va, pmap->space);                                                          /* start mapping_remove */
-
-       s=splhigh();                                                                                            /* Don't bother me */
-       
-       mp = hw_lock_phys_vir(pmap->space, va);                                         /* Lock the physical entry for this mapping */
-
-       if(!mp) {                                                                                                       /* Did we find one? */
-               splx(s);                                                                                        /* Allow 'rupts now */
-               if(mp = (mapping *)hw_rem_blk(pmap, va, va)) {                  /* No normal pages, try to remove an odd-sized one */
-                       
-                       if((unsigned int)mp & 1) {                                                      /* Make sure we don't unmap a permanent one */
-                               blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFC));            /* Get virtual address */
-                               panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
-                                       pmap, va, blm);
-                       }
-                       while ((unsigned int)mp & 2)
-                               mp = (mapping *)hw_rem_blk(pmap, va, va);
-#if 0
-                       blm = (blokmap *)hw_cpv(mp);                                            /* (TEST/DEBUG) */
-                       kprintf("mapping_remove: removed block map - bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",       /* (TEST/DEBUG) */
-                        blm, blm->start, blm->end, blm->PTEr);
-#endif
-                       mapping_free(hw_cpv(mp));                                                       /* Release it */
-                       debugLog2(2, 1, 0);                                                                     /* End mapping_remove */
-                       return TRUE;                                                                            /* Tell them we did it */
-               }
-               debugLog2(2, 0, 0);                                                                             /* end mapping_remove */
-               return FALSE;                                                                                   /* Didn't find any, return FALSE... */
-       }
-       if((unsigned int)mp&1) {                                                                        /* Did we timeout? */
-               panic("mapping_remove: timeout locking physical entry\n");      /* Yeah, scream about it! */
-               splx(s);                                                                                                /* Restore the interrupt level */
-               return FALSE;                                                                                   /* Bad hair day, return FALSE... */
-       }
-       
-       mpv = hw_cpv(mp);                                                                                       /* Get virtual address of mapping */
-#if DEBUG
-       if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
-#else
-       (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);       /* Decrement the resident page count */
-#endif
-       useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask];       /* Point to slot to bump */
-       useaddr = (unsigned int *)((unsigned int)useadd & -4);          /* Round down to word */
-       (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);     /* Increment the even or odd slot */
-
-#if 0
-       for(i = 0; i < (pmapUsageMask + 1); i++) {                                      /* (TEST/DEBUG) */
-               if((mpv->pmap->pmapUsage[i]) > 8192) {                                  /* (TEST/DEBUG) */
-                       panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
-                               i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
-               }
-       }
-#endif
-       
-       hw_rem_map(mp);                                                                                         /* Remove the corresponding mapping */
-       
-       if(mpv->physent)hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);     /* Unlock physical entry associated with mapping */
-       
-       splx(s);                                                                                                        /* Was there something you needed? */
-               
-       mapping_free(mpv);                                                                                      /* Add mapping to the free list */
-       debugLog2(2, 1, 0);                                                                                     /* end mapping_remove */
-       return TRUE;                                                                                            /* Tell them we did it */
-}
-
-/*
- *             mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) - release all mappings for this physent for the specified map
  *
- *             This guy releases any mappings that exist for a physical page on a specified map.
- *             We get the lock on the phys_entry, and hold it through out this whole routine.
- *             That way, no one can change the queue out from underneath us.  We keep fetching
- *             the physents mapping anchor until it is null, then we're done.  
+ *                     We also pass back the next higher mapped address. This is done so that the higher level
+ *                     pmap_remove function can release a range of addresses simply by calling mapping_remove
+ *                     in a loop until it finishes the range or is returned a vaddr of 0.
  *
- *             For each mapping, we call the remove routine to remove it from the PTEG hash list and 
- *             decriment the pmap's residency count.  Then we release the mapping back to the free list.
+ *                     Note that if the mapping is not found, we return the next VA ORed with 1
  *
  */
  *
-void mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) {          /* Remove all mappings from specified pmap for this physent */
 
 
-       mapping         *mp, *mp_next, *mpv;
-       spl_t           s;
-       unsigned int *useadd, *useaddr, uindx;
-       int i;
-               
-       s=splhigh();                                                                    /* Don't bother me */
-       
-       if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {      /* Lock the physical entry */
-               panic("\nmapping_purge_pmap: Timeout attempting to lock physical entry at %08X: %08X %08X\n", 
-                       pp, pp->phys_link, pp->pte1);   /* Complain about timeout */
-       }
+addr64_t mapping_remove(pmap_t pmap, addr64_t va) {            /* Remove a single mapping for this VADDR;
+                                                                                                                  returns the next mapped VA, with the low bit set if no mapping was found */
 
 
-       mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);
+       mapping_t       *mp;
+       addr64_t        nextva;
+       ppnum_t         pgaddr;
        
        
-       while(mp) {     /* Keep going so long as there's another */
-
-               mpv = hw_cpv(mp);                                       /* Get the virtual address */
-               if(mpv->pmap != pmap) {
-                       mp = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
-                       continue;
-               }
-#if DEBUG
-               if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
-#else
-               (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);       /* Decrement the resident page count */
-#endif
-
-               uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7);    /* Join seg # and top 2 bits of API */
-               useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx];  /* Point to slot to bump */
-               useaddr = (unsigned int *)((unsigned int)useadd & -4);  /* Round down to word */
-               (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Incr the even or odd slot */
-
+       va &= ~PAGE_MASK;                                                                       /* Scrub noise bits */
        
        
+       do {                                                                                            /* Keep trying until we truly fail */
+               mp = hw_rem_map(pmap, va, &nextva);                             /* Remove a mapping from this pmap */
+       } while (mapRtRemove == ((unsigned int)mp & mapRetCode));
        
        
-               mp_next = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
-               hw_rem_map(mp);                                         /* Remove the mapping */
-               mapping_free(mpv);                                      /* Add mapping to the free list */
-               mp = mp_next;
+       switch ((unsigned int)mp & mapRetCode) {
+               case mapRtOK:
+                       break;                                                                          /* Mapping removed */
+               case mapRtNotFnd:
+                       return (nextva | 1);                                            /* Nothing found to unmap */
+               default:
+                       panic("mapping_remove: hw_rem_map failed - pmap = %08X, va = %016llX, code = %08X\n",
+                               pmap, va, mp);
+                       break;
        }
-               
-       hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);       /* We're done, unlock the physical entry */
-       splx(s);
-       return;
-}
-/*
- *             mapping_purge(struct phys_entry *pp) - release all mappings for this physent to the free list 
- *
- *             This guy releases any mappings that exist for a physical page.
- *             We get the lock on the phys_entry, and hold it through out this whole routine.
- *             That way, no one can change the queue out from underneath us.  We keep fetching
- *             the physents mapping anchor until it is null, then we're done.  
- *
- *             For each mapping, we call the remove routine to remove it from the PTEG hash list and 
- *             decriment the pmap's residency count.  Then we release the mapping back to the free list.
- *
- */
-void mapping_purge(struct phys_entry *pp) {                                            /* Remove all mappings for this physent */
 
 
-       mapping         *mp, *mpv;
-       spl_t           s;
-       unsigned int *useadd, *useaddr, uindx;
-       int i;
-               
-       s=splhigh();                                                                                            /* Don't bother me */
-       debugLog2(3, pp->pte1, 0);                                                                      /* start mapping_purge */
+       pgaddr = mp->mpPAddr;                                                           /* Get page number from mapping */
        
        
-       if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {              /* Lock the physical entry */
-               panic("\nmapping_purge: Timeout attempting to lock physical entry at %08X: %08X %08X\n", 
-                       pp, pp->phys_link, pp->pte1);   /* Complain about timeout */
-       }
+       mapping_free(mp);                                                                       /* Add mapping to the free list */
        
        
-       while(mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) {    /* Keep going so long as there's another */
-
-               mpv = hw_cpv(mp);                                                                               /* Get the virtual address */
-#if DEBUG
-               if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
-#else
-               (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);       /* Decrement the resident page count */
-#endif
-
-               uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7);    /* Join segment number and top 2 bits of the API */
-               useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx];  /* Point to slot to bump */
-               useaddr = (unsigned int *)((unsigned int)useadd & -4);  /* Round down to word */
-               (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);     /* Increment the even or odd slot */
-
-#if 0
-       for(i = 0; i < (pmapUsageMask + 1); i++) {                                      /* (TEST/DEBUG) */
-               if((mpv->pmap->pmapUsage[i]) > 8192) {                                  /* (TEST/DEBUG) */
-                       panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
-                               i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
+       if ((pmap->pmapFlags & pmapVMhost) && pmap->pmapVmmExt) {
+                                                                                                               /* If this is an assisted host, scrub any guest mappings */
+               unsigned int  idx;
+               phys_entry_t *physent = mapping_phys_lookup(pgaddr, &idx);
+                                                                                                               /* Get physent for our physical page */
+               if (!physent) {                                                                 /* No physent, could be in I/O area, so exit */
+                       return (nextva);
                }
-       }
-#endif
-       
-       
-               hw_rem_map(mp);                                                                                 /* Remove the mapping */
-               mapping_free(mpv);                                                                              /* Add mapping to the free list */
-       }
                
                
-       hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);       /* We're done, unlock the physical entry */
-       
-       debugLog2(4, pp->pte1, 0);                                                                      /* end mapping_purge */
-       splx(s);                                                                                                        /* Was there something you needed? */
-       return;                                                                                                         /* Tell them we did it */
-}
+               do {                                                                                    /* Iterate 'till all guest mappings are gone */
+                       mp = hw_scrub_guest(physent, pmap);                     /* Attempt to scrub a guest mapping */
+                       switch ((unsigned int)mp & mapRetCode) {
+                               case mapRtGuest:                                                /* Found a guest mapping */
+                               case mapRtNotFnd:                                               /* Mapping was there, but disappeared, must retry */
+                               case mapRtEmpty:                                                /* No guest mappings left to scrub */
+                                       break;
+                               default:
+                                       panic("mapping_remove: hw_scrub_guest failed - physent = %08X, code = %08X\n",
+                                               physent, mp);                                   /* Cry havoc, cry wrack,
+                                                                                                                       at least we die with harness on our backs */
+                                       break;
+                       }
+               } while (mapRtEmpty != ((unsigned int)mp & mapRetCode));
+       }
 
 
+       return nextva;                                                                          /* Tell them we did it */
+}
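
Editorial aside: the block comment above describes the contract that the higher-level remover relies on, namely that each call returns the next mapped address, with the low bit set when nothing was found at the requested address. A minimal caller-side sketch of that loop, assuming only the mapping_remove() interface shown in this hunk (the helper name is hypothetical, not the real pmap_remove()):

static void remove_range_sketch(pmap_t pmap, addr64_t va, addr64_t endva)
{
	addr64_t nextva;

	while (va < endva) {
		nextva = mapping_remove(pmap, va);      /* Unmap whatever is at va, get next mapped VA */
		nextva &= ~1ULL;                        /* Low bit only means "nothing was mapped at va" */
		if (nextva == 0 || nextva >= endva)     /* No further mappings in the range */
			break;
		va = nextva;                            /* Move on to the next mapped address */
	}
}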
 
 /*
- *             mapping_make(pmap, pp, va, spa, prot, attr, locked) - map a virtual address to a real one 
+ *             mapping_make(pmap, va, pa, flags, size, prot) - map a virtual address to a real one 
  *
  *             This routine takes the given parameters, builds a mapping block, and queues it into the 
  *             correct lists.
  *             
- *             The pp parameter can be null.  This allows us to make a mapping that is not
- *             associated with any physical page.  We may need this for certain I/O areas.
+ *             pmap (virtual address)          is the pmap to map into
+ *             va   (virtual address)          is the 64-bit virtual address that is being mapped
+ *             pa      (physical page number)  is the physical page number (i.e., physical address >> 12). This is
+ *                                                                     a 32-bit quantity.
+ *             Flags:
+ *                     block                                   if 1, mapping is a block, size parameter is used. Note: we do not keep 
+ *                                                                     reference and change information or allow protection changes of blocks.
+ *                                                                     Any changes must first unmap and then remap the area.
+ *                     use attribute                   Use specified attributes for map, not defaults for physical page
+ *                     perm                                    Mapping is permanent
+ *                     cache inhibited                 Cache inhibited (used if use attribute or block set )
+ *                     guarded                                 Guarded access (used if use attribute or block set )
+ *             size                                            size of block in pages (not used if not block; stored internally as pages - 1)
+ *             prot                                            VM protection bits
+ *             attr                                            Cachability/Guardedness    
+ *
+ *             Returns 0 if mapping was successful.  Returns vaddr that overlaps/collides.
+ *             Returns 1 for any other failure.
+ *
+ *             Note that we make an assumption that all memory in the range of 0x0000000080000000 to 0x00000000FFFFFFFF is reserved
+ *             for I/O and default the cache attributes appropriately.  The caller is free to set whatever they want, however.
+ *
+ *             If there is any physical page that is not found in the physent table, the mapping is forced to be a
+ *             block mapping of length 1.  This keeps us from trying to update a physent during later mapping use,
+ *             e.g., fault handling.
+ *
  *
  *
- *             If the phys_entry address is null, we neither lock or chain into it.
- *             If locked is 1, we already hold the lock on the phys_entry and won't get nor release it.
  */
  
-mapping *mapping_make(pmap_t pmap, struct phys_entry *pp, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, int attr, boolean_t locked) {        /* Make an address mapping */
+addr64_t mapping_make(pmap_t pmap, addr64_t va, ppnum_t pa, unsigned int flags, unsigned int size, vm_prot_t prot) {   /* Make an address mapping */
 
 
-       register mapping *mp, *mpv;
-       unsigned int *useadd, *useaddr;
-       spl_t           s;
-       int i;
+       register mapping_t *mp;
+       addr64_t colladdr, psmask;
+       unsigned int pindex, mflags, pattr, wimg, rc;
+       phys_entry_t *physent;
+       int nlists, pcf;
+       boolean_t disable_NX = FALSE;
 
 
-       debugLog2(5, va, pa);                                                                           /* start mapping_purge */
-       mpv = mapping_alloc();                                                                          /* Get a spare mapping block */
+       pindex = 0;
        
        
-       mpv->pmap = pmap;                                                                                       /* Initialize the pmap pointer */
-       mpv->physent = pp;                                                                                      /* Initialize the pointer to the physical entry */
-       mpv->PTEr = ((unsigned int)pa & ~(PAGE_SIZE - 1)) | attr<<3 | ppc_prot(prot);   /* Build the real portion of the PTE */
-       mpv->PTEv = (((unsigned int)va >> 1) & 0x78000000) | (pmap->space << 7) | (((unsigned int)va >> 22) & 0x0000003F);      /* Build the VSID */
+       mflags = 0x01000000;                                                                            /* Start building mpFlags field (busy count = 1) */
 
 
-       s=splhigh();                                                                                            /* Don't bother from now on */
+       pcf = (flags & mmFlgPcfg) >> 24;                                                        /* Get the physical page config index */
+       if(!(pPcfg[pcf].pcfFlags)) {                                                            /* Validate requested physical page configuration */
+               panic("mapping_make: invalid physical page configuration request - pmap = %08X, va = %016llX, cfg = %d\n",
+                       pmap, va, pcf);
+       }
+       
+       psmask = (1ULL << pPcfg[pcf].pcfPSize) - 1;                                     /* Mask to isolate any offset into a page */
+       if(va & psmask) {                                                                                       /* Make sure we are page aligned on virtual */
+               panic("mapping_make: attempt to map unaligned vaddr - pmap = %08X, va = %016llX, cfg = %d\n",
+                       pmap, va, pcf);
+       }
+       if(((addr64_t)pa << 12) & psmask) {                                                     /* Make sure we are page aligned on physical */
+               panic("mapping_make: attempt to map unaligned paddr - pmap = %08X, pa = %016llX, cfg = %d\n",
+                       pmap, pa, pcf);
+       }
        
        
-       mp = hw_cvp(mpv);                                                                                       /* Get the physical address of this */
+       mflags |= (pcf << (31-mpPcfgb));                                                        /* Insert physical page configuration index */
 
 
-       if(pp && !locked) {                                                                                     /* Is there a physical entry? Or do we already hold the lock? */
-               if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {      /* Lock the physical entry */
-                       panic("\nmapping_make: Timeout attempting to lock physical entry at %08X: %08X %08X\n", 
-                               pp, pp->phys_link, pp->pte1);                                   /* Complain about timeout */
+       if(!(flags & mmFlgBlock)) {                                                                     /* Is this a block map? */
+
+               size = 1;                                                                                               /* Set size to 1 page if not block */
+        
+               physent = mapping_phys_lookup(pa, &pindex);                             /* Get physical entry */
+               if(!physent) {                                                                                  /* Did we find the physical page? */
+                       mflags |= mpBlock;                                                                      /* Force this to a block if no physent */
+                       pattr = 0;                                                                                      /* Assume normal, non-I/O memory */
+                       if((pa & 0xFFF80000) == 0x00080000) pattr = mmFlgCInhib | mmFlgGuarded; /* If this page is in I/O range, set I/O attributes */
                }
-       }
+               else pattr = ((physent->ppLink & (ppI | ppG)) >> 60);   /* Get the default attributes from physent */
                
                
-       if(pp) {                                                                                                        /* See of there is a physcial entry */
-               mpv->next = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);             /* Move the old anchor to the new mappings forward */
-               pp->phys_link = (mapping *)((unsigned int)mp | (unsigned int)pp->phys_link & PHYS_FLAGS);       /* Point the anchor at us.  Now we're on the list (keep the flags) */
+               if(flags & mmFlgUseAttr) pattr = flags & (mmFlgCInhib | mmFlgGuarded);  /* Use requested attributes */
        }
+       else {                                                                                                          /* This is a block */
+                
+               pattr = flags & (mmFlgCInhib | mmFlgGuarded);                   /* Use requested attributes */
+               mflags |= mpBlock;                                                                              /* Show that this is a block */
        
        
-       hw_add_map(mp, pmap->space, va);                                                        /* Stick it on the PTEG hash list */
-       
-       (void)hw_atomic_add(&mpv->pmap->stats.resident_count, 1);       /* Increment the resident page count */
-       useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask];       /* Point to slot to bump */
-       useaddr = (unsigned int *)((unsigned int)useadd & -4);          /* Round down to word */
-       (void)hw_atomic_add(useaddr, (useaddr == useadd) ? 0x00010000 : 1);     /* Increment the even or odd slot */
-#if 0
-       for(i = 0; i < (pmapUsageMask + 1); i++) {                                      /* (TEST/DEBUG) */
-               if((mpv->pmap->pmapUsage[i]) > 8192) {                                  /* (TEST/DEBUG) */
-                       panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
-                               i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
+               if(size > pmapSmallBlock) {                                                             /* Is it one? */
+                       if(size & 0x00001FFF) return mapRtBadSz;                        /* Fail if bigger than 256MB and not a 32MB multiple */
+                       size = size >> 13;                                                                      /* Convert to 32MB chunks */
+                       mflags = mflags | mpBSu;                                                        /* Show 32MB basic size unit */
                }
        }
-#endif
-
-       if(pp && !locked)hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);      /* If we have one and we didn't hold on entry, unlock the physical entry */
-               
-       splx(s);                                                                                                        /* Ok for interruptions now */
-       debugLog2(6, pmap->space, prot);                                                        /* end mapping_purge */
-       return mpv;                                                                                                     /* Leave... */
-}
-
-
-/*
- *             Enters optimal translations for odd-sized V=F blocks.
- *
- *             Builds a block map for each power-of-two hunk o' address
- *             that exists.  This is specific to the processor type.  
- *             PPC uses BAT register size stuff.  Future PPC might have
- *             something else.
- *
- *             The supplied va is expected to be maxoptimal vs the supplied boundary. We're too
- *             stupid to know otherwise so we only look at the va anyhow, so there...
- *
- */
-void mapping_block_map_opt(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_offset_t bnd, vm_size_t size, vm_prot_t prot, int attr) {   /* Maps optimal autogenned blocks */
-
-       register blokmap *blm, *oblm;
-       unsigned int    pg;
-       unsigned int    maxsize, boundary, leading, trailing, cbsize, minsize, tomin;
-       int                             i, maxshft, nummax, minshft;
-
-#if 1
-       kprintf("mapping_block_map_opt: pmap=%08X; va=%08X; pa=%08X; ; bnd=%08X; size=%08X; prot=%08X; attr=%08X\n",    /* (TEST/DEBUG) */
-        pmap, va, pa, bnd, size, prot, attr);
-#endif
        
        
-       minsize = blokValid ^ (blokValid & (blokValid - 1));    /* Set minimum subblock size */
-       maxsize = 0x80000000 >> cntlzw(blokValid);              /* Set maximum subblock size */
+       wimg = 0x2;                                                                                                     /* Set basic PPC wimg to 0b0010 - Coherent */
+       if(pattr & mmFlgCInhib) wimg |= 0x4;                                            /* Add cache inhibited if we need to */
+       if(pattr & mmFlgGuarded) wimg |= 0x1;                                           /* Add guarded if we need to */
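	/* Editor's note (illustrative, not part of the diff): a default cacheable
	 * page therefore ends up with WIMG = 0b0010 (coherent only), while an I/O
	 * page carrying both mmFlgCInhib and mmFlgGuarded ends up with 0b0111. */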
        
        
-       minshft = 31 - cntlzw(minsize);                                 /* Shift to position minimum size */
-       maxshft = 31 - cntlzw(blokValid);                               /* Shift to position maximum size */
-       
-       leading = ((va + bnd - 1) & -bnd) - va;                 /* Get size of leading area */
-       trailing = size - leading;                                              /* Get size of trailing area */
-       tomin = ((va + minsize - 1) & -minsize) - va;   /* Get size needed to round up to the minimum block size */
-       
-#if 1
-       kprintf("mapping_block_map_opt: bnd=%08X; leading=%08X; trailing=%08X; tomin=%08X\n", bnd, leading, trailing, tomin);           /* (TEST/DEBUG) */
-#endif
-
-       if(tomin)pmap_map_block(pmap, va, pa, tomin, prot, attr, 0); /* Map up to minimum block size */
+       mflags = mflags | (pindex << 16);                                                       /* Stick in the physical entry table index */
        
        
-       va = va + tomin;                                                                /* Adjust virtual start */
-       pa = pa + tomin;                                                                /* Adjust physical start */
-       leading = leading - tomin;                                              /* Adjust leading size */
+       if(flags & mmFlgPerm) mflags |= mpPerm;                                         /* Set permanent mapping */
        
        
-/*
- *     Some of this code is very classic PPC.  We need to fix this up.
- */
-       leading = leading >> minshft;                                   /* Position for bit testing */
-       cbsize = minsize;                                                               /* Set the minimum size */
+       size = size - 1;                                                                                        /* Change size to offset */
+       if(size > 0xFFFF) return mapRtBadSz;                                            /* Leave if size is too big */
        
        
-       for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, small to large */
-               
-               if(leading & 1) {               
-                       pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */
-                       pa = pa + cbsize;                                               /* Bump up physical address */
-                       va = va + cbsize;                                               /* Bump up virtual address */
-               }
+       nlists = mapSetLists(pmap);                                                                     /* Set number of lists this will be on */
        
        
-               leading = leading >> 1;                                         /* Shift up to next size */
-               cbsize = cbsize << 1;                                           /* Here too */
+       mp = mapping_alloc(nlists);                                                                     /* Get a spare mapping block with this many lists */
 
 
-       }
-       
-       nummax = trailing >> maxshft;                                   /* Get number of max size blocks left */
-       for(i=0; i < nummax - 1; i++) {                                 /* Account for all max size block left but 1 */
-               pmap_map_block(pmap, va, pa, maxsize, prot, attr, 0); /* Map up to next boundary */
+                                                                /* the mapping is zero except that the mpLists field is set */
+       mp->mpFlags |= mflags;                                                                          /* Add in the rest of the flags to mpLists */
+       mp->mpSpace = pmap->space;                                                                      /* Set the address space/pmap lookup ID */
+       mp->u.mpBSize = size;                                                                           /* Set the size */
+       mp->mpPte = 0;                                                                                          /* Set the PTE invalid */
+       mp->mpPAddr = pa;                                                                                       /* Set the physical page number */
 
 
-               pa = pa + maxsize;                                                      /* Bump up physical address */
-               va = va + maxsize;                                                      /* Bump up virtual address */
-               trailing -= maxsize;                                            /* Back off what we just did */
-       }
-       
-       cbsize = maxsize;                                                               /* Start at maximum size */
-       
-       for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, high to low */
+       if ( !nx_enabled || (pmap->pmapFlags & pmapNXdisabled) )
+               disable_NX = TRUE;
+
+       mp->mpVAddr = (va & ~mpHWFlags) | (wimg << 3) | getProtPPC(prot, disable_NX);                   /* Add the protection and attributes to the field */
+         
+       while(1) {                                                                                                      /* Keep trying... */
+               colladdr = hw_add_map(pmap, mp);                                                /* Go add the mapping to the pmap */
+               rc = colladdr & mapRetCode;                                                             /* Separate return code */
+               colladdr &= ~mapRetCode;                                                                /* Clean up collision effective address */
+               
+               switch (rc) {
+                       case mapRtOK:
+                               return mapRtOK;                                                                 /* Mapping added successfully */
+                               
+                       case mapRtRemove:                                                                       /* Remove in progress */
+                               (void)mapping_remove(pmap, colladdr);                   /* Lend a helping hand to another CPU doing block removal */
+                               continue;                                                                               /* Retry mapping add */
+                               
+                       case mapRtMapDup:                                                                       /* Identical mapping already present */
+                               mapping_free(mp);                                                               /* Free duplicate mapping */
+                               return mapRtOK;                                                                         /* Return success */
+                               
+                       case mapRtSmash:                                                                        /* Mapping already present but does not match new mapping */
+                               mapping_free(mp);                                                               /* Free duplicate mapping */
+                               return (colladdr | mapRtSmash);                                 /* Return colliding address, with some dirt added to avoid
+                                                                                                                                  confusion if effective address is 0 */
+                       default:
+                               panic("mapping_make: hw_add_map failed - collision addr = %016llX, code = %02X, pmap = %08X, va = %016llX, mapping = %08X\n",
+                                       colladdr, rc, pmap, va, mp);                            /* Die dead */
+               }
                
                
-               if(trailing & cbsize) { 
-                       trailing &= ~cbsize;                                    /* Remove the block we are allocating */                                                
-                       pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */
-                       pa = pa + cbsize;                                               /* Bump up physical address */
-                       va = va + cbsize;                                               /* Bump up virtual address */
-               }       
-               cbsize = cbsize >> 1;                                           /* Next size down */
        }
        
        }
        
-       if(trailing) pmap_map_block(pmap, va, pa, trailing, prot, attr, 0); /* Map up to end */
-       
-       return;                                                                                                 /* Return */
+       return 1;                                                                                                       /* Unreachable, but pleases compiler */
 }
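
Editorial aside: for the common case described in the header comment above (a single pageable page with default attributes), the new interface reduces to one call whose return value is mapRtOK, mapRtBadSz, or the colliding virtual address. A sketch under those assumptions; the helper name and the kern_return_t mapping are illustrative only:

static kern_return_t map_one_page_sketch(pmap_t pmap, addr64_t va, ppnum_t pa)
{
	addr64_t colva;

	colva = mapping_make(pmap, va, pa,
	                     0,                         /* flags: not a block, default attributes */
	                     1,                         /* size: ignored for non-block mappings */
	                     VM_PROT_READ | VM_PROT_WRITE);

	if (colva == mapRtOK)    return KERN_SUCCESS;            /* Mapping entered */
	if (colva == mapRtBadSz) return KERN_INVALID_ARGUMENT;   /* Block size rejected */

	/* Anything else is the address of an existing, conflicting mapping
	 * (with mapRtSmash folded into the low bits). */
	return KERN_MEMORY_PRESENT;
}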
 
 
 /*
- *             Enters translations for odd-sized V=F blocks.
+ *             mapping *mapping_find(pmap, va, *nextva, full) - Finds a mapping 
  *
- *             Checks to insure that the request is at least ODDBLKMIN in size.  If smaller, the request
- *             will be split into normal-sized page mappings.
+ *             Looks up the vaddr and returns the mapping and the next mapped va
+ *             If full is true, it will descend through all nested pmaps to find actual mapping
  *
- *             The higher level VM map should be locked to insure that we don't have a
- *             double diddle here.
+ *             Must be called with interruptions disabled or we can hang trying to remove found mapping.
  *
- *             We panic if we get a block that overlaps with another. We do not merge adjacent
- *             blocks because removing any address within a block removes the entire block and if
- *             would really mess things up if we trashed too much.
+ *             Returns 0 if not found and the virtual address of the mapping if it is
+ *             Note that the mappings busy count is bumped. It is the responsibility of the caller
+ *             to drop the count.  If this is not done, any attempt to remove the mapping will hang.
  *
- *             Once a block is mapped, it is unmutable, that is, protection, catch mode, etc. can
- *             not be changed.  The block must be unmapped and then remapped with the new stuff.
- *             We also do not keep track of reference or change flags.
+ *             NOTE: The nextva field is not valid when full is TRUE.
  *
  *
- *             Blocks are kept in MRU order anchored from the pmap. The chain is traversed only
- *             with interruptions and translation disabled and under the control of the lock located
- *             in the first block map. MRU is used because it is expected that the same entry 
- *             will be accessed repeatedly while PTEs are being generated to cover those addresses.
  *
  */
  
  *
-
-       register blokmap *blm, *oblm, *oblm_virt;;
-       unsigned int pg;
-
-#if 0
-       kprintf("pmap_map_block: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",       /* (TEST/DEBUG) */
-        pmap, va, pa, size, prot, attr);
-#endif
-
-       if(size < ODDBLKMIN) {                                                                  /* Is this below the minimum size? */
-               for(pg = 0; pg < size; pg += PAGE_SIZE) {                       /* Add all pages in this block */
-                       mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
-#if 0
-                       kprintf("pmap_map_block: mm: va=%08X; pa=%08X\n",       /* (TEST/DEBUG) */
-                               va + pg, pa + pg);
-#endif
-               }
-               return;                                                                                         /* All done */
-       }
-       
-       blm = (blokmap *)mapping_alloc();                                               /* Get a block mapping */
-       
-       blm->start = (unsigned int)va & -PAGE_SIZE;                             /* Get virtual block start */
-       blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1);   /* Get virtual block end */
-       blm->current = 0;
-       blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */
-       blm->space = pmap->space;                                                               /* Set the space (only needed for remove) */
-       blm->blkFlags = flags;                                                                  /* Set the block's flags */
-       
-#if 0
-       kprintf("pmap_map_block: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",   /* (TEST/DEBUG) */
-        blm, blm->start, blm->end, blm->PTEr);
-#endif
-
-       blm = (blokmap *)hw_cvp((mapping *)blm);                                /* Get the physical address of this */
-
-#if 0
-       kprintf("pmap_map_block: bm (real)=%08X; pmap->bmaps=%08X\n",   /* (TEST/DEBUG) */
-        blm, pmap->bmaps);
-#endif
-
-       do {
-               oblm = hw_add_blk(pmap, blm); 
-               if ((unsigned int)oblm & 2) {
-                       oblm_virt = (blokmap *)hw_cpv((mapping *)((unsigned int)oblm & 0xFFFFFFFC));
-                       mapping_remove(pmap, oblm_virt->start);
-               };
-       } while ((unsigned int)oblm & 2);
-
-       if (oblm) {
-               oblm = (blokmap *)hw_cpv((mapping *) oblm);                             /* Get the old block virtual address */
-               blm = (blokmap *)hw_cpv((mapping *)blm);                                /* Back to the virtual address of this */
-               if((oblm->start != blm->start) ||                                       /* If we have a match, then this is a fault race and */
-                               (oblm->end != blm->end) ||                              /* is acceptable */
-                               (oblm->PTEr != blm->PTEr))
-                       panic("pmap_map_block: block map overlap - blm = %08X\n", oblm);/* Otherwise, Squeak loudly and carry a big stick */
-               mapping_free((struct mapping *)blm);
-       }
-
-#if 0
-       kprintf("pmap_map_block: pmap->bmaps=%08X\n",                   /* (TEST/DEBUG) */
-        blm, pmap->bmaps);
-#endif
-
-       return;                                                                                                 /* Return */
-}
-
+mapping_t *mapping_find(pmap_t pmap, addr64_t va, addr64_t *nextva, int full) {        /* Find the mapping for an address */
 
 
-/*
- *             Optimally enters translations for odd-sized V=F blocks.
- *
- *             Checks to insure that the request is at least ODDBLKMIN in size.  If smaller, the request
- *             will be split into normal-sized page mappings.
- *
- *             This one is different than pmap_map_block in that it will allocate it's own virtual
- *             target address. Rather than allocating a single block,
- *             it will also allocate multiple blocks that are power-of-two aligned/sized.  This allows
- *             hardware-level mapping that takes advantage of BAT maps or large page sizes.
- *
- *             Most considerations for pmap_map_block apply.
- *
- *
- */
-kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va, 
-       vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {     /* Map an optimal autogenned block */
+       register mapping_t *mp;
+       addr64_t        curva;
+       pmap_t  curpmap;
+       int     nestdepth;
 
 
-       register blokmap *blm, *oblm;
-       unsigned int    pg;
-    kern_return_t      err;
-       unsigned int    bnd;
+       curpmap = pmap;                                                                                         /* Remember entry */
+       nestdepth = 0;                                                                                          /* Set nest depth */
+       curva = (addr64_t)va;                                                                           /* Set current va */
 
 
-#if 1
-       kprintf("pmap_map_block_opt: map=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",     /* (TEST/DEBUG) */
-               map, pa, size, prot, attr);
-#endif
+       while(1) {
 
 
-       if(size < ODDBLKMIN) {                                                                  /* Is this below the minimum size? */
-               err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE);    /* Make us some memories */
-               if(err) {
-#if DEBUG
-                       kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err);        /* Say we died */
-#endif
-                       return(err);                                                                    /* Pass back the error */
+               mp = hw_find_map(curpmap, curva, nextva);                               /* Find the mapping for this address */
+               if((unsigned int)mp == mapRtBadLk) {                                    /* Did we lock up ok? */
+                       panic("mapping_find: pmap lock failure - rc = %08X, pmap = %08X\n", mp, curpmap);       /* Die... */
                }
                }
-#if 1
-               kprintf("pmap_map_block_opt: small; vaddr = %08X\n", *va);      /* (TEST/DEBUG) */
-#endif
+               
+               if(!mp || ((mp->mpFlags & mpType) < mpMinSpecial) || !full) break;              /* Are we done looking? */
 
 
-               for(pg = 0; pg < size; pg += PAGE_SIZE) {                       /* Add all pages in this block */
-                       mapping_make(map->pmap, 0, *va + pg, pa + pg, prot, attr, 0);   /* Map this page on in */
+               if((mp->mpFlags & mpType) != mpNest) {                                  /* Don't chain through anything other than a nested pmap */
+                       mapping_drop_busy(mp);                                                          /* We have everything we need from the mapping */
+                       mp = 0;                                                                                         /* Set not found */
+                       break;
                }
                }
-               return(KERN_SUCCESS);                                                           /* All done */
-       }
-       
-       err = vm_map_block(map, va, &bnd, pa, size, prot);              /* Go get an optimal allocation */
 
 
-       if(err == KERN_INVALID_ADDRESS) {                                               /* Can we try a brute force block mapping? */
-               err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE);    /* Make us some memories */
-               if(err) {
-#if DEBUG
-                       kprintf("pmap_map_block_opt: non-optimal vm_allocate() returned %d\n", err);    /* Say we died */
-#endif
-                       return(err);                                                                    /* Pass back the error */
+               if(nestdepth++ > 64) {                                                                  /* Have we nested too far down? */
+                       panic("mapping_find: too many nested pmaps - va = %016llX, curva = %016llX, pmap = %08X, curpmap = %08X\n",
+                               va, curva, pmap, curpmap);
                }
                }
-#if 1
-               kprintf("pmap_map_block_opt: non-optimal - vaddr = %08X\n", *va);       /* (TEST/DEBUG) */
-#endif
-               pmap_map_block(map->pmap, *va, pa, size, prot, attr, 0);        /* Set up a block mapped area */
-               return KERN_SUCCESS;                                                            /* All done now */
-       }
-
-       if(err != KERN_SUCCESS) {                                                               /* We couldn't get any address range to map this... */
-#if DEBUG
-               kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err);        /* Say we couldn' do it */
-#endif
-               return(err);
+               
+               curva = curva + mp->mpNestReloc;                                                /* Relocate va to new pmap */
+               curpmap = (pmap_t) pmapTrans[mp->mpSpace].pmapVAddr;    /* Get the address of the nested pmap */
+               mapping_drop_busy(mp);                                                                  /* We have everything we need from the mapping */
+               
        }
 
        }
 
-#if 1
-       kprintf("pmap_map_block_opt: optimal - vaddr=%08X; bnd=%08X\n", *va, bnd);      /* (TEST/DEBUG) */
-#endif
-       mapping_block_map_opt(map->pmap, *va, pa, bnd, size, prot, attr);       /* Go build the maps */
-       return(KERN_SUCCESS);                                                                   /* All done */
+       return mp;                                                                                                      /* Return the mapping if we found one */
 }
 
 }
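Editor's illustration (not part of this diff) of how a caller might drive the new mapping_find() interface; scan_range() and its loop bounds are assumptions. Per the note above, nextva is only meaningful when full is FALSE, so the sketch passes 0 and does not chase nested pmaps.

    /* Editor's sketch -- hypothetical caller, not from this file */
    static void
    scan_range(pmap_t pmap, addr64_t start, addr64_t end)
    {
            addr64_t   va = start, nextva;
            mapping_t *mp;

            while (va < end) {
                    mp = mapping_find(pmap, va, &nextva, 0);        /* full = 0: nextva stays valid */
                    if (mp) {                                       /* found a mapping for va */
                            /* ... inspect mp->mpVAddr, mp->mpPAddr, mp->mpFlags ... */
                            mapping_drop_busy(mp);                  /* release the busy count hw_find_map took */
                    }
                    if (nextva <= va) break;                        /* defensive: stop if no forward progress */
                    va = nextva;                                    /* advance to the next candidate address */
            }
    }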
 
-
-#if 0
-
 /*
 /*
- *             Enters translations for odd-sized V=F blocks and merges adjacent or overlapping
- *             areas.
+ *             void mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) - change the protection of a virtual page
  *
  *
- *             Once blocks are merged, they act like one block, i.e., if you remove it,
- *             it all goes...
- *
- *             This can only be used during boot.  Ain't no way we can handle SMP
- *             or preemption easily, so we restrict it.  We don't check either. We
- *             assume only skilled professional programmers will attempt using this
- *             function. We assume no responsibility, either real or imagined, for
- *             injury or death resulting from unauthorized use of this function.
+ *             This routine takes a pmap and virtual address and changes
+ *             the protection.  If there are PTEs associated with the mappings, they will be invalidated before
+ *             the protection is changed. 
  *
  *
- *             No user servicable parts inside. Notice to be removed by end-user only,
- *             under penalty of applicable federal and state laws.
+ *             The change succeeds if the protection is updated or if there is no page mapped at va.  The request is
+ *             silently ignored if the va corresponds to a block mapped area, a nested pmap, or a permanent mapping.
  *
  *
- *             See descriptions of pmap_map_block. Ignore the part where we say we panic for
- *             overlapping areas.  Note that we do panic if we can't merge.
  *
  */
  *
  */
-void pmap_map_block_merge(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {     /* Map an autogenned block */
 
 
-       register blokmap *blm, *oblm;
-       unsigned int pg;
-       spl_t           s;
-
-#if 1
-       kprintf("pmap_map_block_merge: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
-        pmap, va, pa, size, prot, attr);
-#endif
+void
+mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) {  /* Change protection of a virtual page */
 
 
-       s=splhigh();                                                                                            /* Don't bother from now on */
-       if(size < ODDBLKMIN) {                                                                          /* Is this below the minimum size? */
-               for(pg = 0; pg < size; pg += PAGE_SIZE) {                               /* Add all pages in this block */
-                       mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
-               }
-               return;                                                                                                 /* All done */
-       }
-       
-       blm = (blokmap *)mapping_alloc();                                                       /* Get a block mapping */
-       
-       blm->start = (unsigned int)va & -PAGE_SIZE;                                     /* Get virtual block start */
-       blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1);           /* Get virtual block end */
-       blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */
-       
-#if 1
-       kprintf("pmap_map_block_merge: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",     /* (TEST/DEBUG) */
-        blm, blm->start, blm->end, blm->PTEr);
-#endif
+       int     ret;
+       boolean_t disable_NX = FALSE;
 
 
-       blm = (blokmap *)hw_cvp((mapping *)blm);                                        /* Get the physical address of this */
+       if ( !nx_enabled || (pmap->pmapFlags & pmapNXdisabled) )
+               disable_NX = TRUE;
 
 
-#if 1
-       kprintf("pmap_map_block_merge: bm (real)=%08X; pmap->bmaps=%08X\n",     /* (TEST/DEBUG) */
-        blm, pmap->bmaps);
-#endif
+       ret = hw_protect(pmap, va, getProtPPC(prot, disable_NX), nextva);               /* Try to change the protect here */
 
 
-       if(oblm = hw_add_blk(pmap, blm)) {                                                      /* Add to list and make sure we don't overlap anything */
-               panic("pmap_map_block_merge: block map overlap - blm = %08X\n", oblm);  /* Squeak loudly and carry a big stick */
+       switch (ret) {                                                          /* Decode return code */
+       
+               case mapRtOK:                                                   /* Changed */
+               case mapRtNotFnd:                                               /* Didn't find it */
+               case mapRtBlock:                                                /* Block map, just ignore request */
+               case mapRtNest:                                                 /* Nested pmap, just ignore request */
+                       break;
+                       
+               default:
+                       panic("mapping_protect: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, va);
+               
        }
 
        }
 
-#if 1
-       kprintf("pmap_map_block_merge: pmap->bmaps=%08X\n",                     /* (TEST/DEBUG) */
-        blm, pmap->bmaps);
-#endif
-       splx(s);                                                                                                        /* Ok for interruptions now */
-
-       return;                                                                                                         /* Return */
 }
 }
-#endif
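A hedged sketch (editor's addition, not part of the diff) of how a pmap_protect()-style caller could drive the reworked mapping_protect() across a range; protect_range() and its termination checks are illustrative assumptions.

    /* Editor's sketch -- hypothetical caller, not from this file */
    static void
    protect_range(pmap_t pmap, addr64_t sva, addr64_t eva, vm_prot_t prot)
    {
            addr64_t va = sva, nextva;

            while (va < eva) {
                    mapping_protect(pmap, va, prot, &nextva);       /* change it, or silently skip block/nested/permanent */
                    if (nextva <= va) break;                        /* defensive: require forward progress */
                    va = nextva;                                    /* hw_protect reports the next address to try */
            }
    }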
 
 /*
 
 /*
- *             void mapping_protect_phys(phys_entry *pp, vm_prot_t prot) - change the protection of a physical page
+ *             void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) - change the protection of a physical page
  *
  *             This routine takes a physical entry and runs through all mappings attached to it and changes
  *             the protection.  If there are PTEs associated with the mappings, they will be invalidated before
  *
  *             This routine takes a physical entry and runs through all mappings attached to it and changes
  *             the protection.  If there are PTEs associated with the mappings, they will be invalidated before
- *             the protection is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
- *             either (I don't think, maybe I'll change my mind later).  There is no limitation on changes, e.g., 
- *             higher to lower, lower to higher.
+ *             the protection is changed.  There is no limitation on changes, e.g., higher to lower, lower to
+ *             higher; however, changes to execute protection are ignored.
+ *
+ *             Any mapping that is marked permanent is not changed
  *
  *             Phys_entry is unlocked.
  */
 
  *
  *             Phys_entry is unlocked.
  */
 
-void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked) {   /* Change protection of all mappings to page */
-
-       spl_t                           spl;
-       
-       debugLog2(9, pp->pte1, prot);                                                           /* end remap */
-       spl=splhigh();                                                                                          /* No interruptions during this */
-       if(!locked) {                                                                                           /* Do we need to lock the physent? */
-               if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {      /* Lock the physical entry */
-                       panic("\nmapping_protect: Timeout attempting to lock physical entry at %08X: %08X %08X\n", 
-                               pp, pp->phys_link, pp->pte1);                                           /* Complain about timeout */
-               }
-       }       
-
-       hw_prot(pp, ppc_prot(prot));                                                            /* Go set the protection on this physical page */
-
-       if(!locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);   /* We're done, unlock the physical entry */
-       splx(spl);                                                                                                      /* Restore interrupt state */
-       debugLog2(10, pp->pte1, 0);                                                                     /* end remap */
+void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) {        /* Change protection of all mappings to page */
        
        
-       return;                                                                                                         /* Leave... */
-}
-
-/*
- *             void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) - change the protection of a virtual page
- *
- *             This routine takes a pmap and virtual address and changes
- *             the protection.  If there are PTEs associated with the mappings, they will be invalidated before
- *             the protection is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
- *             either (I don't think, maybe I'll change my mind later).  There is no limitation on changes, e.g., 
- *             higher to lower, lower to higher.
- *
- */
-
-void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) { /* Change protection of a virtual page */
+       unsigned int pindex;
+       phys_entry_t *physent;
 
 
-       mapping         *mp, *mpv;
-       spl_t           s;
-
-       debugLog2(9, vaddr, pmap);                                      /* start mapping_protect */
-       s = splhigh();                                                          /* Don't bother me */
-               
-       mp = hw_lock_phys_vir(pmap->space, vaddr);      /* Lock the physical entry for this mapping */
-
-       if(!mp) {                                                                       /* Did we find one? */
-               splx(s);                                                                /* Restore the interrupt level */
-               debugLog2(10, 0, 0);                                            /* end mapping_pmap */
-               return;                                                                 /* Didn't find any... */
-       }
-       if((unsigned int)mp & 1) {                                      /* Did we timeout? */
-               panic("mapping_protect: timeout locking physical entry\n");     /* Yeah, scream about it! */
-               splx(s);                                                                /* Restore the interrupt level */
-               return;                                                                 /* Bad hair day... */
+       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
+       if(!physent) {                                                                                          /* Did we find the physical page? */
+               panic("mapping_protect_phys: invalid physical page %08X\n", pa);
        }
        }
-               
-       hw_prot_virt(mp, ppc_prot(prot));                       /* Go set the protection on this virtual mapping */
 
 
-       mpv = hw_cpv(mp);                                                       /* Get virtual address of mapping */
-       if(mpv->physent) {                                                      /* If there is a physical page, */
-               hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);     /* Unlock the physical entry */
-       }
-       splx(s);                                                                        /* Restore interrupt state */
-       debugLog2(10, mpv->PTEr, 0);                            /* end remap */
-       
-       return;                                                                         /* Leave... */
+       hw_walk_phys(physent, hwpNoop, hwpSPrtMap, hwpNoop,
+                    getProtPPC(prot, FALSE), hwpPurgePTE);                     /* Set the new protection for page and mappings */
+
+       return;                                                                 /* Leave... */
 }
 
 }
 
+
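Editor's sketch of a typical caller; page_make_readonly() is a made-up name, only mapping_protect_phys() comes from the diff.

    /* Editor's sketch -- hypothetical caller, not from this file */
    static void
    page_make_readonly(ppnum_t pa)
    {
            mapping_protect_phys(pa, VM_PROT_READ);         /* downgrade every mapping of this physical page */
    }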
 /*
 /*
- *             mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) Sets the default physical page attributes
+ *             void mapping_clr_mod(ppnum_t pa) - clears the change bit of a physical page
  *
  *
- *             This routine takes a physical entry and sets the physical attributes.  There can be no mappings
- *             associated with this page when we do it.
+ *             This routine takes a physical entry and runs through all mappings attached to it and turns
+ *             off the change bit. 
  */
 
  */
 
-void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) {     /* Sets the default physical page attributes */
+void mapping_clr_mod(ppnum_t pa) {                                                             /* Clears the change bit of a physical page */
 
 
-       debugLog2(11, pp->pte1, prot);                                                          /* end remap */
-
-       if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {      /* Lock the physical entry */
-               panic("\nmapping_phys_attr: Timeout attempting to lock physical entry at %08X: %08X %08X\n", 
-                       pp, pp->phys_link, pp->pte1);                                           /* Complain about timeout */
+       unsigned int pindex;
+       phys_entry_t *physent;
+       
+       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
+       if(!physent) {                                                                                          /* Did we find the physical page? */
+               panic("mapping_clr_mod: invalid physical page %08X\n", pa);
        }
 
        }
 
-       hw_phys_attr(pp, ppc_prot(prot), wimg);                                         /* Go set the default WIMG and protection */
-
-       hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);       /* We're done, unlock the physical entry */
-       debugLog2(12, pp->pte1, wimg);                                                          /* end remap */
-       
+       hw_walk_phys(physent, hwpNoop, hwpCCngMap, hwpCCngPhy,
+                                0, hwpPurgePTE);                                                               /* Clear change for page and mappings */
        return;                                                                                                         /* Leave... */
 }
 
        return;                                                                                                         /* Leave... */
 }
 
+
 /*
 /*
- *             void mapping_invall(phys_entry *pp) - invalidates all ptes associated with a page
+ *             void mapping_set_mod(ppnum_t pa) - set the change bit of a physical page
  *
  *
- *             This routine takes a physical entry and runs through all mappings attached to it and invalidates
- *             any PTEs it finds.
- *
- *             Interruptions must be disabled and the physical entry locked at entry.
+ *             This routine takes a physical entry and runs through all mappings attached to it and turns
+ *             on the change bit.  
  */
 
  */
 
-void mapping_invall(struct phys_entry *pp) {                                   /* Clear all PTEs pointing to a physical page */
+void mapping_set_mod(ppnum_t pa) {                                                             /* Sets the change bit of a physical page */
 
 
-       hw_inv_all(pp);                                                                                         /* Go set the change bit of a physical page */
+       unsigned int pindex;
+       phys_entry_t *physent;
        
        
+       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
+       if(!physent) {                                                                                          /* Did we find the physical page? */
+               panic("mapping_set_mod: invalid physical page %08X\n", pa);
+       }
+
+       hw_walk_phys(physent, hwpNoop, hwpSCngMap, hwpSCngPhy,
+                                0, hwpNoopPTE);                                                                /* Set change for page and mappings */
        return;                                                                                                         /* Leave... */
 }
 
 
 /*
        return;                                                                                                         /* Leave... */
 }
 
 
 /*
- *             void mapping_clr_mod(phys_entry *pp) - clears the change bit of a physical page
+ *             void mapping_clr_ref(ppnum_t pa) - clears the reference bit of a physical page
  *
  *             This routine takes a physical entry and runs through all mappings attached to it and turns
  *
  *             This routine takes a physical entry and runs through all mappings attached to it and turns
- *             off the change bit.  If there are PTEs associated with the mappings, they will be invalidated before
- *             the change bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
- *             either (I don't think, maybe I'll change my mind later).
- *
- *             Interruptions must be disabled and the physical entry locked at entry.
+ *             off the reference bit.  
  */
 
  */
 
-void mapping_clr_mod(struct phys_entry *pp) {                                  /* Clears the change bit of a physical page */
+void mapping_clr_ref(ppnum_t pa) {                                                             /* Clears the reference bit of a physical page */
+
+       unsigned int pindex;
+       phys_entry_t *physent;
+       
+       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
+       if(!physent) {                                                                                          /* Did we find the physical page? */
+               panic("mapping_clr_ref: invalid physical page %08X\n", pa);
+       }
 
 
-       hw_clr_mod(pp);                                                                                         /* Go clear the change bit of a physical page */
+       hw_walk_phys(physent, hwpNoop, hwpCRefMap, hwpCRefPhy,
+                                0, hwpPurgePTE);                                                               /* Clear reference for page and mappings */
        return;                                                                                                         /* Leave... */
 }
 
 
 /*
        return;                                                                                                         /* Leave... */
 }
 
 
 /*
- *             void mapping_set_mod(phys_entry *pp) - set the change bit of a physical page
+ *             void mapping_set_ref(ppnum_t pa) - set the reference bit of a physical page
  *
  *             This routine takes a physical entry and runs through all mappings attached to it and turns
  *
  *             This routine takes a physical entry and runs through all mappings attached to it and turns
- *             on the change bit.  If there are PTEs associated with the mappings, they will be invalidated before
- *             the change bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
- *             either (I don't think, maybe I'll change my mind later).
- *
- *             Interruptions must be disabled and the physical entry locked at entry.
+ *             on the reference bit. 
  */
 
  */
 
-void mapping_set_mod(struct phys_entry *pp) {                                  /* Sets the change bit of a physical page */
+void mapping_set_ref(ppnum_t pa) {                                                             /* Sets the reference bit of a physical page */
+
+       unsigned int pindex;
+       phys_entry_t *physent;
+       
+       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
+       if(!physent) {                                                                                          /* Did we find the physical page? */
+               panic("mapping_set_ref: invalid physical page %08X\n", pa);
+       }
 
 
-       hw_set_mod(pp);                                                                                         /* Go set the change bit of a physical page */
+       hw_walk_phys(physent, hwpNoop, hwpSRefMap, hwpSRefPhy,
+                                0, hwpNoopPTE);                                                                /* Set reference for page and mappings */
        return;                                                                                                         /* Leave... */
 }
 
 
 /*
        return;                                                                                                         /* Leave... */
 }
 
 
 /*
- *             void mapping_clr_ref(struct phys_entry *pp) - clears the reference bit of a physical page
+ *             boolean_t mapping_tst_mod(ppnum_t pa) - test the change bit of a physical page
  *
  *
- *             This routine takes a physical entry and runs through all mappings attached to it and turns
- *             off the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
- *             the reference bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
- *             either (I don't think, maybe I'll change my mind later).
- *
- *             Interruptions must be disabled at entry.
+ *             This routine takes a physical entry and runs through all mappings attached to it and tests
+ *             the change bit. 
  */
 
  */
 
-void mapping_clr_ref(struct phys_entry *pp) {                                  /* Clears the reference bit of a physical page */
-
-       mapping *mp;
+boolean_t mapping_tst_mod(ppnum_t pa) {                                                        /* Tests the change bit of a physical page */
 
 
-       debugLog2(13, pp->pte1, 0);                                                                     /* end remap */
-       if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {      /* Lock the physical entry for this mapping */
-               panic("Lock timeout getting lock on physical entry\n"); /* Just die... */
+       unsigned int pindex, rc;
+       phys_entry_t *physent;
+       
+       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
+       if(!physent) {                                                                                          /* Did we find the physical page? */
+               panic("mapping_tst_mod: invalid physical page %08X\n", pa);
        }
        }
-       hw_clr_ref(pp);                                                                                         /* Go clear the reference bit of a physical page */
-       hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);       /* Unlock physical entry */
-       debugLog2(14, pp->pte1, 0);                                                                     /* end remap */
-       return;                                                                                                         /* Leave... */
+
+       rc = hw_walk_phys(physent, hwpTCngPhy, hwpTCngMap, hwpNoop,
+                                         0, hwpMergePTE);                                                      /* Test change for page and mappings */
+       return ((rc & (unsigned long)ppC) != 0);                                        /* Leave with change bit */
 }
 
 
 /*
 }
 
 
 /*
- *             void mapping_set_ref(phys_entry *pp) - set the reference bit of a physical page
- *
- *             This routine takes a physical entry and runs through all mappings attached to it and turns
- *             on the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
- *             the reference bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
- *             either (I don't think, maybe I'll change my mind later).
+ *             boolean_t mapping_tst_ref(ppnum_t pa) - tests the reference bit of a physical page
  *
  *
- *             Interruptions must be disabled and the physical entry locked at entry.
+ *             This routine takes a physical entry and runs through all mappings attached to it and tests
+ *             the reference bit. 
  */
 
  */
 
-void mapping_set_ref(struct phys_entry *pp) {                                  /* Sets the reference bit of a physical page */
+boolean_t mapping_tst_ref(ppnum_t pa) {                                                        /* Tests the reference bit of a physical page */
 
 
-       hw_set_ref(pp);                                                                                         /* Go set the reference bit of a physical page */
-       return;                                                                                                         /* Leave... */
+       unsigned int pindex, rc;
+       phys_entry_t *physent;
+       
+       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
+       if(!physent) {                                                                                          /* Did we find the physical page? */
+               panic("mapping_tst_ref: invalid physical page %08X\n", pa);
+       }
+
+       rc = hw_walk_phys(physent, hwpTRefPhy, hwpTRefMap, hwpNoop,
+                         0, hwpMergePTE);                                                      /* Test reference for page and mappings */
+       return ((rc & (unsigned long)ppR) != 0);                                        /* Leave with reference bit */
 }
 
 
 /*
 }
 
 
 /*
- *             void mapping_tst_mod(phys_entry *pp) - test the change bit of a physical page
+ *             unsigned int mapping_tst_refmod(ppnum_t pa) - tests the reference and change bits of a physical page
  *
  *             This routine takes a physical entry and runs through all mappings attached to it and tests
  *
  *             This routine takes a physical entry and runs through all mappings attached to it and tests
- *             the changed bit.  If there are PTEs associated with the mappings, they will be invalidated before
- *             the changed bit is tested.  We don't try to save the PTE.  We won't worry about the LRU calculations
- *             either (I don't think, maybe I'll change my mind later).
- *
- *             Interruptions must be disabled and the physical entry locked at entry.
+ *             their reference and change bits. 
  */
 
  */
 
-boolean_t mapping_tst_mod(struct phys_entry *pp) {                             /* Tests the change bit of a physical page */
+unsigned int mapping_tst_refmod(ppnum_t pa) {                                  /* Tests the reference and change bits of a physical page */
+       
+       unsigned int  pindex, rc;
+       phys_entry_t *physent;
+       
+       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
+       if (!physent) {                                                                                         /* Did we find the physical page? */
+               panic("mapping_tst_refmod: invalid physical page %08X\n", pa);
+       }
 
 
-       return(hw_tst_mod(pp));                                                                         /* Go test the change bit of a physical page */
+       rc = hw_walk_phys(physent, hwpTRefCngPhy, hwpTRefCngMap, hwpNoop,
+                                         0, hwpMergePTE);                                                      /* Test reference and change bits in page and mappings */
+       return (((rc & ppC)? VM_MEM_MODIFIED : 0) | ((rc & ppR)? VM_MEM_REFERENCED : 0));
+                                                                                                                               /* Convert bits to generic format and return */
+       
 }
 
 
 /*
 }
 
 
 /*
- *             void mapping_tst_ref(phys_entry *pp) - tests the reference bit of a physical page
- *
- *             This routine takes a physical entry and runs through all mappings attached to it and tests
- *             the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
- *             the reference bit is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
- *             either (I don't think, maybe I'll change my mind later).
+ *             void mapping_clr_refmod(ppnum_t pa, unsigned int mask) - clears the reference and change bits specified
+ *        by mask of a physical page
  *
  *
- *             Interruptions must be disabled and the physical entry locked at entry.
+ *             This routine takes a physical entry and runs through all mappings attached to it and turns
+ *             off the reference and change bits selected by mask.  
  */
 
  */
 
-boolean_t mapping_tst_ref(struct phys_entry *pp) {                             /* Tests the reference bit of a physical page */
+void mapping_clr_refmod(ppnum_t pa, unsigned int mask) {               /* Clears the reference and change bits of a physical page */
+
+       unsigned int  pindex;
+       phys_entry_t *physent;
+       unsigned int  ppcMask;
+       
+       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
+       if(!physent) {                                                                                          /* Did we find the physical page? */
+               panic("mapping_clr_refmod: invalid physical page %08X\n", pa);
+       }
 
 
-       return(hw_tst_ref(pp));                                                                         /* Go test the reference bit of a physical page */
+       ppcMask = (((mask & VM_MEM_MODIFIED)? ppC : 0) | ((mask & VM_MEM_REFERENCED)? ppR : 0));
+                                                                                                                               /* Convert mask bits to PPC-specific format */
+       hw_walk_phys(physent, hwpNoop, hwpCRefCngMap, hwpCRefCngPhy,
+                    ppcMask, hwpPurgePTE);                                                     /* Clear reference and change bits for page and mappings */
+       return;                                                                                                         /* Leave... */
 }
 
 
 }
 
 
+
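Editor's sketch of how the new per-physical-page routines compose; collect_and_clear_mod() is hypothetical, while the VM_MEM_* constants and the two mapping_*_refmod() calls come from the code above.

    /* Editor's sketch -- hypothetical caller, not from this file */
    static unsigned int
    collect_and_clear_mod(ppnum_t pa)
    {
            unsigned int bits;

            bits = mapping_tst_refmod(pa);                          /* VM_MEM_REFERENCED | VM_MEM_MODIFIED, generic format */
            if (bits & VM_MEM_MODIFIED)
                    mapping_clr_refmod(pa, VM_MEM_MODIFIED);        /* clear only the change bit, leave reference alone */
            return bits;
    }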
 /*
 /*
- *             void mapping_phys_init(physent, wimg) - fills in the default processor dependent areas of the phys ent
+ *             phys_entry_t *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) - find the phys_entry for a physical page
  *
  *
- *             Currently, this sets the default word 1 of the PTE.  The only bits set are the WIMG bits
+ *             This routine takes a physical page number and returns the phys_entry associated with it.  It also
+ *             calculates the word index of the memory bank (pmap_mem_regions entry) that contains the page and
+ *             passes it back through pindex.
  */
 
  */
 
-void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg) {            /* Initializes hw specific storage attributes */
-
-       pp->pte1 = (pa & -PAGE_SIZE) | ((wimg << 3) & 0x00000078);      /* Set the WIMG and phys addr in the default PTE1 */
+phys_entry_t *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) {  /* Finds the physical entry for the page */
 
 
-       return;                                                                                                         /* Leave... */
+       int i;
+       
+       for(i = 0; i < pmap_mem_regions_count; i++) {                           /* Walk through the list */
+               if(!(unsigned int)pmap_mem_regions[i].mrPhysTab) continue;      /* Skip any empty lists */
+               if((pp < pmap_mem_regions[i].mrStart) || (pp > pmap_mem_regions[i].mrEnd)) continue;    /* This isn't ours */
+               
+               *pindex = (i * sizeof(mem_region_t)) / 4;                               /* Make the word index to this list */
+               
+               return &pmap_mem_regions[i].mrPhysTab[pp - pmap_mem_regions[i].mrStart];        /* Return the physent pointer */
+       }
+       
+       return (phys_entry_t *)0;                                                                               /* Shucks, can't find it... */
+       
 }
 
 
 }
 
 
+
+
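A small editor's example of mapping_phys_lookup() used as a managed-page test; page_is_managed() is an illustrative name, not code from this file.

    /* Editor's sketch -- hypothetical caller, not from this file */
    static boolean_t
    page_is_managed(ppnum_t pa)
    {
            unsigned int pindex;

            return (mapping_phys_lookup(pa, &pindex) != (phys_entry_t *)0);  /* NULL means no bank covers pa */
    }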
 /*
  *             mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones 
  *
 /*
  *             mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones 
  *
@@ -1043,8 +776,8 @@ void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg
  *             The list will be replenished from mapCtl.mapcrel if there are enough.  Otherwise,
  *             a new one is allocated.
  *
  *             The list will be replenished from mapCtl.mapcrel if there are enough.  Otherwise,
  *             a new one is allocated.
  *
- *             This routine allocates and/or memory and must be called from a safe place. 
- *             Currently, vm_pageout_scan is the safest place. We insure that the 
+ *             This routine allocates and/or frees memory and must be called from a safe place. 
+ *             Currently, vm_pageout_scan is the safest place. 
  */
 
 thread_call_t                          mapping_adjust_call;
  */
 
 thread_call_t                          mapping_adjust_call;
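As a worked example of the replenishment floor (editor's illustration with assumed values): in the first hunk below, mapCtl.mapcmin is recomputed as (sane_size / PAGE_SIZE) / 16, so with 4 KB pages and 512 MB of physical memory that is 131072 / 16 = 8192 mappings as the minimum free-list size mapping_adjust() tries to keep on hand.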
@@ -1052,14 +785,13 @@ static thread_call_data_t        mapping_adjust_call_data;
 
 void mapping_adjust(void) {                                                                            /* Adjust free mappings */
 
 
 void mapping_adjust(void) {                                                                            /* Adjust free mappings */
 
-       kern_return_t   retr;
-       mappingblok     *mb, *mbn;
+       kern_return_t   retr = KERN_SUCCESS;
+       mappingblok_t   *mb, *mbn;
        spl_t                   s;
        spl_t                   s;
-       int                             allocsize, i;
-       extern int vm_page_free_count;
+       int                             allocsize;
 
        if(mapCtl.mapcmin <= MAPPERBLOK) {
 
        if(mapCtl.mapcmin <= MAPPERBLOK) {
-               mapCtl.mapcmin = (mem_size / PAGE_SIZE) / 16;
+               mapCtl.mapcmin = (sane_size / PAGE_SIZE) / 16;
 
 #if DEBUG
                kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
 
 #if DEBUG
                kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
@@ -1090,10 +822,10 @@ void mapping_adjust(void) {                                                                              /* Adjust free mappings */
                        mapCtl.mapcreln--;                                                                      /* Back off the count */
                        allocsize = MAPPERBLOK;                                                         /* Show we allocated one block */                       
                }
                        mapCtl.mapcreln--;                                                                      /* Back off the count */
                        allocsize = MAPPERBLOK;                                                         /* Show we allocated one block */                       
                }
-               else {                                                                                                  /* No free ones, try to get it */
+        else {                                                                                                 /* No free ones, try to get it */
                        
                        allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK;  /* Get the number of pages we need */
                        
                        allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK;  /* Get the number of pages we need */
-
+                       
                        hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);            /* Unlock our stuff */
                        splx(s);                                                                                        /* Restore 'rupts */
 
                        hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);            /* Unlock our stuff */
                        splx(s);                                                                                        /* Restore 'rupts */
 
@@ -1104,18 +836,21 @@ void mapping_adjust(void) {                                                                              /* Adjust free mappings */
                                }
                                if(retr == KERN_SUCCESS) break;                                 /* We got some memory, bail out... */
                        }
                                }
                                if(retr == KERN_SUCCESS) break;                                 /* We got some memory, bail out... */
                        }
+               
                        allocsize = allocsize * MAPPERBLOK;                                     /* Convert pages to number of maps allocated */
                        s = splhigh();                                                                          /* Don't bother from now on */
                        if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
                                panic("mapping_adjust - timeout getting control lock (2)\n");   /* Tell all and die */
                        }
                }
                        allocsize = allocsize * MAPPERBLOK;                                     /* Convert pages to number of maps allocated */
                        s = splhigh();                                                                          /* Don't bother from now on */
                        if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
                                panic("mapping_adjust - timeout getting control lock (2)\n");   /* Tell all and die */
                        }
                }
+
                if (retr != KERN_SUCCESS)
                        break;                                                                                          /* Fail to allocate, bail out... */
                for(; allocsize > 0; allocsize -= MAPPERBLOK) {                 /* Release one block at a time */
                        mapping_free_init((vm_offset_t)mbn, 0, 1);                      /* Initialize a non-permanent block */
                if (retr != KERN_SUCCESS)
                        break;                                                                                          /* Fail to allocate, bail out... */
                for(; allocsize > 0; allocsize -= MAPPERBLOK) {                 /* Release one block at a time */
                        mapping_free_init((vm_offset_t)mbn, 0, 1);                      /* Initialize a non-permanent block */
-                       mbn = (mappingblok *)((unsigned int)mbn + PAGE_SIZE);   /* Point to the next slot */
+                       mbn = (mappingblok_t *)((unsigned int)mbn + PAGE_SIZE); /* Point to the next slot */
                }
                }
+
                if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
                        mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
        }
                if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
                        mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
        }
@@ -1136,11 +871,13 @@ void mapping_adjust(void) {                                                                              /* Adjust free mappings */
 
        while((unsigned int)mbn) {                                                                      /* Toss 'em all */
                mb = mbn->nextblok;                                                                             /* Get the next */
 
        while((unsigned int)mbn) {                                                                      /* Toss 'em all */
                mb = mbn->nextblok;                                                                             /* Get the next */
+               
                kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE);   /* Release this mapping block */
                kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE);   /* Release this mapping block */
+       
                mbn = mb;                                                                                               /* Chain to the next */
        }
 
                mbn = mb;                                                                                               /* Chain to the next */
        }
 
-       __asm__ volatile("sync");                                                                       /* Make sure all is well */
+       __asm__ volatile("eieio");                                                                      /* Make sure all is well */
        mapCtl.mapcrecurse = 0;                                                                         /* We are done now */
        return;
 }
        mapCtl.mapcrecurse = 0;                                                                         /* We are done now */
        return;
 }
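In the mapping_free() hunk that follows, the slot index is now computed with a shift of 6 because mappings are 64 bytes: mindx = (addr & (PAGE_SIZE - 1)) >> 6 yields one of 64 slots per 4 KB block, and the free bit for slot k lives in word (k >> 5), bit (0x80000000 >> (k & 31)) of mapblokfree. For example (editor's arithmetic, addresses assumed): a mapping at block_base + 0x0C0 has mindx = 0x0C0 >> 6 = 3, so freeing it sets bit 0x10000000 in mapblokfree[0].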
@@ -1157,20 +894,55 @@ void mapping_adjust(void) {                                                                              /* Adjust free mappings */
 
 void mapping_free(struct mapping *mp) {                                                        /* Release a mapping */
 
 
 void mapping_free(struct mapping *mp) {                                                        /* Release a mapping */
 
-       mappingblok     *mb, *mbn;
+       mappingblok_t   *mb, *mbn;
        spl_t                   s;
        spl_t                   s;
-       unsigned int    full, mindx;
+       unsigned int    full, mindx, lists;
 
 
-       mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5;                      /* Get index to mapping */
-       mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE);            /* Point to the mapping block */
+       mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 6;                      /* Get index to mapping */
+       mb = (mappingblok_t *)((unsigned int)mp & -PAGE_SIZE);          /* Point to the mapping block */
+    lists = (mp->mpFlags & mpLists);                                                   /* get #lists */
+    if ((lists == 0) || (lists > kSkipListMaxLists))                   /* panic if out of range */
+        panic("mapping_free: mpLists invalid\n");
+
+#if 0
+       mp->mpFlags = 0x99999999;                                                                       /* (BRINGUP) */ 
+       mp->mpSpace = 0x9999;                                                                           /* (BRINGUP) */ 
+       mp->u.mpBSize = 0x9999;                                                                         /* (BRINGUP) */ 
+       mp->mpPte   = 0x99999998;                                                                       /* (BRINGUP) */ 
+       mp->mpPAddr = 0x99999999;                                                                       /* (BRINGUP) */ 
+       mp->mpVAddr = 0x9999999999999999ULL;                                            /* (BRINGUP) */ 
+       mp->mpAlias = 0x9999999999999999ULL;                                            /* (BRINGUP) */ 
+       mp->mpList0 = 0x9999999999999999ULL;                                            /* (BRINGUP) */ 
+       mp->mpList[0] = 0x9999999999999999ULL;                                          /* (BRINGUP) */ 
+       mp->mpList[1] = 0x9999999999999999ULL;                                          /* (BRINGUP) */ 
+       mp->mpList[2] = 0x9999999999999999ULL;                                          /* (BRINGUP) */ 
+
+       if(lists > mpBasicLists) {                                                                      /* (BRINGUP) */ 
+               mp->mpList[3] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
+               mp->mpList[4] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
+               mp->mpList[5] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
+               mp->mpList[6] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
+               mp->mpList[7] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
+               mp->mpList[8] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
+               mp->mpList[9] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
+               mp->mpList[10] = 0x9999999999999999ULL;                                 /* (BRINGUP) */ 
+       }
+#endif 
+       
 
        s = splhigh();                                                                                          /* Don't bother from now on */
        if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
                panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */
        }
        
 
        s = splhigh();                                                                                          /* Don't bother from now on */
        if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
                panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */
        }
        
-       full = !(mb->mapblokfree[0] | mb->mapblokfree[1] | mb->mapblokfree[2] | mb->mapblokfree[3]);    /* See if full now */ 
+       full = !(mb->mapblokfree[0] | mb->mapblokfree[1]);                      /* See if full now */ 
        mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));    /* Flip on the free bit */
        mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));    /* Flip on the free bit */
+    if ( lists > mpBasicLists ) {                                                              /* if big block, light the 2nd bit too */
+        mindx++;
+        mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));
+        mapCtl.mapcfree++;
+        mapCtl.mapcinuse--;
+    }
        
        if(full) {                                                                                                      /* If it was full before this: */
                mb->nextblok = mapCtl.mapcnext;                                                 /* Move head of list to us */
        
        if(full) {                                                                                                      /* If it was full before this: */
                mb->nextblok = mapCtl.mapcnext;                                                 /* Move head of list to us */
@@ -1185,8 +957,7 @@ void mapping_free(struct mapping *mp) {                                                    /* Release a mapping */
        mapCtl.mapcfreec++;                                                                                     /* Count total calls */
 
        if(mapCtl.mapcfree > mapCtl.mapcmin) {                                          /* Should we consider releasing this? */
        mapCtl.mapcfreec++;                                                                                     /* Count total calls */
 
        if(mapCtl.mapcfree > mapCtl.mapcmin) {                                          /* Should we consider releasing this? */
-               if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1] & mb->mapblokfree[2] & mb->mapblokfree[3]) 
-                  == 0xFFFFFFFF) {                                                                             /* See if empty now */ 
+               if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1]) == 0xFFFFFFFF) {    /* See if empty now */ 
 
                        if(mapCtl.mapcnext == mb) {                                                     /* Are we first on the list? */
                                mapCtl.mapcnext = mb->nextblok;                                 /* Unchain us */
 
                        if(mapCtl.mapcnext == mb) {                                                     /* Are we first on the list? */
                                mapCtl.mapcnext = mb->nextblok;                                 /* Unchain us */
@@ -1228,70 +999,174 @@ void mapping_free(struct mapping *mp) {                                                 /* Release a mapping */
 
 
 /*
 
 
 /*
- *             mapping_alloc(void) - obtain a mapping from the free list 
+ *             mapping_alloc(lists) - obtain a mapping from the free list 
  *
  *
- *             This routine takes a mapping off of the free list and returns it's address.
+ *             This routine takes a mapping off of the free list and returns its address.
+ *             The mapping is zeroed, and its mpLists count is set.  The caller passes in
+ *             the number of skiplists it would prefer; if this number is greater than 
+ *             mpBasicLists (i.e., 4) then we need to allocate a 128-byte mapping, which is
+ *             just two consecutive free entries coalesced into one.  If we cannot find
+ *             two consecutive free entries, we clamp the list count down to mpBasicLists
+ *             and return a basic 64-byte node.  Our caller never knows the difference.
  *
- *             We do this by finding a free entry in the first block and allocating it.
- *             If this allocation empties the block, we remove it from the free list.
+ *             If this allocation empties a block, we remove it from the free list.
  *             If this allocation drops the total number of free entries below a threshold,
  *             we allocate a new block.
  *
  */
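[Editor's note] For readers following the new big-mapping path below: a 128-byte mapping is carved out of two adjacent free 64-byte slots in a block's free bitmap. The sketch that follows is an illustration of that bit trick only; find_adjacent_free is an invented name, the mask uses the same most-significant-bit-first slot numbering as mapblokfree[], and the kernel's real mapalc2 (implemented elsewhere) may apply additional rules, for example for pairs that span the two bitmap words.

/* Editor's sketch, not kernel code: locate two adjacent free slots in one
   32-bit free word.  Slot n is free when bit (0x80000000 >> n) is set.
   Returns the index of the first slot of the pair, or -1 if none. */
static int find_adjacent_free(unsigned int free_mask) {
	unsigned int pair = free_mask & (free_mask << 1);	/* bit n set iff slots n and n+1 are both free */
	int n;

	if (!pair) return -1;					/* no adjacent pair in this word */
	for (n = 0; n < 31; n++) {				/* find the lowest-numbered pair */
		if (pair & (0x80000000u >> n)) return n;
	}
	return -1;						/* pairs spanning into the next word are not handled here */
}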
+decl_simple_lock_data(extern,free_pmap_lock)
 
-mapping *mapping_alloc(void) {                                                                 /* Obtain a mapping */
+mapping_t *
+mapping_alloc(int lists) {                                                             /* Obtain a mapping */
 
-       register mapping *mp;
-       mappingblok     *mb, *mbn;
+       register mapping_t *mp;
+       mappingblok_t   *mb, *mbn;
        spl_t                   s;
        int                             mindx;
-       kern_return_t   retr;
-
+    int                                big = (lists > mpBasicLists);                           /* set flag if big block req'd */
+       pmap_t                  refpmap, ckpmap;
+       unsigned int    space, i;
+       addr64_t                va, nextva;
+       boolean_t               found_mapping;
+       boolean_t               do_rescan;
+    
        s = splhigh();                                                                                          /* Don't bother from now on */
        if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
                panic("mapping_alloc - timeout getting control lock\n");        /* Tell all and die */
        }
 
-       if(!(mb = mapCtl.mapcnext)) {                                                           /* Get the first block entry */
-               unsigned int                    i;
-               struct mappingflush             mappingflush;
-               PCA                                             *pca_min, *pca_max;
-               PCA                                             *pca_base;
-
-               pca_min = (PCA *)(hash_table_base+hash_table_size);
-               pca_max = (PCA *)(hash_table_base+hash_table_size+hash_table_size);
-
-               while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
-                       mapCtl.mapcflush.mappingcnt = 0;
-                       pca_base = mapCtl.mapcflush.pcaptr;
-                       do {
-                               hw_select_mappings(&mapCtl.mapcflush);
-                               mapCtl.mapcflush.pcaptr++;
-                               if (mapCtl.mapcflush.pcaptr >= pca_max)
-                                       mapCtl.mapcflush.pcaptr = pca_min;
-                       } while ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr != pca_base));
-
-                       if ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr == pca_base)) {
-                               hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
-                               panic("mapping_alloc - all mappings are wired\n");
+       if(!((unsigned int)mapCtl.mapcnext)) {                                          /* Are there any free mappings? */
+       
+/*
+ *             No free mappings.  First, there may be some mapping blocks on the "to be released"
+ *             list.  If so, rescue one.  Otherwise, try to steal a couple blocks worth.
+ */
+
+               if((mbn = mapCtl.mapcrel) != 0) {                                               /* Try to rescue a block from impending doom */
+                       mapCtl.mapcrel = mbn->nextblok;                                         /* Pop the queue */
+                       mapCtl.mapcreln--;                                                                      /* Back off the count */
+                       mapping_free_init((vm_offset_t)mbn, 0, 1);                      /* Initialize a non-permanent block */
+                       goto rescued;
+               }
+
+               hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
+
+               simple_lock(&free_pmap_lock);
+
+               if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
+                       panic("mapping_alloc - timeout getting control lock\n");        /* Tell all and die */
+               }
+
+               if (!((unsigned int)mapCtl.mapcnext)) {
+
+                       refpmap = (pmap_t)cursor_pmap->pmap_link.next;
+                       space = mapCtl.mapcflush.spacenum;
+                       while (refpmap != cursor_pmap) {
+                               if(((pmap_t)(refpmap->pmap_link.next))->spaceNum > space) break;
+                               refpmap = (pmap_t)refpmap->pmap_link.next;
                        }
-                       mappingflush = mapCtl.mapcflush;
-                       hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
-                       splx(s);
-                       for (i=0;i<mappingflush.mappingcnt;i++)
-                               mapping_remove(mappingflush.mapping[i].pmap, 
-                                              mappingflush.mapping[i].offset);
-                       s = splhigh();
-                       if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {
-                               panic("mapping_alloc - timeout getting control lock\n");
+
+                       ckpmap = refpmap;
+                       va = mapCtl.mapcflush.addr;
+                       found_mapping = FALSE;
+
+                       while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
+
+                               hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
+
+                               ckpmap = (pmap_t)ckpmap->pmap_link.next;
+
+                               /* We don't steal mappings from the kernel pmap, a VMM host pmap, or a VMM guest pmap with guest
+                                  shadow assist active.
+                                */
+                               if ((ckpmap->stats.resident_count != 0) && (ckpmap != kernel_pmap)
+                                                                                                               && !(ckpmap->pmapFlags & (pmapVMgsaa|pmapVMhost))) {
+                                       do_rescan = TRUE;
+                                       for (i=0;i<8;i++) {
+                                               mp = hw_purge_map(ckpmap, va, &nextva);
+
+                                               switch ((unsigned int)mp & mapRetCode) {
+                                                       case mapRtOK:
+                                                               mapping_free(mp);
+                                                               found_mapping = TRUE;
+                                                               break;
+                                                       case mapRtNotFnd:
+                                                               break;
+                                                       default:
+                                                               panic("mapping_alloc: hw_purge_map failed - pmap = %08X, va = %16llX, code = %08X\n", ckpmap, va, mp);
+                                                               break;
+                                               }
+
+                                               if (mapRtNotFnd == ((unsigned int)mp & mapRetCode)) 
+                                                       if (do_rescan)
+                                                               do_rescan = FALSE;
+                                                       else
+                                                               break;
+
+                                               va = nextva;
+                                       }
+                               }
+
+                               if (ckpmap == refpmap) {
+                                       if (found_mapping == FALSE)
+                                               panic("no valid pmap to purge mappings\n");
+                                       else
+                                               found_mapping = FALSE;
+                               }
+
+                               if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
+                                       panic("mapping_alloc - timeout getting control lock\n");        /* Tell all and die */
+                               }
+
                        }
+
+                       mapCtl.mapcflush.spacenum = ckpmap->spaceNum;
+                       mapCtl.mapcflush.addr = nextva;
                }
-               mb = mapCtl.mapcnext;
-       }
-       
-       if(!(mindx = mapalc(mb))) {                                                                     /* Allocate a slot */
-               panic("mapping_alloc - empty mapping block detected at %08X\n", mb);    /* Not allowed to find none */
-       }
+
+               simple_unlock(&free_pmap_lock);
+       }
+
+rescued:
+
+       mb = mapCtl.mapcnext;
+    
+    if ( big ) {                                                                                               /* if we need a big (128-byte) mapping */
+        mapCtl.mapcbig++;                                                                              /* count attempts to allocate a big mapping */
+        mbn = NULL;                                                                                            /* this will be prev ptr */
+        mindx = 0;
+        while( mb ) {                                                                                  /* loop over mapping blocks with free entries */
+            mindx = mapalc2(mb);                                                               /* try for 2 consecutive free bits in this block */
+
+           if ( mindx )        break;                                                                  /* exit loop if we found them */
+            mbn = mb;                                                                                  /* remember previous block */
+            mb = mb->nextblok;                                                                 /* move on to next block */
+        }
+        if ( mindx == 0 ) {                                                                            /* if we couldn't find 2 consecutive bits... */
+            mapCtl.mapcbigfails++;                                                             /* count failures */
+            big = 0;                                                                                   /* forget that we needed a big mapping */
+            lists = mpBasicLists;                                                              /* clamp list count down to the max in a 64-byte mapping */
+            mb = mapCtl.mapcnext;                                                              /* back to the first block with a free entry */
+        }
+        else {                                                                                                 /* if we did find a big mapping */
+            mapCtl.mapcfree--;                                                                 /* Decrement free count twice */
+            mapCtl.mapcinuse++;                                                                        /* Bump in use count twice */
+            if ( mindx < 0 ) {                                                                 /* if we just used the last 2 free bits in this block */
+                if (mbn) {                                                                             /* if this wasn't the first block */
+                    mindx = -mindx;                                                            /* make positive */
+                    mbn->nextblok = mb->nextblok;                              /* unlink this one from the middle of block list */
+                    if (mb ==  mapCtl.mapclast)        {                               /* if we emptied last block */
+                        mapCtl.mapclast = mbn;                                 /* then prev block is now last */
+                    }
+                }
+            }
+        }
+    }
+    
+    if ( !big ) {                                                                                              /* if we need a small (64-byte) mapping */
+        if(!(mindx = mapalc1(mb)))                                                             /* Allocate a 1-bit slot */
+            panic("mapping_alloc - empty mapping block detected at %08X\n", mb);
+    }
        
        if(mindx < 0) {                                                                                         /* Did we just take the last one */
                mindx = -mindx;                                                                                 /* Make positive */
@@ -1312,8 +1187,9 @@ mapping *mapping_alloc(void) {                                                                    /* Obtain a mapping */
  *     For early boot, we are set up to only rescue one block at a time.  This is because we prime
  *     the release list with as much as we need until threads start.
  */
+
        if(mapCtl.mapcfree < mapCtl.mapcmin) {                                          /* See if we need to replenish */
-               if(mbn = mapCtl.mapcrel) {                                                              /* Try to rescue a block from impending doom */
+               if((mbn = mapCtl.mapcrel) != 0) {                                               /* Try to rescue a block from impending doom */
                        mapCtl.mapcrel = mbn->nextblok;                                         /* Pop the queue */
                        mapCtl.mapcreln--;                                                                      /* Back off the count */
                        mapping_free_init((vm_offset_t)mbn, 0, 1);                      /* Initialize a non-permanent block */
@@ -1330,20 +1206,22 @@ mapping *mapping_alloc(void) {                                                                  /* Obtain a mapping */
        hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);                            /* Unlock our stuff */
        splx(s);                                                                                                        /* Restore 'rupts */
        
-       mp = &((mapping *)mb)[mindx];                                                           /* Point to the allocated mapping */
-    __asm__ volatile("dcbz 0,%0" : : "r" (mp));                                        /* Clean it up */
+       mp = &((mapping_t *)mb)[mindx];                                                         /* Point to the allocated mapping */
+    mp->mpFlags = lists;                                                                               /* set the list count */
+
+
        return mp;                                                                                                      /* Send it back... */
 }
 
 
 void
-consider_mapping_adjust()
+consider_mapping_adjust(void)
 {
        spl_t                   s;
 
        s = splhigh();                                                                                          /* Don't bother from now on */
        if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
-               panic("mapping_alloc - timeout getting control lock\n");        /* Tell all and die */
+               panic("consider_mapping_adjust -- lock timeout\n");
        }
 
         if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
@@ -1362,8 +1240,15 @@ consider_mapping_adjust()
 /*
  *             void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list
  *
- *             The mapping block is a page size area on a page boundary.  It contains 1 header and 127
- *             mappings.  This call adds and initializes a block for use.
+ *             The mapping block is a page size area on a page boundary.  It contains 1 header and 63
+ *             mappings.  This call adds and initializes a block for use.  Mappings come in two sizes,
+ *             64 and 128 bytes (the only difference is the number of skip-lists.)  When we allocate a
+ *             128-byte mapping we just look for two consecutive free 64-byte mappings, so most of the
+ *             code only deals with "basic" 64-byte mappings.  This works for two reasons:
+ *                     - Only one in 256 mappings is big, so they are rare.
+ *                     - If we cannot find two consecutive free mappings, we just return a small one.
+ *                       There is no problem with doing this, except a minor performance degradation.
+ *             Therefore, all counts etc in the mapping control structure are in units of small blocks.
  *     
  *             The header contains a chain link, bit maps, a virtual to real translation mask, and
  *             some statistics. Bit maps map each slot on the page (bit 0 is not used because it 
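[Editor's note] As a concrete illustration of the layout described above, here is a sketch (invented sketch_ names, not the kernel's types) of the two-word free bitmap for a page-sized block of 64 slots, matching the mapblokfree[0] = 0x7FFFFFFF / mapblokfree[1] = 0xFFFFFFFF initialization and the 0x80000000 >> (mindx & 31) bit manipulation that appear in this diff.

/* Editor's sketch, not kernel code: the free-bitmap convention for a
   block of 64 x 64-byte slots.  Slot 0 holds the header, so its bit is
   never marked free. */
typedef struct sketch_blok {
	unsigned int mapblokfree[2];			/* one bit per 64-byte slot, MSB first */
} sketch_blok_t;

static void sketch_blok_init(sketch_blok_t *mb) {
	mb->mapblokfree[0] = 0x7FFFFFFF;		/* slot 0 (header) in use, slots 1-31 free */
	mb->mapblokfree[1] = 0xFFFFFFFF;		/* slots 32-63 free */
}

static void sketch_mark_free(sketch_blok_t *mb, int mindx) {
	mb->mapblokfree[mindx >> 5] |= (0x80000000u >> (mindx & 31));	/* flip the slot's free bit on */
}

static void sketch_mark_used(sketch_blok_t *mb, int mindx) {
	mb->mapblokfree[mindx >> 5] &= ~(0x80000000u >> (mindx & 31));	/* flip the slot's free bit off */
}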
@@ -1392,36 +1277,40 @@ void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
                                                                                                                           or goes straight to the release queue .
                                                                                                                           locked indicates if the lock is held already */
                                                                                                                   
-       mappingblok     *mb;
+       mappingblok_t   *mb;
        spl_t           s;
        spl_t           s;
-       int                     i;
-       unsigned int    raddr;
+       addr64_t        raddr;
+       ppnum_t         pp;
 
 
-       mb = (mappingblok *)mbl;                                                                /* Start of area */
-       
+       mb = (mappingblok_t *)mbl;                                                              /* Start of area */     
        
        if(perm >= 0) {                                                                                 /* See if we need to initialize the block */
                if(perm) {
-                       raddr = (unsigned int)mbl;                                              /* Perm means V=R */
+                       raddr = (addr64_t)((unsigned int)mbl);                  /* Perm means V=R */
                        mb->mapblokflags = mbPerm;                                              /* Set perm */
                        mb->mapblokflags = mbPerm;                                              /* Set perm */
+//                     mb->mapblokflags |= (unsigned int)mb;                   /* (BRINGUP) */
                }
                else {
-                       raddr = kvtophys(mbl);                                                  /* Get real address */
+                       pp = pmap_find_phys(kernel_pmap, (addr64_t)mbl);        /* Get the physical page */
+                       if(!pp) {                                                                               /* What gives?  Where's the page? */
+                               panic("mapping_free_init: could not find translation for vaddr %016llX\n", (addr64_t)mbl);
+                       }
+                       
+                       raddr = (addr64_t)pp << 12;                                             /* Convert physical page to physical address */
                        mb->mapblokflags = 0;                                                   /* Set not perm */
                        mb->mapblokflags = 0;                                                   /* Set not perm */
+//                     mb->mapblokflags |= (unsigned int)mb;                   /* (BRINGUP) */
                }
                
-               mb->mapblokvrswap = raddr ^ (unsigned int)mbl;          /* Form translation mask */
+               mb->mapblokvrswap = raddr ^ (addr64_t)((unsigned int)mbl);              /* Form translation mask */
                
                mb->mapblokfree[0] = 0x7FFFFFFF;                                        /* Set first 32 (minus 1) free */
                mb->mapblokfree[1] = 0xFFFFFFFF;                                        /* Set next 32 free */
-               mb->mapblokfree[2] = 0xFFFFFFFF;                                        /* Set next 32 free */
-               mb->mapblokfree[3] = 0xFFFFFFFF;                                        /* Set next 32 free */
        }
        
        s = splhigh();                                                                                  /* Don't bother from now on */
        if(!locked) {                                                                                   /* Do we need the lock? */
                if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {             /* Lock the control header */ 
-                       panic("mapping_free_init - timeout getting control lock\n");    /* Tell all and die */
+                       panic("mapping_free_init: timeout getting control lock\n");     /* Tell all and die */
                }
        }
        
@@ -1447,7 +1336,8 @@ void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
        if(!locked) {                                                                                   /* Do we need to unlock? */
                hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);            /* Unlock our stuff */
        }
-               splx(s);                                                                                        /* Restore 'rupts */
+
+       splx(s);                                                                                                /* Restore 'rupts */
        return;                                                                                                 /* All done, leave... */
 }
 
@@ -1467,7 +1357,7 @@ void mapping_prealloc(unsigned int size) {                                        /* Preallocates mapppings for lar
 
        int     nmapb, i;
        kern_return_t   retr;
-       mappingblok     *mbn;
+       mappingblok_t   *mbn;
        spl_t           s;
 
        s = splhigh();                                                                                  /* Don't bother from now on */
@@ -1484,9 +1374,9 @@ void mapping_prealloc(unsigned int size) {                                        /* Preallocates mapppings for lar
                splx(s);                                                                                        /* Restore 'rupts */
                return;
        }
-       if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {                 /* Make sure we aren't recursing */
+       if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {     /* Make sure we aren't recursing */
                hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);                    /* Unlock our stuff */
-               splx(s);                                                        /* Restore 'rupts */
+               splx(s);                                                                                        /* Restore 'rupts */
                return;
        }
        nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK;                  /* Get number of blocks to get */
@@ -1496,9 +1386,8 @@ void mapping_prealloc(unsigned int size) {                                        /* Preallocates mapppings for lar
        
        for(i = 0; i < nmapb; i++) {                                                    /* Allocate 'em all */
                retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);   /* Find a virtual address to use */
-               if(retr != KERN_SUCCESS) {                                                      /* Did we get some memory? */
-                       panic("Whoops...  Not a bit of wired memory left for anyone\n");
-               }
+               if(retr != KERN_SUCCESS)                                                        /* Did we get some memory? */
+                       break;
                mapping_free_init((vm_offset_t)mbn, -1, 0);                     /* Initialize on to the release queue */
        }
        if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
@@ -1546,11 +1435,11 @@ void mapping_free_prime(void) {                                                                 /* Primes the mapping block release list
 
        int     nmapb, i;
        kern_return_t   retr;
-       mappingblok     *mbn;
+       mappingblok_t   *mbn;
        vm_offset_t     mapping_min;
        
-       retr = kmem_suballoc(kernel_map, &mapping_min, MAPPING_MAP_SIZE,
-                            FALSE, TRUE, &mapping_map);
+       retr = kmem_suballoc(kernel_map, &mapping_min, sane_size / 16,
+                            FALSE, VM_FLAGS_ANYWHERE, &mapping_map);
 
        if (retr != KERN_SUCCESS)
                panic("mapping_free_prime: kmem_suballoc failed");
@@ -1576,7 +1465,7 @@ void mapping_free_prime(void) {                                                                   /* Primes the mapping block release list
 }
 
 
-
+void
 mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
                       vm_size_t *alloc_size, int *collectable, int *exhaustable)
 {
@@ -1592,108 +1481,52 @@ mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_
 
 
 /*
- *             vm_offset_t     mapping_p2v(pmap_t pmap, phys_entry *pp) - Finds first virtual mapping of a physical page in a space
+ *             addr64_t        mapping_p2v(pmap_t pmap, ppnum_t pa) - Finds first virtual mapping of a physical page in a space
  *
- *             Gets a lock on the physical entry.  Then it searches the list of attached mappings for one with
- *             the same space.  If it finds it, it returns the virtual address.
+ *             First looks up the physical entry associated with the physical page.  Then searches the alias
+ *             list for a matching pmap.  It grabs the virtual address from the mapping, drops busy, and returns 
+ *             that.
  *
- *             Note that this will fail if the pmap has nested pmaps in it.  Fact is, I'll check
- *             for it and fail it myself...
  */
 
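[Editor's note] A usage sketch, not part of the diff: the removed phystokv() below shows how a caller can turn the page-aligned result of mapping_p2v back into a full byte address. The helper name is invented, and the assumption that the within-page offset is simply ORed back in is taken from that removed code.

/* Editor's sketch, not kernel code: rebuilding a byte-granular kernel
   virtual address from mapping_p2v's page-aligned result. */
addr64_t sketch_phys_page_to_kva(ppnum_t pp, unsigned int byte_offset) {
	addr64_t va = mapping_p2v(kernel_pmap, pp);	/* first kernel mapping of the page, page aligned */

	if (va == 0) return 0;				/* the page is not mapped in the kernel pmap */
	return va | (addr64_t)(byte_offset & 0xFFF);	/* put the within-page offset back, as the old phystokv did */
}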
-vm_offset_t    mapping_p2v(pmap_t pmap, struct phys_entry *pp) {               /* Finds first virtual mapping of a physical page in a space */
-
-       spl_t                           s;
-       register mapping        *mp, *mpv;
-       vm_offset_t                     va;
+addr64_t       mapping_p2v(pmap_t pmap, ppnum_t pa) {                          /* Finds first virtual mapping of a physical page in a space */
 
-       if(pmap->vflags & pmapAltSeg) return 0;                                 /* If there are nested pmaps, fail immediately */
+       spl_t s;
+       mapping_t *mp;
+       unsigned int pindex;
+       phys_entry_t *physent;
+       addr64_t va;
 
-       s = splhigh();
-       if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {      /* Try to get the lock on the physical entry */
-               splx(s);                                                                                        /* Restore 'rupts */
-               panic("mapping_p2v: timeout getting lock on physent\n");                        /* Arrrgghhhh! */
-               return(0);                                                                                      /* Should die before here */
+       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
+       if(!physent) {                                                                                          /* Did we find the physical page? */
+               panic("mapping_p2v: invalid physical page %08X\n", pa);
        }
-       
-       va = 0;                                                                                                 /* Assume failure */
-       
-       for(mpv = hw_cpv(pp->phys_link); mpv; mpv = hw_cpv(mpv->next)) {        /* Scan 'em all */
-               
-               if(!(((mpv->PTEv >> 7) & 0x000FFFFF) == pmap->space)) continue; /* Skip all the rest if this is not the right space... */ 
-               
-               va = ((((unsigned int)mpv->PTEhash & -64) << 6) ^ (pmap->space  << 12)) & 0x003FF000;   /* Backward hash to the wrapped VADDR */
-               va = va | ((mpv->PTEv << 1) & 0xF0000000);                      /* Move in the segment number */
-               va = va | ((mpv->PTEv << 22) & 0x0FC00000);                     /* Add in the API for the top of the address */
-               break;                                                                                          /* We're done now, pass virtual address back */
-       }
-       
-       hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);                               /* Unlock the physical entry */
-       splx(s);                                                                                                /* Restore 'rupts */
-       return(va);                                                                                             /* Return the result or 0... */
-}
 
-/*
- *     kvtophys(addr)
- *
- *     Convert a kernel virtual address to a physical address
- */
-vm_offset_t kvtophys(vm_offset_t va) {
-
-       register mapping                *mp, *mpv;
-       register blokmap                *bmp;
-       register vm_offset_t    pa;
-       spl_t                           s;
-       
-       s=splhigh();                                                                                    /* Don't bother from now on */
-       mp = hw_lock_phys_vir(PPC_SID_KERNEL, va);                              /* Find mapping and lock the physical entry for this mapping */
+       s = splhigh();                                                                                  /* Make sure interruptions are disabled */
 
-       if((unsigned int)mp&1) {                                                                /* Did the lock on the phys entry time out? */
-               splx(s);                                                                                        /* Restore 'rupts */
-               panic("kvtophys: timeout obtaining lock on physical entry (vaddr=%08X)\n", va); /* Scream bloody murder! */
-               return 0;
-       }
+       mp = hw_find_space(physent, pmap->space);                               /* Go find the first mapping to the page from the requested pmap */
 
-       if(!mp) {                                                                                               /* If it was not a normal page */
-               pa = hw_cvp_blk(kernel_pmap, va);                                       /* Try to convert odd-sized page (returns 0 if not found) */
-               splx(s);                                                                                        /* Restore 'rupts */
-               return pa;                                                                                      /* Return physical address */
+       if(mp) {                                                                                                /* Did we find one? */
+               va = mp->mpVAddr & -4096;                                                       /* If so, get the cleaned up vaddr */
+               mapping_drop_busy(mp);                                                          /* Go ahead and release the mapping now */
        }
+       else va = 0;                                                                                    /* Return failure */
 
-       mpv = hw_cpv(mp);                                                                               /* Convert to virtual addressing */
+       splx(s);                                                                                                /* Restore 'rupts */
        
-       if(!mpv->physent) {                                                                             /* Was there a physical entry? */
-               pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1)));      /* Get physical address from physent */
-       }
-       else {
-               pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1)));     /* Get physical address from physent */
-               hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);     /* Unlock the physical entry */
-       }
+       return va;                                                                                              /* Bye, bye... */
        
-       splx(s);                                                                                                /* Restore 'rupts */
-       return pa;                                                                                              /* Return the physical address... */
 }
 
+
 /*
- *     phystokv(addr)
+ *     kvtophys(addr)
  *
- *     Convert a physical address to a kernel virtual address if
- *     there is a mapping, otherwise return NULL
+ *     Convert a kernel virtual address to a physical address
  */
+addr64_t kvtophys(vm_offset_t va) {
 
-vm_offset_t phystokv(vm_offset_t pa) {
-
-       struct phys_entry       *pp;
-       vm_offset_t                     va;
-
-       pp = pmap_find_physentry(pa);                                                   /* Find the physical entry */
-       if (PHYS_NULL == pp) {
-               return (vm_offset_t)NULL;                                                       /* If none, return null */
-       }
-       if(!(va=mapping_p2v(kernel_pmap, pp))) {
-               return 0;                                                                                       /* Can't find it, return 0... */
-       }
-       return (va | (pa & (PAGE_SIZE-1)));                                             /* Build and return VADDR... */
+       return pmap_extract(kernel_pmap, va);                                   /* Find mapping and lock the physical entry for this mapping */
 
 }
 
@@ -1708,353 +1541,240 @@ vm_offset_t phystokv(vm_offset_t pa) {
 
 void ignore_zero_fault(boolean_t type) {                               /* Sets up to ignore or honor any fault on page 0 access for the current thread */
 
-       if(type) current_act()->mact.specFlags |= ignoreZeroFault;      /* Ignore faults on page 0 */
-       else     current_act()->mact.specFlags &= ~ignoreZeroFault;     /* Honor faults on page 0 */
+       if(type) current_thread()->machine.specFlags |= ignoreZeroFault;        /* Ignore faults on page 0 */
+       else     current_thread()->machine.specFlags &= ~ignoreZeroFault;       /* Honor faults on page 0 */
        
        return;                                                                                         /* Return the result or 0... */
 }
 
-
 /*
- *     Allocates a range of virtual addresses in a map as optimally as
- *     possible for block mapping.  The start address is aligned such
- *     that a minimum number of power-of-two sized/aligned blocks is
- *     required to cover the entire range. 
- *
- *     We also use a mask of valid block sizes to determine optimality.
- *
- *     Note that the passed in pa is not actually mapped to the selected va,
- *     rather, it is used to figure the optimal boundary.  The actual 
- *     V to R mapping is done externally.
- *
- *     This function will return KERN_INVALID_ADDRESS if an optimal address 
- *     can not be found.  It is not necessarily a fatal error, the caller may still be
- *     still be able to do a non-optimal assignment.
+ * nop in current ppc implementation
  */
-
-kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa, 
-       vm_size_t size, vm_prot_t prot) {
-
-       vm_map_entry_t  entry, next, tmp_entry, new_entry;
-       vm_offset_t             start, end, algnpa, endadr, strtadr, curradr;
-       vm_offset_t             boundary;
-       
-       unsigned int    maxsize, minsize, leading, trailing;
-       
-       assert(page_aligned(pa));
-       assert(page_aligned(size));
-
-       if (map == VM_MAP_NULL) return(KERN_INVALID_ARGUMENT);  /* Dude, like we need a target map */
-       
-       minsize = blokValid ^ (blokValid & (blokValid - 1));    /* Set minimum subblock size */
-       maxsize = 0x80000000 >> cntlzw(blokValid);      /* Set maximum subblock size */
-       
-       boundary = 0x80000000 >> cntlzw(size);          /* Get optimal boundary */
-       if(boundary > maxsize) boundary = maxsize;      /* Pin this at maximum supported hardware size */
-       
-       vm_map_lock(map);                                                       /* No touchee no mapee */
-
-       for(; boundary > minsize; boundary >>= 1) {     /* Try all optimizations until we find one */
-               if(!(boundary & blokValid)) continue;   /* Skip unavailable block sizes */
-               algnpa = (pa + boundary - 1) & -boundary;       /* Round physical up */
-               leading = algnpa - pa;                                  /* Get leading size */
-               
-               curradr = 0;                                                    /* Start low */
-               
-               while(1) {                                                              /* Try all possible values for this opt level */
-
-                       curradr = curradr + boundary;           /* Get the next optimal address */
-                       strtadr = curradr - leading;            /* Calculate start of optimal range */
-                       endadr = strtadr + size;                        /* And now the end */
-                       
-                       if((curradr < boundary) ||                      /* Did address wrap here? */
-                               (strtadr > curradr) ||                  /* How about this way? */
-                               (endadr < strtadr)) break;              /* We wrapped, try next lower optimization... */
-               
-                       if(strtadr < map->min_offset) continue; /* Jump to the next higher slot... */
-                       if(endadr > map->max_offset) break;     /* No room right now... */
-                       
-                       if(vm_map_lookup_entry(map, strtadr, &entry)) continue; /* Find slot, continue if allocated... */
-               
-                       next = entry->vme_next;                         /* Get the next entry */
-                       if((next == vm_map_to_entry(map)) ||    /* Are we the last entry? */
-                               (next->vme_start >= endadr)) {  /* or do we end before the next entry? */
-                       
-                               new_entry = vm_map_entry_insert(map, entry, strtadr, endadr, /* Yes, carve out our entry */
-                                       VM_OBJECT_NULL,
-                                       0,                                                      /* Offset into object of 0 */
-                                       FALSE,                                          /* No copy needed */
-                                       FALSE,                                          /* Not shared */
-                                       FALSE,                                          /* Not in transition */
-                                       prot,                                           /* Set the protection to requested */
-                                       prot,                                           /* We can't change protection */
-                                       VM_BEHAVIOR_DEFAULT,            /* Use default behavior, but makes no never mind,
-                                                                                                  'cause we don't page in this area */
-                                       VM_INHERIT_DEFAULT,             /* Default inheritance */
-                                       0);                                                     /* Nothing is wired */
-                       
-                               vm_map_unlock(map);                             /* Let the world see it all */
-                               *va = strtadr;                                  /* Tell everyone */
-                               *bnd = boundary;                                /* Say what boundary we are aligned to */
-                               return(KERN_SUCCESS);                   /* Leave, all is right with the world... */
-                       }
-               }               
-       }       
-
-       vm_map_unlock(map);                                                     /* Couldn't find a slot */
-       return(KERN_INVALID_ADDRESS);
+void inval_copy_windows(__unused thread_t t)
+{
 }
 
 }
 
+
 /* 
- *             Copies data from a physical page to a virtual page.  This is used to 
- *             move data from the kernel to user state.
+ *             Copies data between a physical page and a virtual page, or 2 physical.  This is used to 
+ *             move data from the kernel to user state. Note that the "which" parm
+ *             says which of the parameters is physical and if we need to flush sink/source.  
+ *             Note that both addresses may be physical, but only one may be virtual.
+ *
+ *             The rules are that the size can be anything.  Either address can be on any boundary
+ *             and span pages.  The physical data must be contiguous as must the virtual.
  *
- *             Note that it is invalid to have a source that spans a page boundry.
- *             This can block.
- *             We don't check protection either.
- *             And we don't handle a block mapped sink address either.
+ *             We can block when we try to resolve the virtual address at each page boundary.
+ *             We don't check protection on the physical page.
+ *
+ *             Note that we will not check the entire range and if a page translation fails,
+ *             we will stop with partial contents copied.
  *
  */
  
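[Editor's note] The loop below advances in page-bounded chunks: each pass copies at most up to the next 4 KB boundary of the source or the sink, whichever is closer, and never more than the remaining size. This sketch (invented name, standalone arithmetic only) restates that calculation from the lop/csize code at the end of this section.

/* Editor's sketch, not kernel code: the per-pass copy size used by the
   loop below. */
static unsigned int sketch_copy_chunk(unsigned long long source, unsigned long long sink,
		unsigned int remaining) {
	unsigned int lop = (unsigned int)(4096ULL - (sink & 4095ULL));		/* bytes left on the sink's page */
	unsigned int srcl = (unsigned int)(4096ULL - (source & 4095ULL));	/* bytes left on the source's page */

	if (srcl < lop) lop = srcl;				/* the nearer page boundary wins */
	return (remaining < lop) ? remaining : lop;		/* and never exceed what is left to copy */
}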
-kern_return_t copyp2v(vm_offset_t source, vm_offset_t sink, unsigned int size) {
+kern_return_t hw_copypv_32(addr64_t source, addr64_t sink, unsigned int size, int which) {
  
        vm_map_t map;
        kern_return_t ret;
-       unsigned int spaceid;
-       int left, csize;
-       vm_offset_t pa;
-       register mapping *mpv, *mp;
+       addr64_t nextva, vaddr, paddr;
+       register mapping_t *mp;
        spl_t s;
+       unsigned int lop, csize;
+       int needtran, bothphys;
+       unsigned int pindex;
+       phys_entry_t *physent;
+       vm_prot_t prot;
+       int orig_which;
 
 
-       map = current_act()->map;                                               /* Get the current map */
+       orig_which = which;
 
 
-               s=splhigh();                                                            /* Don't bother me */
-       
-               spaceid = map->pmap->pmapSegs[(unsigned int)sink >> 28];        /* Get space ID. Don't bother to clean top bits */
+       map = (which & cppvKmap) ? kernel_map : current_map_fast();
 
 
-               if(!mp) {                                                                       /* Was it there? */
-                       splx(s);                                                                /* Restore the interrupt level */
-                       ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0);    /* Didn't find it, try to fault it in... */
-                       if (ret == KERN_SUCCESS) continue;              /* We got it in, try again to find it... */
+       if((which & (cppvPsrc | cppvPsnk)) == 0 ) {             /* Make sure that only one is virtual */
+               panic("copypv: no more than 1 parameter may be virtual\n");     /* Not allowed */
+       }
+       
+       bothphys = 1;                                                                   /* Assume both are physical */
+       
+       if(!(which & cppvPsnk)) {                                               /* Is sink page virtual? */
+               vaddr = sink;                                                           /* Sink side is virtual */
+               bothphys = 0;                                                           /* Show both aren't physical */
+               prot = VM_PROT_READ | VM_PROT_WRITE;            /* Sink always must be read/write */
+       } else if (!(which & cppvPsrc)) {                               /* Is source page virtual? */
+               vaddr = source;                                                         /* Source side is virtual */
+               bothphys = 0;                                                           /* Show both aren't physical */
+               prot = VM_PROT_READ;                                            /* Virtual source is always read only */
+       }
 
-                       return KERN_FAILURE;                                    /* Didn't find any, return no good... */
-               }
-               if((unsigned int)mp&1) {                                        /* Did we timeout? */
-                       panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", sink);   /* Yeah, scream about it! */
-                       splx(s);                                                                /* Restore the interrupt level */
-                       return KERN_FAILURE;                                    /* Bad hair day, return FALSE... */
-               }
+       needtran = 1;                                                                   /* Show we need to map the virtual the first time */
+       s = splhigh();                                                                  /* Don't bother me */
 
-               mpv = hw_cpv(mp);                                                       /* Convert mapping block to virtual */
+       while(size) {
 
-               if(mpv->PTEr & 1) {                                                     /* Are we write protected? yes, could indicate COW */
-                       hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);     /* Unlock the sink */
-                       splx(s);                                                                /* Restore the interrupt level */
-                       ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0);    /* check for a COW area */
-                       if (ret == KERN_SUCCESS) continue;              /* We got it in, try again to find it... */
-                       return KERN_FAILURE;                                    /* Didn't find any, return no good... */
+               if(!bothphys && (needtran || !(vaddr & 4095LL))) {      /* If first time or we stepped onto a new page, we need to translate */
+                       if(!needtran) {                                                 /* If this is not the first translation, we need to drop the old busy */
+                               mapping_drop_busy(mp);                          /* Release the old mapping now */
+                       }
+                       needtran = 0;
+                       
+                       while(1) {
+                               mp = mapping_find(map->pmap, vaddr, &nextva, 1);        /* Find and busy the mapping */
+                               if(!mp) {                                                       /* Was it there? */
+                                       if(getPerProc()->istackptr == 0)
+                                               panic("copypv: No vaild mapping on memory %s %x", "RD", vaddr);
+
+                                       splx(s);                                                /* Restore the interrupt level */
+                                       ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0);      /* Didn't find it, try to fault it in... */
+                               
+                                       if(ret != KERN_SUCCESS)return KERN_FAILURE;     /* Didn't find any, return no good... */
+                                       
+                                       s = splhigh();                                  /* Don't bother me */
+                                       continue;                                               /* Go try for the map again... */
+       
+                               }
+                               if (mp->mpVAddr & mpI) {                 /* cache inhibited, so force the appropriate page to be flushed before */
+                                       if (which & cppvPsrc)            /* and after the copy to avoid cache paradoxes */
+                                               which |= cppvFsnk;
+                                       else
+                                               which |= cppvFsrc;
+                               } else
+                                       which = orig_which;
+
+                               /* Note that the destination has to be writable.  So, if we already have it writable, or the
+                                       virtual page we just mapped is only the source, we can just leave.
+                               */              
+                               if((which & cppvPsnk) || !(mp->mpVAddr & 1)) break;             /* We got it mapped R/W, or the sink is physical so the virtual page is only a source; leave... */
+                       
+                               mapping_drop_busy(mp);                          /* Go ahead and release the mapping for now */
+                               if(getPerProc()->istackptr == 0)        /* istackptr is zeroed while the interrupt stack is in use, so we cannot fault from here */
+                                       panic("copypv: No valid mapping on memory %s %x", "RDWR", vaddr);
+                               splx(s);                                                        /* Restore the interrupt level */
+                               
+                               ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0);      /* check for a COW area */
+                               if (ret != KERN_SUCCESS) return KERN_FAILURE;   /* We couldn't get it R/W, leave in disgrace... */
+                               s = splhigh();                                          /* Don't bother me */
+                       }
+                       paddr = ((addr64_t)mp->mpPAddr << 12) + (vaddr - (mp->mpVAddr & -4096LL));        /* construct the physical address... this calculation works */
+                                                                                                         /* properly on both single page and block mappings */
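+                                                                                                         /* For example (illustrative values only): mpPAddr 0x00012 gives a physical base of 0x12000, and a */
+                                                                                                         /* vaddr 0x345 bytes past the mapping base (mpVAddr & -4096LL) yields paddr 0x12345 */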
+                       if(which & cppvPsrc) sink = paddr;              /* If source is physical, then the sink is virtual */
+               else source = paddr;                                    /* Otherwise the source is the one that was virtual */
                }
                }
-               left = PAGE_SIZE - (sink & PAGE_MASK);          /* Get amount left on sink page */
-
-               csize = size < left ? size : left;              /* Set amount to copy this pass */
-
-               pa = (vm_offset_t)((mpv->physent->pte1 & ~PAGE_MASK) | ((unsigned int)sink & PAGE_MASK));       /* Get physical address of sink */
-
-               bcopy_physvir((char *)source, (char *)pa, csize);       /* Do a physical copy, virtually */
-
-               hw_set_mod(mpv->physent);                                       /* Go set the change of the sink */
-
-               hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);     /* Unlock the sink */
-               splx(s);                                                                        /* Open up for interrupts */
-
-               sink += csize;                                                          /* Move up to start of next page */
-               source += csize;                                                        /* Move up source */
-               size -= csize;                                                          /* Set amount for next pass */
-       }
-       return KERN_SUCCESS;
-}
+                       
+               lop = (unsigned int)(4096LL - (sink & 4095LL));         /* Assume the sink page has the least room left */
+               if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL));  /* No, the source page has less room */
+               
+               csize = size;                                                           /* Assume we can copy it all */
+               if(lop < size) csize = lop;                                     /* Nope, we can't do it all */
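+               /* For example (illustrative values only): sink offset 0xF00 and source offset 0x800 within their pages */
+               /* give lop = min(0x100, 0x800) = 0x100, so at most 0x100 bytes are moved on this pass */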
+               
+               if(which & cppvFsrc) flush_dcache64(source, csize, 1);  /* If requested, flush source before move */
+               if(which & cppvFsnk) flush_dcache64(sink, csize, 1);    /* If requested, flush sink before move */
 
 
+               bcopy_physvir_32(source, sink, csize);                  /* Do a physical copy, virtually */
+               
+               if(which & cppvFsrc) flush_dcache64(source, csize, 1);  /* If requested, flush source after move */
+               if(which & cppvFsnk) flush_dcache64(sink, csize, 1);    /* If requested, flush sink after move */
 
 /*
 
 /*
- * copy 'size' bytes from physical to physical address
- * the caller must validate the physical ranges 
- *
- * if flush_action == 0, no cache flush necessary
- * if flush_action == 1, flush the source
- * if flush_action == 2, flush the dest
- * if flush_action == 3, flush both source and dest
+ *             Note that for certain ram disk flavors, we may be copying outside of known memory.
+ *             Therefore, before we try to mark it modified, we check that a physical entry for it exists.
  */
 
  */
 
-kern_return_t copyp2p(vm_offset_t source, vm_offset_t dest, unsigned int size, unsigned int flush_action) {
-
-        switch(flush_action) {
-       case 1:
-               flush_dcache(source, size, 1);
-               break;
-       case 2:
-               flush_dcache(dest, size, 1);
-               break;
-       case 3:
-               flush_dcache(source, size, 1);
-               flush_dcache(dest, size, 1);
-               break;
-
+               if( !(which & cppvNoModSnk)) {
+                       physent = mapping_phys_lookup(sink >> 12, &pindex);     /* Get physical entry for sink */
+                       if(physent) mapping_set_mod((ppnum_t)(sink >> 12));             /* Make sure we know that it is modified */
+               }
+               if( !(which & cppvNoRefSrc)) {
+                       physent = mapping_phys_lookup(source >> 12, &pindex);   /* Get physical entry for source */
+                       if(physent) mapping_set_ref((ppnum_t)(source >> 12));           /* Make sure we know that it has been referenced */
+               }
+               size = size - csize;                                            /* Calculate what is left */
+               vaddr = vaddr + csize;                                          /* Bump the virtual address past what we just copied */
+               source = source + csize;                                        /* Bump source to next physical address */
+               sink = sink + csize;                                            /* Bump sink to next physical address */
        }
        }
-        bcopy_phys((char *)source, (char *)dest, size);        /* Do a physical copy */
-
-        switch(flush_action) {
-       case 1:
-               flush_dcache(source, size, 1);
-               break;
-       case 2:
-               flush_dcache(dest, size, 1);
-               break;
-       case 3:
-               flush_dcache(source, size, 1);
-               flush_dcache(dest, size, 1);
-               break;
+       
+       if(!bothphys) mapping_drop_busy(mp);                    /* Go ahead and release the mapping of the virtual page if any */
+       splx(s);                                                                                /* Open up for interrupts */
 
 
-       }
+       return KERN_SUCCESS;
 }
 
 
 }
 
 
-
-#if DEBUG
 /*
 /*
- *             Dumps out the mapping stuff associated with a virtual address
+ *     Debug code 
  */
  */
-void dumpaddr(space_t space, vm_offset_t va) {
-
-       mapping         *mp, *mpv;
-       vm_offset_t     pa;
-       spl_t           s;
 
 
-       s=splhigh();                                                                                    /* Don't bother me */
-
-       mp = hw_lock_phys_vir(space, va);                                               /* Lock the physical entry for this mapping */
-       if(!mp) {                                                                                               /* Did we find one? */
-               splx(s);                                                                                        /* Restore the interrupt level */
-               printf("dumpaddr: virtual address (%08X) not mapped\n", va);    
-               return;                                                                                         /* Didn't find any, return FALSE... */
-       }
-       if((unsigned int)mp&1) {                                                                /* Did we timeout? */
-               panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", va);     /* Yeah, scream about it! */
-               splx(s);                                                                                        /* Restore the interrupt level */
-               return;                                                                                         /* Bad hair day, return FALSE... */
-       }
-       printf("dumpaddr: space=%08X; vaddr=%08X\n", space, va);        /* Say what address were dumping */
-       mpv = hw_cpv(mp);                                                                               /* Get virtual address of mapping */
-       dumpmapping(mpv);
-       if(mpv->physent) {
-               dumppca(mpv);
-               hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);     /* Unlock physical entry associated with mapping */
-       }
-       splx(s);                                                                                                /* Was there something you needed? */
-       return;                                                                                                 /* Tell them we did it */
-}
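+/*
+ *             mapping_verify - walk the mapping block free and release chains and panic on any
+ *             inconsistency: a free block whose address tag does not match, a last-block pointer
+ *             that does not point at the final free block, or a release queue whose length
+ *             disagrees with mapcreln.
+ */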
+void mapping_verify(void) {
 
 
+       spl_t           s;
+       mappingblok_t   *mb, *mbn;
+       unsigned int    relncnt;
+       unsigned int    dumbodude;
 
 
+       dumbodude = 0;
+       
+       s = splhigh();                                                                                  /* Don't bother from now on */
 
 
-/*
- *             Prints out a mapping control block
- *
- */
-void dumpmapping(struct mapping *mp) {                                                 /* Dump out a mapping */
-
-       printf("Dump of mapping block: %08X\n", mp);                    /* Header */
-       printf("                 next: %08X\n", mp->next);                 
-       printf("             hashnext: %08X\n", mp->hashnext);                 
-       printf("              PTEhash: %08X\n", mp->PTEhash);                 
-       printf("               PTEent: %08X\n", mp->PTEent);                 
-       printf("              physent: %08X\n", mp->physent);                 
-       printf("                 PTEv: %08X\n", mp->PTEv);                 
-       printf("                 PTEr: %08X\n", mp->PTEr);                 
-       printf("                 pmap: %08X\n", mp->pmap);
-       
-       if(mp->physent) {                                                                       /* Print physent if it exists */
-               printf("Associated physical entry: %08X %08X\n", mp->physent->phys_link, mp->physent->pte1);
+       mbn = 0;                                                                                                /* Start with none */
+       for(mb = mapCtl.mapcnext; mb; mb = mb->nextblok) {              /* Walk the free chain */
+               if((mappingblok_t *)(mb->mapblokflags & 0x7FFFFFFF) != mb) {    /* Does the tag (the block's own address in the low 31 bits of the flags) check out? */
+                       panic("mapping_verify: flags tag bad, free chain; mb = %08X, tag = %08X\n", mb, mb->mapblokflags);
+               }
+               mbn = mb;                                                                                       /* Remember the last one */
        }
        }
-       else {
-               printf("Associated physical entry: none\n");
+       
+       if(mapCtl.mapcnext && (mapCtl.mapclast != mbn)) {               /* Do we point to the last one? */
+               panic("mapping_verify: last pointer bad; mbn = %08X, mapclast = %08X\n", mbn, mapCtl.mapclast);
        }
        
        }
        
-       dumppca(mp);                                                                            /* Dump out the PCA information */
+       relncnt = 0;                                                                                    /* Clear count */
+       for(mb = mapCtl.mapcrel; mb; mb = mb->nextblok) {               /* Walk the release chain */
+               dumbodude |= mb->mapblokflags;                                          /* Just touch it to make sure it is mapped */
+               relncnt++;                                                                                      /* Count this one */
+       }
        
        
-       return;
-}
+       if(mapCtl.mapcreln != relncnt) {                                                        /* Is the count on release queue ok? */
+               panic("mapping_verify: bad release queue count; mapcreln = %d, cnt = %d, ignore this = %08X\n", mapCtl.mapcreln, relncnt, dumbodude);
+       }
 
 
-/*
- *             Prints out a PTEG control area
- *
- */
-void dumppca(struct mapping *mp) {                                             /* PCA */
-
-       PCA                             *pca;
-       unsigned int    *pteg;
-       
-       pca = (PCA *)((unsigned int)mp->PTEhash&-64);           /* Back up to the start of the PCA */
-       pteg=(unsigned int *)((unsigned int)pca-(((hash_table_base&0x0000FFFF)+1)<<16));
-       printf(" Dump of PCA: %08X\n", pca);            /* Header */
-       printf("     PCAlock: %08X\n", pca->PCAlock);                 
-       printf("     PCAallo: %08X\n", pca->flgs.PCAallo);                 
-       printf("     PCAhash: %08X %08X %08X %08X\n", pca->PCAhash[0], pca->PCAhash[1], pca->PCAhash[2], pca->PCAhash[3]);                 
-       printf("              %08X %08X %08X %08X\n", pca->PCAhash[4], pca->PCAhash[5], pca->PCAhash[6], pca->PCAhash[7]);                 
-       printf("Dump of PTEG: %08X\n", pteg);           /* Header */
-       printf("              %08X %08X %08X %08X\n", pteg[0], pteg[1], pteg[2], pteg[3]);                 
-       printf("              %08X %08X %08X %08X\n", pteg[4], pteg[5], pteg[6], pteg[7]);                 
-       printf("              %08X %08X %08X %08X\n", pteg[8], pteg[9], pteg[10], pteg[11]);                 
-       printf("              %08X %08X %08X %08X\n", pteg[12], pteg[13], pteg[14], pteg[15]);                 
-       return;
-}
+       splx(s);                                                                                                /* Restore 'rupts */
 
 
-/*
- *             Dumps starting with a physical entry
- */
-void dumpphys(struct phys_entry *pp) {                                                 /* Dump from physent */
-
-       mapping                 *mp;
-       PCA                             *pca;
-       unsigned int    *pteg;
-
-       printf("Dump from physical entry %08X: %08X %08X\n", pp, pp->phys_link, pp->pte1);
-       mp = hw_cpv(pp->phys_link);
-       while(mp) {
-               dumpmapping(mp);
-               dumppca(mp);
-               mp = hw_cpv(mp->next);
-       }
-       
        return;
 }
 
        return;
 }
 
-#endif
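+/*
+ *             mapping_phys_unused - debug check that the given physical page has no mappings left
+ *             chained to its physical entry; pages with no physical entry are ignored.
+ */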
+void mapping_phys_unused(ppnum_t pa) {
 
 
+       unsigned int pindex;
+       phys_entry_t *physent;
 
 
-kern_return_t bmapvideo(vm_offset_t *info);
-kern_return_t bmapvideo(vm_offset_t *info) {
+       physent = mapping_phys_lookup(pa, &pindex);                             /* Get physical entry */
+       if(!physent) return;                                                                    /* Did we find the physical page? */
 
 
-       extern struct vc_info vinfo;
+       if(!(physent->ppLink & ~(ppLock | ppFlags))) return;    /* No one else is here */
        
        
-       (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info));    /* Copy out the video info */
-       return KERN_SUCCESS;
-}
-
-kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
-kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {
+       panic("mapping_phys_unused: physical page (%08X) in use, physent = %08X\n", pa, physent);
        
        
-       pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr, 0);    /* Map it in */
-       return KERN_SUCCESS;
 }
 }
-
-kern_return_t bmapmapr(vm_offset_t va);
-kern_return_t bmapmapr(vm_offset_t va) {
        
        
-       mapping_remove(current_act()->task->map->pmap, va);     /* Remove map */
-       return KERN_SUCCESS;
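+/*
+ *             mapping_hibernate_flush - walk every physical page in every memory bank and purge its
+ *             PTEs, presumably so that no stale hardware translations survive across hibernation.
+ */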
+void mapping_hibernate_flush(void)
+{
+    int bank;
+    unsigned int page;
+    struct phys_entry * entry;
+
+    for (bank = 0; bank < pmap_mem_regions_count; bank++)
+    {
+       entry = (struct phys_entry *) pmap_mem_regions[bank].mrPhysTab;
+       for (page = pmap_mem_regions[bank].mrStart; page <= pmap_mem_regions[bank].mrEnd; page++)
+       {
+           hw_walk_phys(entry, hwpNoop, hwpNoop, hwpNoop, 0, hwpPurgePTE);
+           entry++;
+       }
+    }
 }
 }
+
+
+
+
+
+