apple/xnu.git (xnu-344.23) - osfmk/ppc/mappings.c
diff --git a/osfmk/ppc/mappings.c b/osfmk/ppc/mappings.c
index 5fc22ea7de538ff5a7189ebf5c168547d8676922..fb337d30554f1f8b91366d13c72c2ec81dc77737 100644
--- a/osfmk/ppc/mappings.c
+++ b/osfmk/ppc/mappings.c
@@ -1,24 +1,21 @@
 /*
- * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
- * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
  * 
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- * 
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
  * 
  * @APPLE_LICENSE_HEADER_END@
  */
 #include <kern/spl.h>
 
 #include <kern/misc_protos.h>
-#include <ppc/exception.h>
 #include <ppc/misc_protos.h>
 #include <ppc/proc_reg.h>
 
 #include <vm/pmap.h>
 #include <ppc/pmap.h>
+#include <ppc/pmap_internals.h>
 #include <ppc/mem.h>
 
 #include <ppc/new_screen.h>
 
 #define PERFTIMES 0
 
+#if PERFTIMES && DEBUG
+#define debugLog2(a, b, c) dbgLog2(a, b, c)
+#else
+#define debugLog2(a, b, c)
+#endif
+
 vm_map_t        mapping_map = VM_MAP_NULL;
+#define                MAPPING_MAP_SIZE        33554432        /* 32MB address space */
 
-unsigned int   incrVSID = 0;                                           /* VSID increment value */
+unsigned int   incrVSID = 0;                                                                   /* VSID increment value */
 unsigned int   mappingdeb0 = 0;                                                
-unsigned int   mappingdeb1 = 0;
-int ppc_max_adrsp;                                                                     /* Maximum address spaces */                    
-                               
-addr64_t               *mapdebug;                                                      /* (BRINGUP) */
-extern unsigned int DebugWork;                                         /* (BRINGUP) */
-                                               
+unsigned int   mappingdeb1 = 0;                                                
 extern unsigned int    hash_table_size;                                                
-
-void mapping_verify(void);
-void mapping_phys_unused(ppnum_t pa);
-
+extern vm_offset_t mem_size;
 /*
  *     ppc_prot translates from the mach representation of protections to the PPC version.
  *  We also allow for a direct setting of the protection bits. This extends the mach
@@ -152,485 +148,925 @@ void mapping_phys_unused(ppnum_t pa);
 
 void mapping_init(void) {
 
-       unsigned int tmp, maxeff, rwidth;
-       
-       ppc_max_adrsp = maxAdrSp;                                                                       /* Set maximum address spaces */                        
+       unsigned int tmp;
        
-       maxeff = 32;                                                                                            /* Assume 32-bit */
-       if(per_proc_info[0].pf.Available & pf64Bit) maxeff = 64;        /* Is this a 64-bit machine? */
-       
-       rwidth = per_proc_info[0].pf.pfMaxVAddr - maxAdrSpb;            /* Reduce address width by width of address space ID */
-       if(rwidth > maxeff) rwidth = maxeff;                                            /* If we still have more virtual than effective, clamp at effective */
+       __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */
+
+       incrVSID = 1 << ((32 - tmp + 1) >> 1);                                          /* Get ceiling of sqrt of table size */
+       incrVSID |= 1 << ((32 - tmp + 1) >> 2);                                         /* Get ceiling of quadroot of table size */
+       incrVSID |= 1;                                                                                          /* Set bit and add 1 */
+       return;
+
+}
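/*
 * Illustrative sketch (not from the xnu sources): the incrVSID
 * derivation above, restated in portable C so the intent of the
 * cntlzw math is visible.  The increment is the power-of-2 ceiling
 * of sqrt(table size), ORed with the power-of-2 ceiling of its
 * fourth root, ORed with 1 to keep the value odd.  __builtin_clz
 * stands in for the inline cntlzw and assumes a non-zero argument.
 */
static unsigned int incr_vsid_for(unsigned int hash_table_size)
{
	unsigned int bits = 32 - __builtin_clz(hash_table_size);	/* 32 - leading zeros, as in the asm */
	unsigned int incr;

	incr  = 1u << ((bits + 1) >> 1);	/* ceiling of sqrt of table size, as a power of 2 */
	incr |= 1u << ((bits + 1) >> 2);	/* ceiling of fourth root of table size */
	incr |= 1u;				/* set the low bit so the increment is odd */
	return incr;
}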
+
+
+/*
+ *             mapping_remove(pmap_t pmap, vm_offset_t va);
+ *                     Given a pmap and virtual address, this routine finds the mapping and removes it from
+ *                     both its PTEG hash list and the physical entry list.  The mapping block will be added to
+ *                     the free list.  If the free list threshold is reached, garbage collection will happen.
+ *                     We also kick back a return code to say whether or not we had one to remove.
+ *
+ *                     We have a strict ordering here:  the mapping must be removed from the PTEG hash list before
+ *                     it can be removed from the physical entry list.  This allows us to get by with only the PTEG
+ *                     hash lock at page fault time. The physical entry lock must be held while we remove the mapping 
+ *                     from both lists. The PTEG lock is one of the lowest level locks.  No PTE fault, interruptions,
+ *                     losing control, getting other locks, etc., are allowed when you hold it. You do, and you die.
+ *                     It's just that simple!
+ *
+ *                     When the phys_entry lock is held, the mappings chained to that one are guaranteed to stay around.
+ *                     However, a mapping's order on the PTEG hash chain is not.  The interrupt handler uses the PTEG
+ *                     lock to control the hash chain and may move the position of the mapping for MRU calculations.
+ *
+ *                     Note that mappings do not need to point to a physical entry. When they don't, it indicates 
+ *                     the mapping is outside of physical memory and usually refers to a memory mapped device of
+ *                     some sort.  Naturally, we can't lock what we don't have, so the phys entry lock and unlock
+ *                     routines return normally, but don't do anything.
+ */
+
+boolean_t mapping_remove(pmap_t pmap, vm_offset_t va) {                        /* Remove a single mapping for this VADDR 
+                                                                                                                                  Returns TRUE if a mapping was found to remove */
+
+       mapping         *mp, *mpv;
+       register blokmap *blm;
+       spl_t           s;
+       unsigned int *useadd, *useaddr, uindx;
+       int i;
+       struct phys_entry       *pp;
+       mapping                 *mp1, *mpv1;
        
-       vm_max_address = 0xFFFFFFFFFFFFFFFFULL >> (64 - rwidth);                /* Get maximum effective address supported */
-       vm_max_physical = 0xFFFFFFFFFFFFFFFFULL >> (64 - per_proc_info[0].pf.pfMaxPAddr);       /* Get maximum physical address supported */
+       debugLog2(1, va, pmap->space);                                                          /* start mapping_remove */
+
+       s=splhigh();                                                                                            /* Don't bother me */
        
-       if(per_proc_info[0].pf.Available & pf64Bit) {                           /* Are we 64 bit? */
-               tmp = 12;                                                                                               /* Size of hash space */
+       mp = hw_lock_phys_vir(pmap->space, va);                                         /* Lock the physical entry for this mapping */
+
+       if(!mp) {                                                                                                       /* Did we find one? */
+               splx(s);                                                                                        /* Allow 'rupts now */
+               if(mp = (mapping *)hw_rem_blk(pmap, va, va)) {                  /* No normal pages, try to remove an odd-sized one */
+                       
+                       if((unsigned int)mp & 1) {                                                      /* Make sure we don't unmap a permanent one */
+                               blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFC));            /* Get virtual address */
+                               panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
+                                       pmap, va, blm);
+                       }
+                       while ((unsigned int)mp & 2)
+                               mp = (mapping *)hw_rem_blk(pmap, va, va);
+#if 0
+                       blm = (blokmap *)hw_cpv(mp);                                            /* (TEST/DEBUG) */
+                       kprintf("mapping_remove: removed block map - bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",       /* (TEST/DEBUG) */
+                        blm, blm->start, blm->end, blm->PTEr);
+#endif
+                       mapping_free(hw_cpv(mp));                                                       /* Release it */
+                       debugLog2(2, 1, 0);                                                                     /* End mapping_remove */
+                       return TRUE;                                                                            /* Tell them we did it */
+               }
+               debugLog2(2, 0, 0);                                                                             /* end mapping_remove */
+               return FALSE;                                                                                   /* Didn't find any, return FALSE... */
        }
-       else {
-               __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */
-               tmp = 32 - tmp;                                                                                 /* Size of hash space */
+       if((unsigned int)mp&1) {                                                                        /* Did we timeout? */
+               panic("mapping_remove: timeout locking physical entry\n");      /* Yeah, scream about it! */
+               splx(s);                                                                                                /* Restore the interrupt level */
+               return FALSE;                                                                                   /* Bad hair day, return FALSE... */
        }
+       
+       mpv = hw_cpv(mp);                                                                                       /* Get virtual address of mapping */
+#if DEBUG
+       if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
+#else
+       (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);       /* Decrement the resident page count */
+#endif
+       useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask];       /* Point to slot to bump */
+       useaddr = (unsigned int *)((unsigned int)useadd & -4);          /* Round down to word */
+       (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);     /* Decrement the even or odd slot */
 
-       incrVSID = 1 << ((tmp + 1) >> 1);                                                       /* Get ceiling of sqrt of table size */
-       incrVSID |= 1 << ((tmp + 1) >> 2);                                                      /* Get ceiling of quadroot of table size */
-       incrVSID |= 1;                                                                                          /* Set bit and add 1 */
+#if 0
+       for(i = 0; i < (pmapUsageMask + 1); i++) {                                      /* (TEST/DEBUG) */
+               if((mpv->pmap->pmapUsage[i]) > 8192) {                                  /* (TEST/DEBUG) */
+                       panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
+                               i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
+               }
+       }
+#endif
+       
+       hw_rem_map(mp);                                                                                         /* Remove the corresponding mapping */
 
-       return;
+       pp = mpv->physent;
 
-}
+       if ((mpv->physent) && (pmap->vflags & pmapVMhost)) {
+
+               while(mp1 = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) {   /* Keep going so long as there's another */
+
+                       mpv1 = hw_cpv(mp1);                                                                             /* Get the virtual address */
+#if DEBUG
+                       if(hw_atomic_sub(&mpv1->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
+#else
+                       (void)hw_atomic_sub(&mpv1->pmap->stats.resident_count, 1);      /* Decrement the resident page count */
+#endif
+
+                       uindx = ((mpv1->PTEv >> 24) & 0x78) | ((mpv1->PTEv >> 3) & 7);  /* Join segment number and top 2 bits of the API */
+                       useadd = (unsigned int *)&mpv1->pmap->pmapUsage[uindx]; /* Point to slot to bump */
+                       useaddr = (unsigned int *)((unsigned int)useadd & -4);  /* Round down to word */
+                       (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);     /* Decrement the even or odd slot */
+
+#if 0
+                       for(i = 0; i < (pmapUsageMask + 1); i++) {                              /* (TEST/DEBUG) */
+                               if((mpv1->pmap->pmapUsage[i]) > 8192) {                         /* (TEST/DEBUG) */
+                                       panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
+                               i * pmapUsageSize, mpv1->pmap->pmapUsage[i], mpv1->pmap);
+                       }
+               }
+#endif
+       
+                       hw_rem_map(mp1);                                                                                /* Remove the mapping */
+                       mapping_free(mpv1);                                                                             /* Add mapping to the free list */
+               }
+       }
 
+       if(mpv->physent)hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);     /* Unlock physical entry associated with mapping */
+       
+       splx(s);                                                                                                        /* Was there something you needed? */
+               
+       mapping_free(mpv);                                                                                      /* Add mapping to the free list */
+       debugLog2(2, 1, 0);                                                                                     /* end mapping_remove */
+       return TRUE;                                                                                            /* Tell them we did it */
+}
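/*
 * Illustrative sketch (not from the xnu sources): the pmapUsage
 * arithmetic above packs two 16-bit counters into each 32-bit word.
 * Rounding the slot address down to a word boundary and atomically
 * adding or subtracting 0x00010000 (even slot, high halfword on
 * big-endian PPC) or 1 (odd slot, low halfword) adjusts the right
 * counter with a single word-sized atomic operation.
 * usage_slot_adjust is a hypothetical helper; hw_atomic_add is the
 * primitive this file already uses.
 */
static void usage_slot_adjust(unsigned short *slot, int delta)	/* delta is +1 or -1 */
{
	unsigned int *word = (unsigned int *)((unsigned int)slot & -4);		/* round down to word */
	unsigned int incr = (word == (unsigned int *)slot) ? 0x00010000 : 1;	/* high or low halfword */

	(void)hw_atomic_add(word, delta * (int)incr);	/* one atomic op covers either halfword */
}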
 
 /*
- *             mapping_remove(pmap_t pmap, addr64_t va);
- *                     Given a pmap and virtual address, this routine finds the mapping and unmaps it.
- *                     The mapping block will be added to
- *                     the free list.  If the free list threshold is reached, garbage collection will happen.
+ *             mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) - release all mappings for this physent for the specified map
  *
- *                     We also pass back the next higher mapped address. This is done so that the higher level
- *                     pmap_remove function can release a range of addresses simply by calling mapping_remove
- *                     in a loop until it finishes the range or is returned a vaddr of 0.
+ *             This guy releases any mappings that exist for a physical page on a specified map.
+ *             We get the lock on the phys_entry, and hold it throughout this whole routine.
+ *             That way, no one can change the queue out from underneath us.  We keep fetching
+ *             the physent's mapping anchor until it is null, then we're done.
  *
- *                     Note that if the mapping is not found, we return the next VA ORed with 1
+ *             For each mapping, we call the remove routine to remove it from the PTEG hash list and 
+ *             decrement the pmap's residency count.  Then we release the mapping back to the free list.
  *
  */
 
-addr64_t mapping_remove(pmap_t pmap, addr64_t va) {            /* Remove a single mapping for this VADDR 
-                                                                                                                  Returns TRUE if a mapping was found to remove */
+void mapping_purge_pmap(struct phys_entry *pp, pmap_t pmap) {          /* Remove all mappings from specified pmap for this physent */
 
-       mapping         *mp;
-       addr64_t        nextva;
+       mapping         *mp, *mp_next, *mpv;
+       spl_t           s;
+       unsigned int *useadd, *useaddr, uindx;
+       int i;
+               
+       s=splhigh();                                                                    /* Don't bother me */
        
-       disable_preemption();                                                           /* Don't change threads */
-
-       while(1) {                                                                                      /* Keep trying until we truely fail */
-               mp = hw_rem_map(pmap, va, &nextva);                             /* Remove a mapping from this pmap */
-               if(((unsigned int)mp & mapRetCode) != mapRtRemove) break;       /* If it is gone, we are done */
+       if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {      /* Lock the physical entry */
+               panic("\nmapping_purge_pmap: Timeout attempting to lock physical entry at %08X: %08X %08X\n", 
+                       pp, pp->phys_link, pp->pte1);   /* Complain about timeout */
        }
 
-       enable_preemption();                                                            /* Thread change ok */
+       mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);
+       
+       while(mp) {     /* Keep going so long as there's another */
 
-       if(!mp) return (nextva | 1);                                            /* Nothing found to unmap */
+               mpv = hw_cpv(mp);                                       /* Get the virtual address */
+               if(mpv->pmap != pmap) {
+                       mp = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
+                       continue;
+               }
+#if DEBUG
+               if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
+#else
+               (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);       /* Decrement the resident page count */
+#endif
 
-       if((unsigned int)mp & mapRetCode) {                                     /* Was there a failure? */
+               uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7);    /* Join seg # and top 2 bits of API */
+               useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx];  /* Point to slot to bump */
+               useaddr = (unsigned int *)((unsigned int)useadd & -4);  /* Round down to word */
+               (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Decrement the even or odd slot */
+
+       
+       
+               mp_next = (mapping *)((unsigned int)mpv->next & ~PHYS_FLAGS);
+               hw_rem_map(mp);                                         /* Remove the mapping */
+               mapping_free(mpv);                                      /* Add mapping to the free list */
+               mp = mp_next;
+       }
+               
+       hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);       /* We're done, unlock the physical entry */
+       splx(s);
+       return;
+}
+/*
+ *             mapping_purge(struct phys_entry *pp) - release all mappings for this physent to the free list 
+ *
+ *             This guy releases any mappings that exist for a physical page.
+ *             We get the lock on the phys_entry, and hold it throughout this whole routine.
+ *             That way, no one can change the queue out from underneath us.  We keep fetching
+ *             the physent's mapping anchor until it is null, then we're done.
+ *
+ *             For each mapping, we call the remove routine to remove it from the PTEG hash list and 
+ *             decrement the pmap's residency count.  Then we release the mapping back to the free list.
+ *
+ */
+void mapping_purge(struct phys_entry *pp) {                                            /* Remove all mappings for this physent */
+
+       mapping         *mp, *mpv;
+       spl_t           s;
+       unsigned int *useadd, *useaddr, uindx;
+       int i;
+               
+       s=splhigh();                                                                                            /* Don't bother me */
+       debugLog2(3, pp->pte1, 0);                                                                      /* start mapping_purge */
        
-               panic("mapping_remove: hw_rem_map failed - pmap = %08X, va = %016llX, code = %08X\n",
-                       pmap, va, mp);
+       if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {              /* Lock the physical entry */
+               panic("\nmapping_purge: Timeout attempting to lock physical entry at %08X: %08X %08X\n", 
+                       pp, pp->phys_link, pp->pte1);   /* Complain about timeout */
        }
        
-       mapping_free(mp);                                                                       /* Add mapping to the free list */
+       while(mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) {    /* Keep going so long as there's another */
 
-       return nextva;                                                                          /* Tell them we did it */
+               mpv = hw_cpv(mp);                                                                               /* Get the virtual address */
+#if DEBUG
+               if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n");
+#else
+               (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1);       /* Decrement the resident page count */
+#endif
+
+               uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7);    /* Join segment number and top 2 bits of the API */
+               useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx];  /* Point to slot to bump */
+               useaddr = (unsigned int *)((unsigned int)useadd & -4);  /* Round down to word */
+               (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1);     /* Decrement the even or odd slot */
+
+#if 0
+       for(i = 0; i < (pmapUsageMask + 1); i++) {                                      /* (TEST/DEBUG) */
+               if((mpv->pmap->pmapUsage[i]) > 8192) {                                  /* (TEST/DEBUG) */
+                       panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
+                               i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
+               }
+       }
+#endif
+       
+       
+               hw_rem_map(mp);                                                                                 /* Remove the mapping */
+               mapping_free(mpv);                                                                              /* Add mapping to the free list */
+       }
+               
+       hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);       /* We're done, unlock the physical entry */
+       
+       debugLog2(4, pp->pte1, 0);                                                                      /* end mapping_purge */
+       splx(s);                                                                                                        /* Was there something you needed? */
+       return;                                                                                                         /* Tell them we did it */
 }
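/*
 * Illustrative sketch (not from the xnu sources): the purge loop
 * above is a "drain the anchor" pattern.  Because hw_rem_map()
 * unhooks the mapping from the physent chain, the loop re-reads
 * pp->phys_link each pass rather than walking a next pointer, and
 * stops when the anchor reads back empty.  Stats bookkeeping is
 * omitted here for clarity; the caller is assumed to hold PHYS_LOCK.
 */
static void drain_physent_example(struct phys_entry *pp)
{
	mapping *mp, *mpv;

	while ((mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) != 0) {
		mpv = hw_cpv(mp);	/* physical -> virtual address of the mapping block */
		hw_rem_map(mp);		/* removal also advances pp->phys_link */
		mapping_free(mpv);	/* back to the free list */
	}
}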
 
+
 /*
- *             mapping_make(pmap, va, pa, flags, size, prot) - map a virtual address to a real one 
+ *             mapping_make(pmap, pp, va, spa, prot, attr, locked) - map a virtual address to a real one 
  *
  *             This routine takes the given parameters, builds a mapping block, and queues it into the 
  *             correct lists.
  *             
- *             pmap (virtual address)          is the pmap to map into
- *             va   (virtual address)          is the 64-bit virtual address that is being mapped
- *             pa      (physical page number)  is the physical page number (i.e., physical address >> 12). This is
- *                                                                     a 32-bit quantity.
- *             Flags:
- *                     block                                   if 1, mapping is a block, size parameter is used. Note: we do not keep 
- *                                                                     reference and change information or allow protection changes of blocks.
- *                                                                     any changes must first unmap and then remap the area.
- *                     use attribute                   Use specified attributes for map, not defaults for physical page
- *                     perm                                    Mapping is permanent
- *                     cache inhibited                 Cache inhibited (used if use attribute or block set )
- *                     guarded                                 Guarded access (used if use attribute or block set )
- *             size                                            size of block (not used if not block)
- *             prot                                            VM protection bits
- *             attr                                            Cachability/Guardedness    
- *
- *             Returns 0 if mapping was successful.  Returns vaddr that overlaps/collides.
- *             Returns 1 for any other failure.
- *
- *             Note that we make an assumption that all memory in the range of 0x0000000080000000 to 0x00000000FFFFFFFF is reserved
- *             for I/O and default the cache attributes appropriately.  The caller is free to set whatever they want however.
- *
- *             If there is any physical page that is not found in the physent table, the mapping is forced to be a
- *             block mapping of length 1.  This keeps us from trying to update a physent during later mapping use,
- *             e.g., fault handling.
- *
+ *             The pp parameter can be null.  This allows us to make a mapping that is not
+ *             associated with any physical page.  We may need this for certain I/O areas.
  *
+ *             If the phys_entry address is null, we neither lock nor chain into it.
+ *             If locked is 1, we already hold the lock on the phys_entry and won't get nor release it.
  */
  
-addr64_t mapping_make(pmap_t pmap, addr64_t va, ppnum_t pa, unsigned int flags, unsigned int size, vm_prot_t prot) {   /* Make an address mapping */
+mapping *mapping_make(pmap_t pmap, struct phys_entry *pp, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, int attr, boolean_t locked) {        /* Make an address mapping */
 
-       register mapping *mp;
-       addr64_t colladdr;
-       unsigned int pindex, mflags, pattr, wimg;
-       phys_entry *physent;
-       int i, nlists;
-
-       disable_preemption();                                                                           /* Don't change threads */
+       register mapping *mp, *mpv;
+       unsigned int *useadd, *useaddr;
+       spl_t           s;
+       int i;
 
-       pindex = 0;
+       debugLog2(5, va, pa);                                                                           /* start mapping_make */
+       mpv = mapping_alloc();                                                                          /* Get a spare mapping block */
        
-       mflags = 0x01000000;                                                                            /* Start building mpFlags field (busy count = 1) */
+       mpv->pmap = pmap;                                                                                       /* Initialize the pmap pointer */
+       mpv->physent = pp;                                                                                      /* Initialize the pointer to the physical entry */
+       mpv->PTEr = ((unsigned int)pa & ~(PAGE_SIZE - 1)) | attr<<3 | ppc_prot(prot);   /* Build the real portion of the PTE */
+       mpv->PTEv = (((unsigned int)va >> 1) & 0x78000000) | (pmap->space << 7) | (((unsigned int)va >> 22) & 0x0000003F);      /* Build the VSID */
+
+       s=splhigh();                                                                                            /* Don't bother from now on */
        
-       if(!(flags & mmFlgBlock)) {                                                                     /* Is this a block map? */
+       mp = hw_cvp(mpv);                                                                                       /* Get the physical address of this */
 
-               size = 1;                                                                                               /* Set size to 1 page if not block */
-        
-               physent = mapping_phys_lookup(pa, &pindex);                             /* Get physical entry */
-               if(!physent) {                                                                                  /* Did we find the physical page? */
-                       mflags |= mpBlock;                                                                      /* Force this to a block if no physent */
-                       size = 1;                                                                                       /* Force size to 1 page */
-                       pattr = 0;                                                                                      /* Assume normal, non-I/O memory */
-                       if((pa & 0xFFF80000) == 0x00080000) pattr = mmFlgCInhib | mmFlgGuarded; /* If this page is in I/O range, set I/O attributes */
+       if(pp && !locked) {                                                                                     /* Is there a physical entry? Or do we already hold the lock? */
+               if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {      /* Lock the physical entry */
+                       panic("\nmapping_make: Timeout attempting to lock physical entry at %08X: %08X %08X\n", 
+                               pp, pp->phys_link, pp->pte1);                                   /* Complain about timeout */
                }
-               else pattr = ((physent->ppLink & (ppI | ppG)) >> 4);    /* Get the default attributes from physent */
-               
-               if(flags & mmFlgUseAttr) pattr = flags & (mmFlgCInhib | mmFlgGuarded);  /* Use requested attributes */
        }
-       else {                                                                                                          /* This is a block */
-                
-               pattr = flags & (mmFlgCInhib | mmFlgGuarded);                   /* Use requested attributes */
-               mflags |= mpBlock;                                                                              /* Show that this is a block */
+               
+       if(pp) {                                                                                                        /* See if there is a physical entry */
+               mpv->next = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS);             /* Move the old anchor to the new mapping's forward pointer */
+               pp->phys_link = (mapping *)((unsigned int)mp | (unsigned int)pp->phys_link & PHYS_FLAGS);       /* Point the anchor at us.  Now we're on the list (keep the flags) */
        }
        
-       wimg = 0x2;                                                                                                     /* Set basic PPC wimg to 0b0010 - Coherent */
-       if(pattr & mmFlgCInhib) wimg |= 0x4;                                            /* Add cache inhibited if we need to */
-       if(pattr & mmFlgGuarded) wimg |= 0x1;                                           /* Add guarded if we need to */
+       hw_add_map(mp, pmap->space, va);                                                        /* Stick it on the PTEG hash list */
        
-       mflags = mflags | (pindex << 16);                                                       /* Stick in the physical entry table index */
+       (void)hw_atomic_add(&mpv->pmap->stats.resident_count, 1);       /* Increment the resident page count */
+       useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask];       /* Point to slot to bump */
+       useaddr = (unsigned int *)((unsigned int)useadd & -4);          /* Round down to word */
+       (void)hw_atomic_add(useaddr, (useaddr == useadd) ? 0x00010000 : 1);     /* Increment the even or odd slot */
+#if 0
+       for(i = 0; i < (pmapUsageMask + 1); i++) {                                      /* (TEST/DEBUG) */
+               if((mpv->pmap->pmapUsage[i]) > 8192) {                                  /* (TEST/DEBUG) */
+                       panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
+                               i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap);
+               }
+       }
+#endif
+
+       if(pp && !locked)hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);      /* If we have one and we didn't hold on entry, unlock the physical entry */
+               
+       splx(s);                                                                                                        /* Ok for interruptions now */
+       debugLog2(6, pmap->space, prot);                                                        /* end mapping_make */
+       return mpv;                                                                                                     /* Leave... */
+}
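/*
 * Illustrative sketch (not from the xnu sources): mapping a page of
 * a memory-mapped device, per the comment above -- pp is NULL since
 * the frame lies outside managed physical memory, so nothing gets
 * locked or chained.  The attr value is an assumption: it follows
 * the wimg nibble convention used elsewhere in this file, where 0x4
 * means cache-inhibited and 0x1 means guarded.
 */
void map_device_page_example(pmap_t pmap, vm_offset_t va, vm_offset_t dev_pa)
{
	int attr = 0x4 | 0x1;		/* cache-inhibited + guarded, the usual choice for I/O */

	(void)mapping_make(pmap, (struct phys_entry *)0, va, dev_pa,
			   VM_PROT_READ | VM_PROT_WRITE, attr, FALSE);
}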
+
+
+/*
+ *             Enters optimal translations for odd-sized V=F blocks.
+ *
+ *             Builds a block map for each power-of-two hunk o' address
+ *             that exists.  This is specific to the processor type.  
+ *             PPC uses BAT register size stuff.  Future PPC might have
+ *             something else.
+ *
+ *             The supplied va is expected to be optimally aligned with respect to the supplied boundary. We're too
+ *             stupid to know otherwise so we only look at the va anyhow, so there...
+ *
+ */
+void mapping_block_map_opt(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_offset_t bnd, vm_size_t size, vm_prot_t prot, int attr) {   /* Maps optimal autogenned blocks */
+
+       register blokmap *blm, *oblm;
+       unsigned int    pg;
+       unsigned int    maxsize, boundary, leading, trailing, cbsize, minsize, tomin;
+       int                             i, maxshft, nummax, minshft;
+
+#if 1
+       kprintf("mapping_block_map_opt: pmap=%08X; va=%08X; pa=%08X; bnd=%08X; size=%08X; prot=%08X; attr=%08X\n",    /* (TEST/DEBUG) */
+        pmap, va, pa, bnd, size, prot, attr);
+#endif
        
-       if(flags & mmFlgPerm) mflags |= mpPerm;                                         /* Set permanent mapping */
+       minsize = blokValid ^ (blokValid & (blokValid - 1));    /* Set minimum subblock size */
+       maxsize = 0x80000000 >> cntlzw(blokValid);              /* Set maximum subblock size */
        
-       size = size - 1;                                                                                        /* Change size to offset */
-       if(size > 0xFFFF) return 1;                                                                     /* Leave if size is too big */
+       minshft = 31 - cntlzw(minsize);                                 /* Shift to position minimum size */
+       maxshft = 31 - cntlzw(blokValid);                               /* Shift to position maximum size */
        
-       nlists = mapSetLists(pmap);                                                                     /* Set number of lists this will be on */
+       leading = ((va + bnd - 1) & -bnd) - va;                 /* Get size of leading area */
+       trailing = size - leading;                                              /* Get size of trailing area */
+       tomin = ((va + minsize - 1) & -minsize) - va;   /* Get size needed to round up to the minimum block size */
        
-       mp = mapping_alloc(nlists);                                                                     /* Get a spare mapping block with this many lists */
+#if 1
+       kprintf("mapping_block_map_opt: bnd=%08X; leading=%08X; trailing=%08X; tomin=%08X\n", bnd, leading, trailing, tomin);           /* (TEST/DEBUG) */
+#endif
 
-                                                                /* the mapping is zero except that the mpLists field is set */
-       mp->mpFlags |= mflags;                                                                          /* Add in the rest of the flags to mpLists */
-       mp->mpSpace = pmap->space;                                                                      /* Set the address space/pmap lookup ID */
-       mp->mpBSize = size;                                                                                     /* Set the size */
-       mp->mpPte = 0;                                                                                          /* Set the PTE invalid */
-       mp->mpPAddr = pa;                                                                                       /* Set the physical page number */
-       mp->mpVAddr = (va & ~mpHWFlags) | (wimg << 3) | ppc_prot(prot); /* Add the protection and attributes to the field */
+       if(tomin)pmap_map_block(pmap, va, pa, tomin, prot, attr, 0); /* Map up to minimum block size */
        
-       while(1) {                                                                                                      /* Keep trying... */
-               colladdr = hw_add_map(pmap, mp);                                                /* Go add the mapping to the pmap */
-               if(!colladdr) {                                                                                 /* All is ok... */
-                       enable_preemption();                                                            /* Ok to switch around here */
-                       return 0;                                                                                       /* Return... */
-               }
-               
-               if((colladdr & mapRetCode) == mapRtRemove) {                    /* Is our target being removed? */
-                       (void)mapping_remove(pmap, colladdr);                           /* Yes, go help out */
-                       continue;                                                                                       /* Try to add it now */
-               }
+       va = va + tomin;                                                                /* Adjust virtual start */
+       pa = pa + tomin;                                                                /* Adjust physical start */
+       leading = leading - tomin;                                              /* Adjust leading size */
+       
+/*
+ *     Some of this code is very classic PPC.  We need to fix this up.
+ */
+       leading = leading >> minshft;                                   /* Position for bit testing */
+       cbsize = minsize;                                                               /* Set the minimum size */
+       
+       for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, small to large */
                
-               if((colladdr & mapRetCode) == mapRtMapDup) {                    /* Is our target already mapped (collision mapping must be identical)? */
-                       mapping_free(mp);                                                                       /* Return mapping to the free list */
-                       enable_preemption();                                                            /* Ok to switch around here */
-                       return 0;                                                                                       /* Normal return */
+               if(leading & 1) {               
+                       pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */
+                       pa = pa + cbsize;                                               /* Bump up physical address */
+                       va = va + cbsize;                                               /* Bump up virtual address */
                }
+       
+               leading = leading >> 1;                                         /* Shift up to next size */
+               cbsize = cbsize << 1;                                           /* Here too */
+
+       }
+       
+       nummax = trailing >> maxshft;                                   /* Get number of max size blocks left */
+       for(i=0; i < nummax - 1; i++) {                                 /* Account for all max size blocks left but 1 */
+               pmap_map_block(pmap, va, pa, maxsize, prot, attr, 0); /* Map up to next boundary */
+
+               pa = pa + maxsize;                                                      /* Bump up physical address */
+               va = va + maxsize;                                                      /* Bump up virtual address */
+               trailing -= maxsize;                                            /* Back off what we just did */
+       }
+       
+       cbsize = maxsize;                                                               /* Start at maximum size */
+       
+       for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, high to low */
                
-               if(colladdr != mapRtBadLk) {                                                    /* Did it collide? */
-                       mapping_free(mp);                                                                       /* Yeah, toss the pending mapping */
-                       enable_preemption();                                                            /* Ok to switch around here */
-                       return colladdr;                                                                        /* Pass back the overlapping address */
-               }
-                       
-               panic("mapping_make: hw_add_map failed - code = %08X, pmap = %08X, va = %016llX, mapping = %08X\n",
-                       colladdr, pmap, va, mp);                                                        /* Die dead */
+               if(trailing & cbsize) { 
+                       trailing &= ~cbsize;                                    /* Remove the block we are allocating */                                                
+                       pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */
+                       pa = pa + cbsize;                                               /* Bump up physical address */
+                       va = va + cbsize;                                               /* Bump up virtual address */
+               }       
+               cbsize = cbsize >> 1;                                           /* Next size down */
        }
        
-       return 1;                                                                                                       /* Leave... */
+       if(trailing) pmap_map_block(pmap, va, pa, trailing, prot, attr, 0); /* Map up to end */
+       
+       return;                                                                                                 /* Return */
 }
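/*
 * Illustrative sketch (not from the xnu sources): the leading-area
 * loop above, reduced to its core.  After the start is rounded up to
 * the minimum block size, each set bit of the remaining length,
 * scanned from the smallest block size to the largest, selects
 * exactly one power-of-two block to map.  map_chunk is a
 * hypothetical stand-in for pmap_map_block.
 */
static void map_by_bits(unsigned int va, unsigned int pa, unsigned int len,
			int minshft, int maxshft,
			void (*map_chunk)(unsigned int va, unsigned int pa, unsigned int size))
{
	unsigned int cbsize = 1u << minshft;	/* start at the minimum block size */
	unsigned int bits = len >> minshft;	/* one bit per candidate block size */
	int i;

	for (i = minshft; i <= maxshft; i++) {	/* small to large, as in the leading loop */
		if (bits & 1) {			/* this power of two is present in len */
			map_chunk(va, pa, cbsize);
			va += cbsize;		/* advance past the block just mapped */
			pa += cbsize;
		}
		bits >>= 1;			/* next size up */
		cbsize <<= 1;
	}
}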
 
 
 /*
- *             mapping *mapping_find(pmap, va, *nextva, full) - Finds a mapping 
+ *             Enters translations for odd-sized V=F blocks.
  *
- *             Looks up the vaddr and returns the mapping and the next mapped va
- *             If full is true, it will descend through all nested pmaps to find actual mapping
+ *             Checks to ensure that the request is at least ODDBLKMIN in size.  If smaller, the request
+ *             will be split into normal-sized page mappings.
  *
- *             Must be called with interruptions disabled or we can hang trying to remove found mapping.
+ *             The higher level VM map should be locked to ensure that we don't have a
+ *             double diddle here.
  *
- *             Returns 0 if not found and the virtual address of the mapping if it is
- *             Note that the mappings busy count is bumped. It is the responsibility of the caller
- *             to drop the count.  If this is not done, any attempt to remove the mapping will hang.
+ *             We panic if we get a block that overlaps with another. We do not merge adjacent
+ *             blocks because removing any address within a block removes the entire block, and it
+ *             would really mess things up if we trashed too much.
  *
- *             NOTE: The nextva field is not valid when full is TRUE.
+ *             Once a block is mapped, it is immutable; that is, protection, cache mode, etc. cannot
+ *             be changed.  The block must be unmapped and then remapped with the new stuff.
+ *             We also do not keep track of reference or change flags.
  *
+ *             Blocks are kept in MRU order anchored from the pmap. The chain is traversed only
+ *             with interruptions and translation disabled and under the control of the lock located
+ *             in the first block map. MRU is used because it is expected that the same entry 
+ *             will be accessed repeatedly while PTEs are being generated to cover those addresses.
  *
  */
  
-mapping *mapping_find(pmap_t pmap, addr64_t va, addr64_t *nextva, int full) {  /* Make an address mapping */
+void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) {       /* Map an autogenned block */
 
-       register mapping *mp;
-       addr64_t        curva;
-       pmap_t  curpmap;
-       int     nestdepth;
+       register blokmap *blm, *oblm, *oblm_virt;
+       unsigned int pg;
+
+#if 0
+       kprintf("pmap_map_block: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",       /* (TEST/DEBUG) */
+        pmap, va, pa, size, prot, attr);
+#endif
+
+       if(size < ODDBLKMIN) {                                                                  /* Is this below the minimum size? */
+               for(pg = 0; pg < size; pg += PAGE_SIZE) {                       /* Add all pages in this block */
+                       mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
+#if 0
+                       kprintf("pmap_map_block: mm: va=%08X; pa=%08X\n",       /* (TEST/DEBUG) */
+                               va + pg, pa + pg);
+#endif
+               }
+               return;                                                                                         /* All done */
+       }
+       
+       blm = (blokmap *)mapping_alloc();                                               /* Get a block mapping */
+       
+       blm->start = (unsigned int)va & -PAGE_SIZE;                             /* Get virtual block start */
+       blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1);   /* Get virtual block end */
+       blm->current = 0;
+       blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */
+       blm->space = pmap->space;                                                               /* Set the space (only needed for remove) */
+       blm->blkFlags = flags;                                                                  /* Set the block's flags */
+       
+#if 0
+       kprintf("pmap_map_block: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",   /* (TEST/DEBUG) */
+        blm, blm->start, blm->end, blm->PTEr);
+#endif
+
+       blm = (blokmap *)hw_cvp((mapping *)blm);                                /* Get the physical address of this */
+
+#if 0
+       kprintf("pmap_map_block: bm (real)=%08X; pmap->bmaps=%08X\n",   /* (TEST/DEBUG) */
+        blm, pmap->bmaps);
+#endif
+
+       do {
+               oblm = hw_add_blk(pmap, blm); 
+               if ((unsigned int)oblm & 2) {
+                       oblm_virt = (blokmap *)hw_cpv((mapping *)((unsigned int)oblm & 0xFFFFFFFC));
+                       mapping_remove(pmap, oblm_virt->start);
+               };
+       } while ((unsigned int)oblm & 2);
+
+       if (oblm) {
+               oblm = (blokmap *)hw_cpv((mapping *) oblm);                             /* Get the old block virtual address */
+               blm = (blokmap *)hw_cpv((mapping *)blm);                                /* Back to the virtual address of this */
+               if((oblm->start != blm->start) ||                                       /* If we have a match, then this is a fault race and */
+                               (oblm->end != blm->end) ||                              /* is acceptable */
+                               (oblm->PTEr != blm->PTEr))
+                       panic("pmap_map_block: block map overlap - blm = %08X\n", oblm);/* Otherwise, Squeak loudly and carry a big stick */
+               mapping_free((struct mapping *)blm);
+       }
 
-       curpmap = pmap;                                                                                         /* Remember entry */
-       nestdepth = 0;                                                                                          /* Set nest depth */
-       curva = (addr64_t)va;                                                                                   /* Set current va */
+#if 0
+       kprintf("pmap_map_block: pmap->bmaps=%08X\n",                   /* (TEST/DEBUG) */
+        blm, pmap->bmaps);
+#endif
+
+       return;                                                                                                 /* Return */
+}
+
+
+/*
+ *             Optimally enters translations for odd-sized V=F blocks.
+ *
+ *             Checks to ensure that the request is at least ODDBLKMIN in size.  If smaller, the request
+ *             will be split into normal-sized page mappings.
+ *
+ *             This one is different from pmap_map_block in that it will allocate its own virtual
+ *             target address.  Rather than allocating a single block,
+ *             it will also allocate multiple blocks that are power-of-two aligned/sized.  This allows
+ *             hardware-level mapping that takes advantage of BAT maps or large page sizes.
+ *
+ *             Most considerations for pmap_map_block apply.
+ *
+ *
+ */
+kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va, 
+       vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {     /* Map an optimal autogenned block */
 
-       while(1) {
+       register blokmap *blm, *oblm;
+       unsigned int    pg;
+    kern_return_t      err;
+       unsigned int    bnd;
+
+#if 1
+       kprintf("pmap_map_block_opt: map=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n",     /* (TEST/DEBUG) */
+               map, pa, size, prot, attr);
+#endif
 
-               mp = hw_find_map(curpmap, curva, nextva);                               /* Find the mapping for this address */
-               if((unsigned int)mp == mapRtBadLk) {                                    /* Did we lock up ok? */
-                       panic("mapping_find: pmap lock failure - rc = %08X, pmap = %08X\n", mp, curpmap);       /* Die... */
+       if(size < ODDBLKMIN) {                                                                  /* Is this below the minimum size? */
+               err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE);    /* Make us some memories */
+               if(err) {
+#if DEBUG
+                       kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err);        /* Say we died */
+#endif
+                       return(err);                                                                    /* Pass back the error */
                }
-               
-               if(!mp || !(mp->mpFlags & mpNest) || !full) break;              /* Are we a nest or are we only going one deep? */
+#if 1
+               kprintf("pmap_map_block_opt: small; vaddr = %08X\n", *va);      /* (TEST/DEBUG) */
+#endif
 
-               if(mp->mpFlags & mpSpecial) {                                                   /* Don't chain through a special mapping */
-                       mp = 0;                                                                                         /* Set not found */
-                       break;
+               for(pg = 0; pg < size; pg += PAGE_SIZE) {                       /* Add all pages in this block */
+                       mapping_make(map->pmap, 0, *va + pg, pa + pg, prot, attr, 0);   /* Map this page on in */
                }
+               return(KERN_SUCCESS);                                                           /* All done */
+       }
+       
+       err = vm_map_block(map, va, &bnd, pa, size, prot);              /* Go get an optimal allocation */
 
-               if(nestdepth++ > 64) {                                                                  /* Have we nested too far down? */
-                       panic("mapping_find: too many nested pmaps - va = %016llX, curva = %016llX, pmap = %08X, curpmap = %08X\n",
-                               va, curva, pmap, curpmap);
+       if(err == KERN_INVALID_ADDRESS) {                                               /* Can we try a brute force block mapping? */
+               err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE);    /* Make us some memories */
+               if(err) {
+#if DEBUG
+                       kprintf("pmap_map_block_opt: non-optimal vm_allocate() returned %d\n", err);    /* Say we died */
+#endif
+                       return(err);                                                                    /* Pass back the error */
                }
-               
-               curva = curva + mp->mpNestReloc;                                                /* Relocate va to new pmap */
-               curpmap = pmapTrans[mp->mpSpace].pmapVAddr;                             /* Get the address of the nested pmap */
-               mapping_drop_busy(mp);                                                                  /* We have everything we need from the mapping */
-               
+#if 1
+               kprintf("pmap_map_block_opt: non-optimal - vaddr = %08X\n", *va);       /* (TEST/DEBUG) */
+#endif
+               pmap_map_block(map->pmap, *va, pa, size, prot, attr, 0);        /* Set up a block mapped area */
+               return KERN_SUCCESS;                                                            /* All done now */
+       }
+
+       if(err != KERN_SUCCESS) {                                                               /* We couldn't get any address range to map this... */
+#if DEBUG
+               kprintf("pmap_map_block_opt: vm_map_block() returned %d\n", err);       /* Say we couldn't do it */
+#endif
+               return(err);
        }
 
-       return mp;                                                                                                      /* Return the mapping if we found one */
+#if 1
+       kprintf("pmap_map_block_opt: optimal - vaddr=%08X; bnd=%08X\n", *va, bnd);      /* (TEST/DEBUG) */
+#endif
+       mapping_block_map_opt(map->pmap, *va, pa, bnd, size, prot, attr);       /* Go build the maps */
+       return(KERN_SUCCESS);                                                                   /* All done */
 }
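+
+/*
+ *             Illustrative caller sketch (hypothetical, not part of this file): the
+ *             function name and the attr value are made up; only the calling
+ *             convention and the three result paths come from the routine above.
+ */
+#if 0
+kern_return_t example_map_device(vm_map_t map, vm_offset_t pa, vm_size_t size) {
+
+       vm_offset_t             va = 0;                                                                 /* Let the routine pick an address */
+       kern_return_t   ret;
+
+       ret = pmap_map_block_opt(map, &va, pa, size,                            /* Small, optimal, or brute force path */
+               VM_PROT_READ | VM_PROT_WRITE, 0);                                       /* attr 0 is illustrative only */
+       if(ret != KERN_SUCCESS) return(ret);                                            /* No usable address range */
+
+       kprintf("example_map_device: mapped at %08X\n", va);            /* The chosen virtual address comes back in va */
+       return(KERN_SUCCESS);
+}
+#endif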
 
+
+#if 0
+
 /*
- *              kern_return_t mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) - change the protection of a virtual page
+ *             Enters translations for odd-sized V=F blocks and merges adjacent or overlapping
+ *             areas.
  *
- *             This routine takes a pmap and virtual address and changes
- *             the protection.  If there are PTEs associated with the mappings, they will be invalidated before
- *             the protection is changed. 
+ *             Once blocks are merged, they act like one block, i.e., if you remove it,
+ *             it all goes...
  *
- *             We return success if we change the protection or if there is no page mapped at va.  We return failure if
- *             the va corresponds to a block mapped area or the mapping is permanent.
+ *             This can only be used during boot.  Ain't no way we can handle SMP
+ *             or preemption easily, so we restrict it.  We don't check either. We
+ *             assume only skilled professional programmers will attempt using this
+ *             function. We assume no responsibility, either real or imagined, for
+ *             injury or death resulting from unauthorized use of this function.
  *
+ *             No user serviceable parts inside. Notice to be removed by end-user only,
+ *             under penalty of applicable federal and state laws.
+ *
+ *             See descriptions of pmap_map_block. Ignore the part where we say we panic for
+ *             overlapping areas.  Note that we do panic if we can't merge.
  *
  */
+void pmap_map_block_merge(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {     /* Map an autogenned block */
 
-int mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) {      /* Change protection of a virtual page */
+       register blokmap *blm, *oblm;
+       unsigned int pg;
+       spl_t           s;
 
-       int     ret;
-       
-       ret = hw_protect(pmap, va, ppc_prot(prot), nextva);     /* Try to change the protect here */
+#if 1
+       kprintf("pmap_map_block_merge: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
+        pmap, va, pa, size, prot, attr);
+#endif
 
-       switch (ret) {                                                          /* Decode return code */
+       s = splhigh();                                                                                          /* Don't bother from now on */
+       if(size < ODDBLKMIN) {                                                                          /* Is this below the minimum size? */
+               for(pg = 0; pg < size; pg += PAGE_SIZE) {                               /* Add all pages in this block */
+                       mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
+               }
+               return;                                                                                                 /* All done */
+       }
        
-               case mapRtOK:                                                   /* Changed */
-               case mapRtNotFnd:                                               /* Didn't find it */
-                       return mapRtOK;                                         /* Ok, return... */
-                       break;
+       blm = (blokmap *)mapping_alloc();                                                       /* Get a block mapping */
+       
+       blm->start = (unsigned int)va & -PAGE_SIZE;                                     /* Get virtual block start */
+       blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1);           /* Get virtual block end */
+       blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */
+       
+#if 1
+       kprintf("pmap_map_block_merge: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n",     /* (TEST/DEBUG) */
+        blm, blm->start, blm->end, blm->PTEr);
+#endif
 
-               case mapRtBlock:                                                /* Block map, just ignore request */
-               case mapRtNest:                                                 /* Nested pmap, just ignore request */
-                       return ret;                                                     /* Pass back return code */
-                       break;
-                       
-               default:
-                       panic("mapping_protect: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, va);
-               
+       blm = (blokmap *)hw_cvp((mapping *)blm);                                        /* Get the physical address of this */
+
+#if 1
+       kprintf("pmap_map_block_merge: bm (real)=%08X; pmap->bmaps=%08X\n",     /* (TEST/DEBUG) */
+        blm, pmap->bmaps);
+#endif
+
+       if(oblm = hw_add_blk(pmap, blm)) {                                                      /* Add to list and make sure we don't overlap anything */
+               panic("pmap_map_block_merge: block map overlap - blm = %08X\n", oblm);  /* Squeak loudly and carry a big stick */
        }
 
+#if 1
+       kprintf("pmap_map_block_merge: blm=%08X; pmap->bmaps=%08X\n",           /* (TEST/DEBUG) */
+        blm, pmap->bmaps);
+#endif
+       splx(s);                                                                                                        /* Ok for interruptions now */
+
+       return;                                                                                                         /* Return */
 }
+#endif
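+
+/*
+ *             Worked example of the PTEr encoding built above (derived from the
+ *             expression, not a normative PTE spec): for pa = 0x01234567, attr = 0x5,
+ *             and a protection code of 2 from ppc_prot(),
+ *
+ *                     pa & -PAGE_SIZE  = 0x01234000           (low 12 bits dropped)
+ *                     attr << 3        = 0x00000028           (WIMG field, bits 3..6)
+ *                     ppc_prot(prot)   = 0x00000002           (PP bits)
+ *                     PTEr             = 0x0123402A
+ */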
 
 /*
- *             void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) - change the protection of a physical page
+ *             void mapping_protect_phys(phys_entry *pp, vm_prot_t prot) - change the protection of a physical page
  *
  *             This routine takes a physical entry and runs through all mappings attached to it and changes
  *             the protection.  If there are PTEs associated with the mappings, they will be invalidated before
- *             the protection is changed.  There is no limitation on changes, e.g., 
+ *             the protection is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
+ *             either (I don't think, maybe I'll change my mind later).  There is no limitation on changes, e.g., 
  *             higher to lower, lower to higher.
  *
- *             Any mapping that is marked permanent is not changed
- *
  *             Phys_entry is unlocked.
  */
 
-void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) {        /* Change protection of all mappings to page */
-       
-       unsigned int pindex;
-       phys_entry *physent;
+void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked) {   /* Change protection of all mappings to page */
+
+       spl_t                           spl;
        
-       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
-       if(!physent) {                                                                                          /* Did we find the physical page? */
-               panic("mapping_protect_phys: invalid physical page %08X\n", pa);
-       }
+       debugLog2(9, pp->pte1, prot);                                                           /* start mapping_protect_phys */
+       spl=splhigh();                                                                                          /* No interruptions during this */
+       if(!locked) {                                                                                           /* Do we need to lock the physent? */
+               if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {      /* Lock the physical entry */
+                       panic("\nmapping_protect_phys: Timeout attempting to lock physical entry at %08X: %08X %08X\n", 
+                               pp, pp->phys_link, pp->pte1);                                           /* Complain about timeout */
+               }
+       }       
 
-       hw_walk_phys(physent, hwpSPrtPhy, hwpSPrtMap, hwpNoop, ppc_prot(prot)); /* Set the new protection for page and mappings */
+       hw_prot(pp, ppc_prot(prot));                                                            /* Go set the protection on this physical page */
 
+       if(!locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);   /* We're done, unlock the physical entry */
+       splx(spl);                                                                                                      /* Restore interrupt state */
+       debugLog2(10, pp->pte1, 0);                                                                     /* end mapping_protect_phys */
+       
        return;                                                                                                         /* Leave... */
 }
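+
+/*
+ *             Hypothetical caller sketch (function names made up): the locked flag
+ *             above decides whether the routine takes PHYS_LOCK itself or trusts
+ *             the caller to already hold it.
+ */
+#if 0
+static void example_write_protect(struct phys_entry *pp) {
+
+       mapping_protect_phys(pp, VM_PROT_READ, FALSE);                  /* Not locked: routine takes and drops PHYS_LOCK itself */
+}
+
+static void example_write_protect_locked(struct phys_entry *pp) {
+
+       /* Caller already holds PHYS_LOCK with interruptions disabled */
+       mapping_protect_phys(pp, VM_PROT_READ, TRUE);                   /* Locked: routine leaves the lock alone */
+}
+#endif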
 
-
 /*
- *             void mapping_clr_mod(ppnum_t pa) - clears the change bit of a physical page
+ *             void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) - change the protection of a virtual page
+ *
+ *             This routine takes a pmap and virtual address and changes
+ *             the protection.  If there are PTEs associated with the mappings, they will be invalidated before
+ *             the protection is changed.  We don't try to save the PTE.  We won't worry about the LRU calculations
+ *             either (I don't think, maybe I'll change my mind later).  There is no limitation on changes, e.g., 
+ *             higher to lower, lower to higher.
  *
- *             This routine takes a physical entry and runs through all mappings attached to it and turns
- *             off the change bit. 
  */
 
-void mapping_clr_mod(ppnum_t pa) {                                                             /* Clears the change bit of a physical page */
+void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) { /* Change protection of a virtual page */
+
+       mapping         *mp, *mpv;
+       spl_t           s;
+
+       debugLog2(9, vaddr, pmap);                                      /* start mapping_protect */
+       s = splhigh();                                                          /* Don't bother me */
+               
+       mp = hw_lock_phys_vir(pmap->space, vaddr);      /* Lock the physical entry for this mapping */
+
+       if(!mp) {                                                                       /* Did we find one? */
+               splx(s);                                                                /* Restore the interrupt level */
+               debugLog2(10, 0, 0);                                            /* end mapping_protect */
+               return;                                                                 /* Didn't find any... */
+       }
+       if((unsigned int)mp & 1) {                                      /* Did we timeout? */
+               panic("mapping_protect: timeout locking physical entry\n");     /* Yeah, scream about it! */
+               splx(s);                                                                /* Restore the interrupt level */
+               return;                                                                 /* Bad hair day... */
+       }
+               
+       hw_prot_virt(mp, ppc_prot(prot));                       /* Go set the protection on this virtual mapping */
 
-       unsigned int pindex;
-       phys_entry *physent;
+       mpv = hw_cpv(mp);                                                       /* Get virtual address of mapping */
+       if(mpv->physent) {                                                      /* If there is a physical page, */
+               hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);     /* Unlock the physical entry */
+       }
+       splx(s);                                                                        /* Restore interrupt state */
+       debugLog2(10, mpv->PTEr, 0);                            /* end mapping_protect */
        
-       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
-       if(!physent) {                                                                                          /* Did we find the physical page? */
-               panic("mapping_clr_mod: invalid physical page %08X\n", pa);
+       return;                                                                         /* Leave... */
+}
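+
+/*
+ *             Note on the hw_lock_phys_vir() return convention used above (a reading
+ *             of this code, not a separate spec): mappings sit on 32-byte slots, so
+ *             a valid pointer never has its low bit set; the low bit is therefore
+ *             free to signal a lock timeout.  A minimal sketch of the three cases:
+ */
+#if 0
+static void example_lookup(pmap_t pmap, vm_offset_t vaddr) {
+
+       mapping *mp;
+
+       mp = hw_lock_phys_vir(pmap->space, vaddr);                              /* 0, a pointer, or pointer|1 */
+       if(!mp) return;                                                                                 /* No mapping for vaddr */
+       if((unsigned int)mp & 1) return;                                                /* Physent lock timed out */
+       /* mp is a valid mapping with its physical entry locked */
+}
+#endif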
+
+/*
+ *             mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) Sets the default physical page attributes
+ *
+ *             This routine takes a physical entry and sets the physical attributes.  There can be no mappings
+ *             associated with this page when we do it.
+ */
+
+void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) {     /* Sets the default physical page attributes */
+
+       debugLog2(11, pp->pte1, prot);                                                          /* start mapping_phys_attr */
+
+       if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {      /* Lock the physical entry */
+               panic("\nmapping_phys_attr: Timeout attempting to lock physical entry at %08X: %08X %08X\n", 
+                       pp, pp->phys_link, pp->pte1);                                           /* Complain about timeout */
        }
 
-       hw_walk_phys(physent, hwpNoop, hwpCCngMap, hwpCCngPhy, 0);      /* Clear change for page and mappings */
+       hw_phys_attr(pp, ppc_prot(prot), wimg);                                         /* Go set the default WIMG and protection */
+
+       hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);       /* We're done, unlock the physical entry */
+       debugLog2(12, pp->pte1, wimg);                                                          /* end mapping_phys_attr */
+       
        return;                                                                                                         /* Leave... */
 }
 
-
 /*
- *             void mapping_set_mod(ppnum_t pa) - set the change bit of a physical page
+ *             void mapping_invall(phys_entry *pp) - invalidates all ptes associated with a page
  *
- *             This routine takes a physical entry and runs through all mappings attached to it and turns
- *             on the change bit.  
+ *             This routine takes a physical entry and runs through all mappings attached to it and invalidates
+ *             any PTEs it finds.
+ *
+ *             Interruptions must be disabled and the physical entry locked at entry.
  */
 
-void mapping_set_mod(ppnum_t pa) {                                                             /* Sets the change bit of a physical page */
+void mapping_invall(struct phys_entry *pp) {                                   /* Clear all PTEs pointing to a physical page */
 
-       unsigned int pindex;
-       phys_entry *physent;
+       hw_inv_all(pp);                                                                                         /* Go invalidate all PTEs pointing to this physical page */
        
-       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
-       if(!physent) {                                                                                          /* Did we find the physical page? */
-               panic("mapping_set_mod: invalid physical page %08X\n", pa);
-       }
-
-       hw_walk_phys(physent, hwpNoop, hwpSCngMap, hwpSCngPhy, 0);      /* Set change for page and mappings */
        return;                                                                                                         /* Leave... */
 }
 
 
 /*
- *             void mapping_clr_ref(ppnum_t pa) - clears the reference bit of a physical page
+ *             void mapping_clr_mod(phys_entry *pp) - clears the change bit of a physical page
  *
  *             This routine takes a physical entry and runs through all mappings attached to it and turns
- *             off the reference bit.  
+ *             off the change bit.  If there are PTEs associated with the mappings, they will be invalidated before
+ *             the change bit is cleared.  We don't try to save the PTE.  We won't worry about the LRU calculations
+ *             either (I don't think, maybe I'll change my mind later).
+ *
+ *             Interruptions must be disabled and the physical entry locked at entry.
  */
 
-void mapping_clr_ref(ppnum_t pa) {                                                             /* Clears the reference bit of a physical page */
+void mapping_clr_mod(struct phys_entry *pp) {                                  /* Clears the change bit of a physical page */
 
-       unsigned int pindex;
-       phys_entry *physent;
-       
-       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
-       if(!physent) {                                                                                          /* Did we find the physical page? */
-               panic("mapping_clr_ref: invalid physical page %08X\n", pa);
-       }
-
-       hw_walk_phys(physent, hwpNoop, hwpCRefMap, hwpCRefPhy, 0);      /* Clear reference for page and mappings */
+       hw_clr_mod(pp);                                                                                         /* Go clear the change bit of a physical page */
        return;                                                                                                         /* Leave... */
 }
 
 
 /*
- *             void mapping_set_ref(ppnum_t pa) - set the reference bit of a physical page
+ *             void mapping_set_mod(phys_entry *pp) - set the change bit of a physical page
  *
  *             This routine takes a physical entry and runs through all mappings attached to it and turns
- *             on the reference bit. 
+ *             on the change bit.  If there are PTEs associated with the mappings, they will be invalidated before
+ *             the change bit is set.  We don't try to save the PTE.  We won't worry about the LRU calculations
+ *             either (I don't think, maybe I'll change my mind later).
+ *
+ *             Interruptions must be disabled and the physical entry locked at entry.
  */
 
-void mapping_set_ref(ppnum_t pa) {                                                             /* Sets the reference bit of a physical page */
-
-       unsigned int pindex;
-       phys_entry *physent;
-       
-       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
-       if(!physent) {                                                                                          /* Did we find the physical page? */
-               panic("mapping_set_ref: invalid physical page %08X\n", pa);
-       }
+void mapping_set_mod(struct phys_entry *pp) {                                  /* Sets the change bit of a physical page */
 
-       hw_walk_phys(physent, hwpNoop, hwpSRefMap, hwpSRefPhy, 0);      /* Set reference for page and mappings */
+       hw_set_mod(pp);                                                                                         /* Go set the change bit of a physical page */
        return;                                                                                                         /* Leave... */
 }
 
 
 /*
- *             void mapping_tst_mod(ppnum_t pa) - test the change bit of a physical page
+ *             void mapping_clr_ref(struct phys_entry *pp) - clears the reference bit of a physical page
  *
- *             This routine takes a physical entry and runs through all mappings attached to it and tests
- *             the changed bit. 
+ *             This routine takes a physical entry and runs through all mappings attached to it and turns
+ *             off the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
+ *             the reference bit is cleared.  We don't try to save the PTE.  We won't worry about the LRU calculations
+ *             either (I don't think, maybe I'll change my mind later).
+ *
+ *             Interruptions must be disabled at entry.
  */
 
-boolean_t mapping_tst_mod(ppnum_t pa) {                                                        /* Tests the change bit of a physical page */
+void mapping_clr_ref(struct phys_entry *pp) {                                  /* Clears the reference bit of a physical page */
 
-       unsigned int pindex, rc;
-       phys_entry *physent;
-       
-       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
-       if(!physent) {                                                                                          /* Did we find the physical page? */
-               panic("mapping_tst_mod: invalid physical page %08X\n", pa);
+       mapping *mp;
+
+       debugLog2(13, pp->pte1, 0);                                                                     /* start mapping_clr_ref */
+       if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {      /* Lock the physical entry for this mapping */
+               panic("mapping_clr_ref: timeout getting lock on physical entry\n");      /* Just die... */
        }
+       hw_clr_ref(pp);                                                                                         /* Go clear the reference bit of a physical page */
+       hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);       /* Unlock physical entry */
+       debugLog2(14, pp->pte1, 0);                                                                     /* end mapping_clr_ref */
+       return;                                                                                                         /* Leave... */
+}
+
+
+/*
+ *             void mapping_set_ref(phys_entry *pp) - set the reference bit of a physical page
+ *
+ *             This routine takes a physical entry and runs through all mappings attached to it and turns
+ *             on the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
+ *             the reference bit is set.  We don't try to save the PTE.  We won't worry about the LRU calculations
+ *             either (I don't think, maybe I'll change my mind later).
+ *
+ *             Interruptions must be disabled and the physical entry locked at entry.
+ */
+
+void mapping_set_ref(struct phys_entry *pp) {                                  /* Sets the reference bit of a physical page */
 
-       rc = hw_walk_phys(physent, hwpTCngPhy, hwpTCngMap, hwpNoop, 0); /* Set change for page and mappings */
-       return ((rc & (unsigned long)ppC) != 0);                                        /* Leave with change bit */
+       hw_set_ref(pp);                                                                                         /* Go set the reference bit of a physical page */
+       return;                                                                                                         /* Leave... */
 }
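+
+/*
+ *             Sketch of the locking contracts above (hypothetical caller; the
+ *             function name is made up): mapping_clr_ref() locks the physical
+ *             entry itself, while the mod/ref setters expect the caller to hold
+ *             PHYS_LOCK with interruptions disabled.
+ */
+#if 0
+static void example_rc_bits(struct phys_entry *pp) {
+
+       spl_t   s;
+
+       s = splhigh();                                                                                  /* All of these want interruptions off */
+       mapping_clr_ref(pp);                                                                    /* Takes and drops the physent lock itself */
+
+       if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut))        /* The others want the lock held on entry */
+               panic("example_rc_bits: physent lock timeout\n");
+       mapping_clr_mod(pp);                                                                    /* Clear the change bit */
+       mapping_set_ref(pp);                                                                    /* Set the reference bit */
+       hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);
+       splx(s);
+}
+#endif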
 
 
 /*
- *             void mapping_tst_ref(ppnum_t pa) - tests the reference bit of a physical page
+ *             void mapping_tst_mod(phys_entry *pp) - test the change bit of a physical page
  *
  *             This routine takes a physical entry and runs through all mappings attached to it and tests
- *             the reference bit. 
+ *             the change bit.  If there are PTEs associated with the mappings, they will be invalidated before
+ *             the change bit is tested.  We don't try to save the PTE.  We won't worry about the LRU calculations
+ *             either (I don't think, maybe I'll change my mind later).
+ *
+ *             Interruptions must be disabled and the physical entry locked at entry.
  */
 
-boolean_t mapping_tst_ref(ppnum_t pa) {                                                        /* Tests the reference bit of a physical page */
-
-       unsigned int pindex, rc;
-       phys_entry *physent;
-       
-       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
-       if(!physent) {                                                                                          /* Did we find the physical page? */
-               panic("mapping_tst_ref: invalid physical page %08X\n", pa);
-       }
+boolean_t mapping_tst_mod(struct phys_entry *pp) {                             /* Tests the change bit of a physical page */
 
-       rc = hw_walk_phys(physent, hwpTRefPhy, hwpTRefMap, hwpNoop, 0); /* Test reference for page and mappings */
-       return ((rc & (unsigned long)ppR) != 0);                                        /* Leave with reference bit */
+       return(hw_tst_mod(pp));                                                                         /* Go test the change bit of a physical page */
 }
 
 
 /*
- *             phys_entry  *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) - finds the phys_entry for a physical page
+ *             void mapping_tst_ref(phys_entry *pp) - tests the reference bit of a physical page
+ *
+ *             This routine takes a physical entry and runs through all mappings attached to it and tests
+ *             the reference bit.  If there are PTEs associated with the mappings, they will be invalidated before
+ *             the reference bit is tested.  We don't try to save the PTE.  We won't worry about the LRU calculations
+ *             either (I don't think, maybe I'll change my mind later).
  *
- *             This routine takes a physical page number and returns the phys_entry associated with it.  It also
- *             calculates the word index of the bank associated with the entry.
+ *             Interruptions must be disabled and the physical entry locked at entry.
  */
 
-phys_entry *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) {    /* Finds the physical entry for the page */
+boolean_t mapping_tst_ref(struct phys_entry *pp) {                             /* Tests the reference bit of a physical page */
 
-       phys_entry *physent;
-       int i;
-       
-       for(i = 0; i < pmap_mem_regions_count; i++) {                           /* Walk through the list */
-               if(!(unsigned int)pmap_mem_regions[i].mrPhysTab) continue;      /* Skip any empty lists */
-               if((pp < pmap_mem_regions[i].mrStart) || (pp > pmap_mem_regions[i].mrEnd)) continue;    /* This isn't ours */
-               
-               *pindex = (i * sizeof(mem_region_t)) / 4;                               /* Make the word index to this list */
-               
-               return &pmap_mem_regions[i].mrPhysTab[pp - pmap_mem_regions[i].mrStart];        /* Return the physent pointer */
-       }
-       
-       return (phys_entry *)0;                                                                         /* Shucks, can't find it... */
-       
+       return(hw_tst_ref(pp));                                                                         /* Go test the reference bit of a physical page */
 }
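+
+/*
+ *             Illustrative pageout-style use of the test routines (hypothetical
+ *             helper; assumes the physent is locked and interruptions are off,
+ *             per the comments above):
+ */
+#if 0
+static boolean_t example_evictable(struct phys_entry *pp) {
+
+       if(mapping_tst_ref(pp)) return FALSE;                                   /* Touched since the last sweep */
+       if(mapping_tst_mod(pp)) return FALSE;                                   /* Dirty; would need cleaning first */
+       return TRUE;                                                                                    /* Clean and cold: a candidate */
+}
+#endif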
 
 
+/*
+ *             void mapping_phys_init(physent, pa, wimg) - fills in the default processor dependent areas of the phys ent
+ *
+ *             Currently, this sets the default word 1 of the PTE.  The only bits set are the WIMG bits
+ */
+
+void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg) {            /* Initializes hw specific storage attributes */
+
+       pp->pte1 = (pa & -PAGE_SIZE) | ((wimg << 3) & 0x00000078);      /* Set the WIMG and phys addr in the default PTE1 */
+
+       return;                                                                                                         /* Leave... */
+}
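+
+/*
+ *             Worked example of the default PTE1 built above (derived from the
+ *             expression, not a separate spec): pa = 0x00ABC123, wimg = 0xA
+ *             (W=1, I=0, M=1, G=0):
+ *
+ *                     pa & -PAGE_SIZE          = 0x00ABC000   (page-aligned physical address)
+ *                     (wimg << 3) & 0x78       = 0x00000050   (WIMG in bits 3..6)
+ *                     pte1                     = 0x00ABC050   (PP bits left zero)
+ */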
 
 
 /*
@@ -641,8 +1077,8 @@ phys_entry *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) {        /* Finds the
 *             The list will be replenished from mapCtl.mapcrel if there are enough.  Otherwise,
  *             a new one is allocated.
  *
- *             This routine allocates and/or frees memory and must be called from a safe place. 
- *             Currently, vm_pageout_scan is the safest place. 
+ *             This routine allocates and/or frees memory and must be called from a safe place. 
+ *             Currently, vm_pageout_scan is the safest place.
  */
 
 thread_call_t                          mapping_adjust_call;
@@ -657,7 +1093,7 @@ void mapping_adjust(void) {                                                                                /* Adjust free mappings */
        extern int vm_page_free_count;
 
        if(mapCtl.mapcmin <= MAPPERBLOK) {
-               mapCtl.mapcmin = (sane_size / PAGE_SIZE) / 16;
+               mapCtl.mapcmin = (mem_size / PAGE_SIZE) / 16;
 
 #if DEBUG
                kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
@@ -688,10 +1124,10 @@ void mapping_adjust(void) {                                                                              /* Adjust free mappings */
                        mapCtl.mapcreln--;                                                                      /* Back off the count */
                        allocsize = MAPPERBLOK;                                                         /* Show we allocated one block */                       
                }
-        else {                                                                                                 /* No free ones, try to get it */
+               else {                                                                                                  /* No free ones, try to get it */
                        
                        allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK;  /* Get the number of pages we need */
-                       
+
                        hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);            /* Unlock our stuff */
                        splx(s);                                                                                        /* Restore 'rupts */
 
@@ -702,21 +1138,18 @@ void mapping_adjust(void) {                                                                              /* Adjust free mappings */
                                }
                                if(retr == KERN_SUCCESS) break;                                 /* We got some memory, bail out... */
                        }
-               
                        allocsize = allocsize * MAPPERBLOK;                                     /* Convert pages to number of maps allocated */
                        s = splhigh();                                                                          /* Don't bother from now on */
                        if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
                                panic("mapping_adjust - timeout getting control lock (2)\n");   /* Tell all and die */
                        }
                }
-
                if (retr != KERN_SUCCESS)
                        break;                                                                                          /* Failed to allocate, bail out... */
                for(; allocsize > 0; allocsize -= MAPPERBLOK) {                 /* Release one block at a time */
                        mapping_free_init((vm_offset_t)mbn, 0, 1);                      /* Initialize a non-permanent block */
                        mbn = (mappingblok *)((unsigned int)mbn + PAGE_SIZE);   /* Point to the next slot */
                }
-
                if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
                        mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
        }
@@ -737,13 +1170,11 @@ void mapping_adjust(void) {                                                                              /* Adjust free mappings */
 
        while((unsigned int)mbn) {                                                                      /* Toss 'em all */
                mb = mbn->nextblok;                                                                             /* Get the next */
-               
                kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE);   /* Release this mapping block */
-       
                mbn = mb;                                                                                               /* Chain to the next */
        }
 
-       __asm__ volatile("eieio");                                                                      /* Make sure all is well */
+       __asm__ volatile("sync");                                                                       /* Make sure all is well */
        mapCtl.mapcrecurse = 0;                                                                         /* We are done now */
        return;
 }
@@ -762,53 +1193,18 @@ void mapping_free(struct mapping *mp) {                                                  /* Release a mapping */
 
        mappingblok     *mb, *mbn;
        spl_t                   s;
-       unsigned int    full, mindx, lists;
+       unsigned int    full, mindx;
 
-       mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 6;                      /* Get index to mapping */
+       mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5;                      /* Get index to mapping */
        mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE);            /* Point to the mapping block */
-    lists = (mp->mpFlags & mpLists);                                                   /* get #lists */
-    if ((lists == 0) || (lists > kSkipListMaxLists))                   /* panic if out of range */
-        panic("mapping_free: mpLists invalid\n");
-
-#if 0
-       mp->mpFlags = 0x99999999;                                                                       /* (BRINGUP) */ 
-       mp->mpSpace = 0x9999;                                                                           /* (BRINGUP) */ 
-       mp->mpBSize = 0x9999;                                                                           /* (BRINGUP) */ 
-       mp->mpPte   = 0x99999998;                                                                       /* (BRINGUP) */ 
-       mp->mpPAddr = 0x99999999;                                                                       /* (BRINGUP) */ 
-       mp->mpVAddr = 0x9999999999999999ULL;                                            /* (BRINGUP) */ 
-       mp->mpAlias = 0x9999999999999999ULL;                                            /* (BRINGUP) */ 
-       mp->mpList0 = 0x9999999999999999ULL;                                            /* (BRINGUP) */ 
-       mp->mpList[0] = 0x9999999999999999ULL;                                          /* (BRINGUP) */ 
-       mp->mpList[1] = 0x9999999999999999ULL;                                          /* (BRINGUP) */ 
-       mp->mpList[2] = 0x9999999999999999ULL;                                          /* (BRINGUP) */ 
-
-       if(lists > mpBasicLists) {                                                                      /* (BRINGUP) */ 
-               mp->mpList[3] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
-               mp->mpList[4] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
-               mp->mpList[5] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
-               mp->mpList[6] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
-               mp->mpList[7] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
-               mp->mpList[8] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
-               mp->mpList[9] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
-               mp->mpList[10] = 0x9999999999999999ULL;                                 /* (BRINGUP) */ 
-       }
-#endif 
-       
 
        s = splhigh();                                                                                          /* Don't bother from now on */
        if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
                panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */
        }
        
-       full = !(mb->mapblokfree[0] | mb->mapblokfree[1]);                      /* See if full now */ 
+       full = !(mb->mapblokfree[0] | mb->mapblokfree[1] | mb->mapblokfree[2] | mb->mapblokfree[3]);    /* See if full now */ 
        mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));    /* Flip on the free bit */
-    if ( lists > mpBasicLists ) {                                                              /* if big block, light the 2nd bit too */
-        mindx++;
-        mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));
-        mapCtl.mapcfree++;
-        mapCtl.mapcinuse--;
-    }
        
        if(full) {                                                                                                      /* If it was full before this: */
                mb->nextblok = mapCtl.mapcnext;                                                 /* Move head of list to us */
@@ -823,7 +1219,8 @@ void mapping_free(struct mapping *mp) {                                                    /* Release a mapping */
        mapCtl.mapcfreec++;                                                                                     /* Count total calls */
 
        if(mapCtl.mapcfree > mapCtl.mapcmin) {                                          /* Should we consider releasing this? */
-               if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1]) == 0xFFFFFFFF) {    /* See if empty now */ 
+               if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1] & mb->mapblokfree[2] & mb->mapblokfree[3]) 
+                  == 0xFFFFFFFF) {                                                                             /* See if empty now */ 
 
                        if(mapCtl.mapcnext == mb) {                                                     /* Are we first on the list? */
                                mapCtl.mapcnext = mb->nextblok;                                 /* Unchain us */
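+
+/*
+ *             Worked example of mapping_free()'s slot math above (the ">> 5",
+ *             32-byte-mapping form; derived from the expressions, illustrative
+ *             values): with mp = block base + 0x460,
+ *
+ *                     mindx = (0x460 & (PAGE_SIZE - 1)) >> 5 = 0x23   (slot 35)
+ *                     word  = mindx >> 5 = 1
+ *                     bit   = 0x80000000 >> (mindx & 31) = 0x10000000
+ *
+ *             so mapblokfree[1] |= 0x10000000 marks slot 35 free again.
+ */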
@@ -865,169 +1262,70 @@ void mapping_free(struct mapping *mp) {                                                 /* Release a mapping */
 
 
 /*
- *             mapping_alloc(lists) - obtain a mapping from the free list 
+ *             mapping_alloc(void) - obtain a mapping from the free list 
  *
- *             This routine takes a mapping off of the free list and returns its address.
- *             The mapping is zeroed, and its mpLists count is set.  The caller passes in
- *             the number of skiplists it would prefer; if this number is greater than 
- *             mpBasicLists (ie, 4) then we need to allocate a 128-byte mapping, which is
- *             just two consecutive free entries coalesced into one.  If we cannot find
- *             two consecutive free entries, we clamp the list count down to mpBasicLists
- *             and return a basic 64-byte node.  Our caller never knows the difference.
+ *             This routine takes a mapping off of the free list and returns its address.
  *
- *             If this allocation empties a block, we remove it from the free list.
+ *             We do this by finding a free entry in the first block and allocating it.
+ *             If this allocation empties the block, we remove it from the free list.
  *             If this allocation drops the total number of free entries below a threshold,
  *             we allocate a new block.
  *
  */
 
-mapping *mapping_alloc(int lists) {                                                            /* Obtain a mapping */
+mapping *mapping_alloc(void) {                                                                 /* Obtain a mapping */
 
        register mapping *mp;
        mappingblok     *mb, *mbn;
        spl_t                   s;
        int                             mindx;
        kern_return_t   retr;
-    int                                big = (lists > mpBasicLists);                           /* set flag if big block req'd */
-       pmap_t                  refpmap, ckpmap;
-       unsigned int    space, i;
-       int                             ref_count;
-       addr64_t                va, nextva;
-       extern  pmap_t  free_pmap_list;
-       extern  int             free_pmap_count;
-       decl_simple_lock_data(extern,free_pmap_lock)
-       boolean_t               found_mapping;
-       boolean_t               do_rescan;
-    
+
        s = splhigh();                                                                                          /* Don't bother from now on */
        if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
                panic("mapping_alloc - timeout getting control lock\n");        /* Tell all and die */
        }
 
-       if(!((unsigned int)mapCtl.mapcnext)) {                                          /* Are there any free mappings? */
-       
-/*
- *             No free mappings.  First, there may be some mapping blocks on the "to be released"
- *             list.  If so, rescue one.  Otherwise, try to steal a couple blocks worth.
- */
-
-               if(mbn = mapCtl.mapcrel) {                                                              /* Try to rescue a block from impending doom */
-                       mapCtl.mapcrel = mbn->nextblok;                                         /* Pop the queue */
-                       mapCtl.mapcreln--;                                                                      /* Back off the count */
-                       mapping_free_init((vm_offset_t)mbn, 0, 1);                      /* Initialize a non-permanent block */
-                       goto rescued;
-               }
-
-               hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
-
-               simple_lock(&free_pmap_lock);
-
-               if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
-                       panic("mapping_alloc - timeout getting control lock\n");        /* Tell all and die */
-               }
-
-               if (!((unsigned int)mapCtl.mapcnext)) {
-
-                       refpmap = (pmap_t)cursor_pmap->pmap_link.next;
-                       space = mapCtl.mapcflush.spacenum;
-                       while (refpmap != cursor_pmap) {
-                               if(((pmap_t)(refpmap->pmap_link.next))->spaceNum > space) break;
-                               refpmap = (pmap_t)refpmap->pmap_link.next;
-                       }
-
-                       ckpmap = refpmap;
-                       va = mapCtl.mapcflush.addr;
-                       found_mapping = FALSE;
-
-                       while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
-
+       if(!(mb = mapCtl.mapcnext)) {                                                           /* Get the first block entry */
+               unsigned int                    i;
+               struct mappingflush             mappingflush;
+               PCA                                             *pca_min, *pca_max;
+               PCA                                             *pca_base;
+
+               pca_min = (PCA *)(hash_table_base+hash_table_size);
+               pca_max = (PCA *)(hash_table_base+hash_table_size+hash_table_size);
+
+               while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
+                       mapCtl.mapcflush.mappingcnt = 0;
+                       pca_base = mapCtl.mapcflush.pcaptr;
+                       do {
+                               hw_select_mappings(&mapCtl.mapcflush);
+                               mapCtl.mapcflush.pcaptr++;
+                               if (mapCtl.mapcflush.pcaptr >= pca_max)
+                                       mapCtl.mapcflush.pcaptr = pca_min;
+                       } while ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr != pca_base));
+
+                       if ((mapCtl.mapcflush.mappingcnt == 0) && (mapCtl.mapcflush.pcaptr == pca_base)) {
                                hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
-
-                               ckpmap = (pmap_t)ckpmap->pmap_link.next;
-
-                               if ((ckpmap->stats.resident_count != 0) && (ckpmap != kernel_pmap)) {
-                                       do_rescan = TRUE;
-                                       for (i=0;i<8;i++) {
-                                               mp = hw_purge_map(ckpmap, va, &nextva);
-
-                                               if((unsigned int)mp & mapRetCode) {
-                                                       panic("mapping_alloc: hw_purge_map failed - pmap = %08X, va = %16llX, code = %08X\n", ckpmap, va, mp);
-                                               }
-
-                                               if(!mp) { 
-                                                       if (do_rescan)
-                                                               do_rescan = FALSE;
-                                                       else
-                                                               break;
-                                               } else {
-                                                       mapping_free(mp);
-                                                       found_mapping = TRUE;
-                                               }
-
-                                               va = nextva;
-                                       }
-                               }
-
-                               if (ckpmap == refpmap) {
-                                       if (found_mapping == FALSE)
-                                               panic("no valid pmap to purge mappings\n");
-                                       else
-                                               found_mapping = FALSE;
-                               }
-
-                               if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
-                                       panic("mapping_alloc - timeout getting control lock\n");        /* Tell all and die */
-                               }
-
+                               panic("mapping_alloc - all mappings are wired\n");
+                       }
+                       mappingflush = mapCtl.mapcflush;
+                       hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
+                       splx(s);
+                       for (i=0;i<mappingflush.mappingcnt;i++)
+                               mapping_remove(mappingflush.mapping[i].pmap, 
+                                              mappingflush.mapping[i].offset);
+                       s = splhigh();
+                       if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {
+                               panic("mapping_alloc - timeout getting control lock\n");
                        }
-
-                       mapCtl.mapcflush.spacenum = ckpmap->spaceNum;
-                       mapCtl.mapcflush.addr = nextva;
                }
-
-               simple_unlock(&free_pmap_lock);
-       }
-
-rescued:
-
-       mb = mapCtl.mapcnext;
-    
-    if ( big ) {                                                                                               /* if we need a big (128-byte) mapping */
-        mapCtl.mapcbig++;                                                                              /* count attempts to allocate a big mapping */
-        mbn = NULL;                                                                                            /* this will be prev ptr */
-        mindx = 0;
-        while( mb ) {                                                                                  /* loop over mapping blocks with free entries */
-            mindx = mapalc2(mb);                                                               /* try for 2 consecutive free bits in this block */
-
-           if ( mindx )        break;                                                                  /* exit loop if we found them */
-            mbn = mb;                                                                                  /* remember previous block */
-            mb = mb->nextblok;                                                                 /* move on to next block */
-        }
-        if ( mindx == 0 ) {                                                                            /* if we couldn't find 2 consecutive bits... */
-            mapCtl.mapcbigfails++;                                                             /* count failures */
-            big = 0;                                                                                   /* forget that we needed a big mapping */
-            lists = mpBasicLists;                                                              /* clamp list count down to the max in a 64-byte mapping */
-            mb = mapCtl.mapcnext;                                                              /* back to the first block with a free entry */
-        }
-        else {                                                                                                 /* if we did find a big mapping */
-            mapCtl.mapcfree--;                                                                 /* Decrement free count twice */
-            mapCtl.mapcinuse++;                                                                        /* Bump in use count twice */
-            if ( mindx < 0 ) {                                                                 /* if we just used the last 2 free bits in this block */
-                if (mbn) {                                                                             /* if this wasn't the first block */
-                    mindx = -mindx;                                                            /* make positive */
-                    mbn->nextblok = mb->nextblok;                              /* unlink this one from the middle of block list */
-                    if (mb ==  mapCtl.mapclast)        {                               /* if we emptied last block */
-                        mapCtl.mapclast = mbn;                                 /* then prev block is now last */
-                    }
-                }
-            }
-        }
-    }
-    
-    if ( !big ) {                                                                                              /* if we need a small (64-byte) mapping */
-        if(!(mindx = mapalc1(mb)))                                                             /* Allocate a 1-bit slot */
-            panic("mapping_alloc - empty mapping block detected at %08X\n", mb);
-    }
+               mb = mapCtl.mapcnext;
+       }
+       
+       if(!(mindx = mapalc(mb))) {                                                                     /* Allocate a slot */
+               panic("mapping_alloc - empty mapping block detected at %08X\n", mb);    /* Not allowed to find none */
+       }
        
        if(mindx < 0) {                                                                                         /* Did we just take the last one */
                mindx = -mindx;                                                                                 /* Make positive */
@@ -1048,7 +1346,6 @@ rescued:
  *     For early boot, we are set up to only rescue one block at a time.  This is because we prime
  *     the release list with as much as we need until threads start.
  */
-
        if(mapCtl.mapcfree < mapCtl.mapcmin) {                                          /* See if we need to replenish */
                if(mbn = mapCtl.mapcrel) {                                                              /* Try to rescue a block from impending doom */
                        mapCtl.mapcrel = mbn->nextblok;                                         /* Pop the queue */
@@ -1068,9 +1365,7 @@ rescued:
        splx(s);                                                                                                        /* Restore 'rupts */
        
        mp = &((mapping *)mb)[mindx];                                                           /* Point to the allocated mapping */
-    mp->mpFlags = lists;                                                                               /* set the list count */
-
-
+    __asm__ volatile("dcbz 0,%0" : : "r" (mp));                                        /* Zero the mapping (one 32-byte cache line) */
        return mp;                                                                                                      /* Send it back... */
 }
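+
+/*
+ *             Minimal usage sketch (illustrative; the function name is made up):
+ *             mapping_alloc() returns a zeroed entry (the dcbz above clears the
+ *             32-byte slot), which the caller fills in and eventually hands back
+ *             to mapping_free().
+ */
+#if 0
+static void example_cycle(void) {
+
+       mapping *mp;
+
+       mp = mapping_alloc();                                                                   /* May flush live mappings if the free list is dry */
+       /* ... fill in mp and link it where it belongs ... */
+       mapping_free(mp);                                                                               /* Return the slot to its block */
+}
+#endif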
 
@@ -1082,7 +1377,7 @@ consider_mapping_adjust()
 
        s = splhigh();                                                                                          /* Don't bother from now on */
        if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
-               panic("consider_mapping_adjust -- lock timeout\n");
+               panic("consider_mapping_adjust - timeout getting control lock\n");       /* Tell all and die */
        }
 
         if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
@@ -1101,15 +1396,8 @@ consider_mapping_adjust()
 /*
  *             void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list
  *
- *             The mapping block is a page size area on a page boundary.  It contains 1 header and 63
- *             mappings.  This call adds and initializes a block for use.  Mappings come in two sizes,
- *             64 and 128 bytes (the only difference is the number of skip-lists.)  When we allocate a
- *             128-byte mapping we just look for two consecutive free 64-byte mappings, so most of the
- *             code only deals with "basic" 64-byte mappings.  This works for two reasons:
- *                     - Only one in 256 mappings is big, so they are rare.
- *                     - If we cannot find two consecutive free mappings, we just return a small one.
- *                       There is no problem with doing this, except a minor performance degradation.
- *             Therefore, all counts etc in the mapping control structure are in units of small blocks.
+ *             The mapping block is a page size area on a page boundary.  It contains 1 header and 127
+ *             mappings.  This call adds and initializes a block for use.
  *     
  *             The header contains a chain link, bit maps, a virtual to real translation mask, and
  *             some statistics. Bit maps map each slot on the page (bit 0 is not used because it 
@@ -1141,38 +1429,33 @@ void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
        mappingblok     *mb;
        spl_t           s;
        int                     i;
-       addr64_t        raddr;
-       ppnum_t         pp;
+       unsigned int    raddr;
 
-       mb = (mappingblok *)mbl;                                                                /* Start of area */     
+       mb = (mappingblok *)mbl;                                                                /* Start of area */
+       
        
        if(perm >= 0) {                                                                                 /* See if we need to initialize the block */
                if(perm) {
-                       raddr = (addr64_t)((unsigned int)mbl);                  /* Perm means V=R */
+                       raddr = (unsigned int)mbl;                                              /* Perm means V=R */
                        mb->mapblokflags = mbPerm;                                              /* Set perm */
-//                     mb->mapblokflags |= (unsigned int)mb;                   /* (BRINGUP) */
                }
                else {
-                       pp = pmap_find_phys(kernel_pmap, (addr64_t)mbl);        /* Get the physical page */
-                       if(!pp) {                                                                               /* What gives?  Where's the page? */
-                               panic("mapping_free_init: could not find translation for vaddr %016llX\n", (addr64_t)mbl);
-                       }
-                       
-                       raddr = (addr64_t)pp << 12;                                             /* Convert physical page to physical address */
+                       raddr = kvtophys(mbl);                                                  /* Get real address */
                        mb->mapblokflags = 0;                                                   /* Set not perm */
-//                     mb->mapblokflags |= (unsigned int)mb;                   /* (BRINGUP) */
                }
                
-               mb->mapblokvrswap = raddr ^ (addr64_t)((unsigned int)mbl);              /* Form translation mask */
+               mb->mapblokvrswap = raddr ^ (unsigned int)mbl;          /* Form translation mask */
                
                mb->mapblokfree[0] = 0x7FFFFFFF;                                        /* Set first 32 (minus 1) free */
                mb->mapblokfree[1] = 0xFFFFFFFF;                                        /* Set next 32 free */
+               mb->mapblokfree[2] = 0xFFFFFFFF;                                        /* Set next 32 free */
+               mb->mapblokfree[3] = 0xFFFFFFFF;                                        /* Set next 32 free */
        }
        
        s = splhigh();                                                                                  /* Don't bother from now on */
        if(!locked) {                                                                                   /* Do we need the lock? */
                if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {             /* Lock the control header */ 
-                       panic("mapping_free_init: timeout getting control lock\n");     /* Tell all and die */
+                       panic("mapping_free_init - timeout getting control lock\n");    /* Tell all and die */
                }
        }
        
@@ -1198,8 +1481,7 @@ void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {
        if(!locked) {                                                                                   /* Do we need to unlock? */
                hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);            /* Unlock our stuff */
        }
-
-       splx(s);                                                                                                /* Restore 'rupts */
+       splx(s);                                                                                                /* Restore 'rupts */
        return;                                                                                                 /* All done, leave... */
 }
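/*
 * Illustrative sketch (hypothetical helper, not from the original source):
 * the four mapblokfree words initialized above carry one bit per 32-byte
 * slot, set while the slot is free (bit 0 stays clear because slot 0 holds
 * the header).  An allocator can find the first free slot with a leading
 * zero count, roughly like this, assuming cntlzw() returns the number of
 * leading zero bits of its argument:
 */

static int mapblok_find_free(mappingblok *mb) {

	int word, slot;

	for(word = 0; word < 4; word++) {					/* Scan all four bit map words */
		if(!mb->mapblokfree[word]) continue;			/* No free slots in this word */
		slot = cntlzw(mb->mapblokfree[word]);			/* First set bit is the first free slot */
		mb->mapblokfree[word] &= ~(0x80000000 >> slot);	/* Mark the slot allocated */
		return (word << 5) + slot;						/* Slot number within the page */
	}
	return -1;											/* Block is full */
}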
 
@@ -1236,9 +1518,9 @@ void mapping_prealloc(unsigned int size) {                                        /* Preallocates mappings for lar
                splx(s);                                                                                        /* Restore 'rupts */
                return;
        }
-       if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {     /* Make sure we aren't recursing */
+       if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {                 /* Make sure we aren't recursing */
                hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);                    /* Unlock our stuff */
-               splx(s);                                                                                        /* Restore 'rupts */
+               splx(s);                                                        /* Restore 'rupts */
                return;
        }
        nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK;                  /* Get number of blocks to get */
@@ -1248,8 +1530,9 @@ void mapping_prealloc(unsigned int size) {                                        /* Preallocates mappings for lar
        
        for(i = 0; i < nmapb; i++) {                                                    /* Allocate 'em all */
                retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);   /* Find a virtual address to use */
-               if(retr != KERN_SUCCESS)                                                        /* Did we get some memory? */
+               if(retr != KERN_SUCCESS) {                                                      /* Did we get some memory? */
                        break;
+               }
                mapping_free_init((vm_offset_t)mbn, -1, 0);                     /* Initialize on to the release queue */
        }
        if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
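/*
 * A note on the rounding above: nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK
 * is the usual ceiling division, turning a mapping count into a whole number
 * of page-sized blocks.  Assuming MAPPERBLOK is 127 in this version (one page
 * holds 1 header plus 127 mappings), a request for 200 mappings rounds up to
 * (200 + 126) / 127 = 2 blocks.
 */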
@@ -1300,7 +1583,7 @@ void mapping_free_prime(void) {                                                                   /* Primes the mapping block release list
        mappingblok     *mbn;
        vm_offset_t     mapping_min;
        
-       retr = kmem_suballoc(kernel_map, &mapping_min, sane_size / 16,
+       retr = kmem_suballoc(kernel_map, &mapping_min, MAPPING_MAP_SIZE,
                             FALSE, TRUE, &mapping_map);
 
        if (retr != KERN_SUCCESS)
@@ -1343,41 +1626,86 @@ mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_
 
 
 /*
- *             addr64_t        mapping_p2v(pmap_t pmap, ppnum_t pa) - Finds first virtual mapping of a physical page in a space
+ *             vm_offset_t     mapping_p2v(pmap_t pmap, phys_entry *pp) - Finds first virtual mapping of a physical page in a space
  *
- *             First looks up the physical entry associated with the physical page.  Then searches the alias
- *             list for a matching pmap.  It grabs the virtual address from the mapping, drops busy, and returns 
- *             that.
+ *             Gets a lock on the physical entry.  Then it searches the list of attached mappings for one with
+ *             the same space.  If it finds it, it returns the virtual address.
  *
+ *             Note that this will fail if the pmap has nested pmaps in it.  Fact is, I'll check
+ *             for it and fail it myself...
  */
 
-addr64_t       mapping_p2v(pmap_t pmap, ppnum_t pa) {                          /* Finds first virtual mapping of a physical page in a space */
+vm_offset_t    mapping_p2v(pmap_t pmap, struct phys_entry *pp) {               /* Finds first virtual mapping of a physical page in a space */
 
-       spl_t s;
-       mapping *mp;
-       unsigned int pindex;
-       phys_entry *physent;
-       addr64_t va;
+       spl_t                           s;
+       register mapping        *mp, *mpv;
+       vm_offset_t                     va;
 
-       physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
-       if(!physent) {                                                                                          /* Did we find the physical page? */
-               panic("mapping_p2v: invalid physical page %08X\n", pa);
+       if(pmap->vflags & pmapAltSeg) return 0;                                 /* If there are nested pmaps, fail immediately */
+
+       s = splhigh();
+       if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {      /* Try to get the lock on the physical entry */
+               splx(s);                                                                                        /* Restore 'rupts */
+               panic("mapping_p2v: timeout getting lock on physent\n");                        /* Arrrgghhhh! */
+               return(0);                                                                                      /* Should die before here */
+       }
+       
+       va = 0;                                                                                                 /* Assume failure */
+       
+       for(mpv = hw_cpv(pp->phys_link); mpv; mpv = hw_cpv(mpv->next)) {        /* Scan 'em all */
+               
+               if(!(((mpv->PTEv >> 7) & 0x000FFFFF) == pmap->space)) continue; /* Skip all the rest if this is not the right space... */ 
+               
+               va = ((((unsigned int)mpv->PTEhash & -64) << 6) ^ (pmap->space  << 12)) & 0x003FF000;   /* Backward hash to the wrapped VADDR */
+               va = va | ((mpv->PTEv << 1) & 0xF0000000);                      /* Move in the segment number */
+               va = va | ((mpv->PTEv << 22) & 0x0FC00000);                     /* Add in the API for the top of the address */
+               break;                                                                                          /* We're done now, pass virtual address back */
        }
+       
+       hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);                               /* Unlock the physical entry */
+       splx(s);                                                                                                /* Restore 'rupts */
+       return(va);                                                                                             /* Return the result or 0... */
+}
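/*
 * Illustrative sketch (an assumption, not from the original source): the loop
 * above runs the primary PTEG hash backwards.  In the forward direction the
 * 32-bit PowerPC MMU hashes roughly as below, so XORing the PTEG index with
 * the space (VSID) recovers the wrapped page-index bits of the address; the
 * segment number and API are then pulled back out of the PTE's first word.
 */

unsigned int pteg_hash_sketch(unsigned int vsid, unsigned int va) {	/* Hypothetical helper */

	unsigned int pgidx = (va >> 12) & 0x0000FFFF;	/* Page index within the segment */
	return (vsid & 0x0007FFFF) ^ pgidx;				/* Primary hash, before table-size masking */
}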
 
-       s = splhigh();                                                                                  /* Make sure interruptions are disabled */
+/*
+ *     kvtophys(addr)
+ *
+ *     Convert a kernel virtual address to a physical address
+ */
+vm_offset_t kvtophys(vm_offset_t va) {
+
+       register mapping                *mp, *mpv;
+       register blokmap                *bmp;
+       register vm_offset_t    pa;
+       spl_t                           s;
+       
+       s=splhigh();                                                                                    /* Don't bother from now on */
+       mp = hw_lock_phys_vir(PPC_SID_KERNEL, va);                              /* Find mapping and lock the physical entry for this mapping */
 
-       mp = hw_find_space(physent, pmap->space);                               /* Go find the first mapping to the page from the requested pmap */
+       if((unsigned int)mp&1) {                                                                /* Did the lock on the phys entry time out? */
+               splx(s);                                                                                        /* Restore 'rupts */
+               panic("kvtophys: timeout obtaining lock on physical entry (vaddr=%08X)\n", va); /* Scream bloody murder! */
+               return 0;
+       }
 
-       if(mp) {                                                                                                /* Did we find one? */
-               va = mp->mpVAddr & -4096;                                                       /* If so, get the cleaned up vaddr */
-               mapping_drop_busy(mp);                                                          /* Go ahead and release the mapping now */
+       if(!mp) {                                                                                               /* If it was not a normal page */
+               pa = hw_cvp_blk(kernel_pmap, va);                                       /* Try to convert odd-sized page (returns 0 if not found) */
+               splx(s);                                                                                        /* Restore 'rupts */
+               return pa;                                                                                      /* Return physical address */
        }
-       else va = 0;                                                                                    /* Return failure */
 
-       splx(s);                                                                                                /* Restore 'rupts */
+       mpv = hw_cpv(mp);                                                                               /* Convert to virtual addressing */
        
-       return va;                                                                                              /* Bye, bye... */
+       if(!mpv->physent) {                                                                             /* Was there a physical entry? */
+               pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1)));      /* No physent; get physical address from the PTE image */
+       }
+       else {
+               pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1)));     /* Get physical address from physent */
+               hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);     /* Unlock the physical entry */
+       }
        
+       splx(s);                                                                                                /* Restore 'rupts */
+       return pa;                                                                                              /* Return the physical address... */
 }
 
 /*
@@ -1389,27 +1717,17 @@ addr64_t        mapping_p2v(pmap_t pmap, ppnum_t pa) {                          /* Finds first virtual mappin
 
 vm_offset_t phystokv(vm_offset_t pa) {
 
-       addr64_t        va;
-       ppnum_t pp;
+       struct phys_entry       *pp;
+       vm_offset_t                     va;
 
-       pp = pa >> 12;                                                                                  /* Convert to a page number */
-       
-       if(!(va = mapping_p2v(kernel_pmap, pp))) {
+       pp = pmap_find_physentry(pa);                                                   /* Find the physical entry */
+       if (PHYS_NULL == pp) {
+               return (vm_offset_t)NULL;                                                       /* If none, return null */
+       }
+       if(!(va=mapping_p2v(kernel_pmap, pp))) {
                return 0;                                                                                       /* Can't find it, return 0... */
        }
-       
-       return (va | (pa & (PAGE_SIZE - 1)));                                   /* Build and return VADDR... */
-
-}
-
-/*
- *     kvtophys(addr)
- *
- *     Convert a kernel virtual address to a physical address
- */
-vm_offset_t kvtophys(vm_offset_t va) {
-
-       return pmap_extract(kernel_pmap, va);                                   /* Find mapping and lock the physical entry for this mapping */
+       return (va | (pa & (PAGE_SIZE-1)));                                             /* Build and return VADDR... */
 
 }
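/*
 * Illustrative sketch (hypothetical check, not from the original source):
 * for a kernel page mapped exactly once, phystokv and kvtophys are inverses
 * of each other, and both preserve the byte offset within the page:
 */

void ptov_roundtrip_check(vm_offset_t va) {

	vm_offset_t pa;

	pa = kvtophys(va);							/* Virtual to physical */
	if(pa) assert(phystokv(pa) == va);			/* Holds only for singly mapped pages */
}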
 
@@ -1431,203 +1749,346 @@ void ignore_zero_fault(boolean_t type) {                              /* Sets up to ignore or honor any fa
 }
 
 
-/* 
- *             Copies data between a physical page and a virtual page, or 2 physical.  This is used to 
- *             move data from the kernel to user state. Note that the "which" parm
- *             says which of the parameters is physical and if we need to flush sink/source.  
- *             Note that both addresses may be physical, but only one may be virtual
+/*
+ *     Allocates a range of virtual addresses in a map as optimally as
+ *     possible for block mapping.  The start address is aligned such
+ *     that a minimum number of power-of-two sized/aligned blocks is
+ *     required to cover the entire range. 
  *
- *             The rules are that the size can be anything.  Either address can be on any boundary
- *             and span pages.  The physical data must be contiguous, as must the virtual.
+ *     We also use a mask of valid block sizes to determine optimality.
  *
- *             We can block when we try to resolve the virtual address at each page boundary.
- *             We don't check protection on the physical page.
+ *     Note that the passed in pa is not actually mapped to the selected va,
+ *     rather, it is used to figure the optimal boundary.  The actual 
+ *     V to R mapping is done externally.
  *
- *             Note that we will not check the entire range and if a page translation fails,
- *             we will stop with partial contents copied.
+ *     This function will return KERN_INVALID_ADDRESS if an optimal address 
+ *     cannot be found.  This is not necessarily a fatal error; the caller may
+ *     still be able to do a non-optimal assignment.
+ */
+
+kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa, 
+       vm_size_t size, vm_prot_t prot) {
+
+       vm_map_entry_t  entry, next, tmp_entry, new_entry;
+       vm_offset_t             start, end, algnpa, endadr, strtadr, curradr;
+       vm_offset_t             boundary;
+       
+       unsigned int    maxsize, minsize, leading, trailing;
+       
+       assert(page_aligned(pa));
+       assert(page_aligned(size));
+
+       if (map == VM_MAP_NULL) return(KERN_INVALID_ARGUMENT);  /* Dude, like we need a target map */
+       
+       minsize = blokValid ^ (blokValid & (blokValid - 1));    /* Set minimum subblock size */
+       maxsize = 0x80000000 >> cntlzw(blokValid);      /* Set maximum subblock size */
+       
+       boundary = 0x80000000 >> cntlzw(size);          /* Get optimal boundary */
+       if(boundary > maxsize) boundary = maxsize;      /* Pin this at maximum supported hardware size */
+       
+       vm_map_lock(map);                                                       /* No touchee no mapee */
+
+       for(; boundary > minsize; boundary >>= 1) {     /* Try all optimizations until we find one */
+               if(!(boundary & blokValid)) continue;   /* Skip unavailable block sizes */
+               algnpa = (pa + boundary - 1) & -boundary;       /* Round physical up */
+               leading = algnpa - pa;                                  /* Get leading size */
+               
+               curradr = 0;                                                    /* Start low */
+               
+               while(1) {                                                              /* Try all possible values for this opt level */
+
+                       curradr = curradr + boundary;           /* Get the next optimal address */
+                       strtadr = curradr - leading;            /* Calculate start of optimal range */
+                       endadr = strtadr + size;                        /* And now the end */
+                       
+                       if((curradr < boundary) ||                      /* Did address wrap here? */
+                               (strtadr > curradr) ||                  /* How about this way? */
+                               (endadr < strtadr)) break;              /* We wrapped, try next lower optimization... */
+               
+                       if(strtadr < map->min_offset) continue; /* Jump to the next higher slot... */
+                       if(endadr > map->max_offset) break;     /* No room right now... */
+                       
+                       if(vm_map_lookup_entry(map, strtadr, &entry)) continue; /* Find slot, continue if allocated... */
+               
+                       next = entry->vme_next;                         /* Get the next entry */
+                       if((next == vm_map_to_entry(map)) ||    /* Are we the last entry? */
+                               (next->vme_start >= endadr)) {  /* or do we end before the next entry? */
+                       
+                               new_entry = vm_map_entry_insert(map, entry, strtadr, endadr, /* Yes, carve out our entry */
+                                       VM_OBJECT_NULL,
+                                       0,                                                      /* Offset into object of 0 */
+                                       FALSE,                                          /* No copy needed */
+                                       FALSE,                                          /* Not shared */
+                                       FALSE,                                          /* Not in transition */
+                                       prot,                                           /* Set the protection to requested */
+                                       prot,                                           /* We can't change protection */
+                                       VM_BEHAVIOR_DEFAULT,            /* Use default behavior, but makes no never mind,
+                                                                                                  'cause we don't page in this area */
+                                       VM_INHERIT_DEFAULT,             /* Default inheritance */
+                                       0);                                                     /* Nothing is wired */
+                       
+                               vm_map_unlock(map);                             /* Let the world see it all */
+                               *va = strtadr;                                  /* Tell everyone */
+                               *bnd = boundary;                                /* Say what boundary we are aligned to */
+                               return(KERN_SUCCESS);                   /* Leave, all is right with the world... */
+                       }
+               }               
+       }       
+
+       vm_map_unlock(map);                                                     /* Couldn't find a slot */
+       return(KERN_INVALID_ADDRESS);
+}
+
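/*
 * A note on the bit tricks above: (x ^ (x & (x - 1))) isolates the lowest set
 * bit of x, and (0x80000000 >> cntlzw(x)) isolates the highest.  For example,
 * with a hypothetical blokValid = 0x00120000 (128KB and 1MB blocks valid):
 *
 *	minsize = 0x00120000 ^ (0x00120000 & 0x0011FFFF) = 0x00020000   (128KB)
 *	maxsize = 0x80000000 >> cntlzw(0x00120000)       = 0x00100000   (1MB)
 *
 * The search then walks from the optimal power of two down toward minsize, so
 * failure to find a slot costs only optimality, never correctness.
 */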
+/* 
+ *             Copies data from a physical page to a virtual page.  This is used to 
+ *             move data from the kernel to user state.
+ *
+ *             Note that it is invalid to have a source that spans a page boundary.
+ *             This can block.
+ *             We don't check protection either.
+ *             And we don't handle a block mapped sink address either.
  *
  */
  
-kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which) {
+kern_return_t copyp2v(vm_offset_t source, vm_offset_t sink, unsigned int size) {
  
        vm_map_t map;
        kern_return_t ret;
-       addr64_t pa, nextva, vaddr, paddr;
-       register mapping *mp;
+       unsigned int spaceid;
+       int left, csize;
+       vm_offset_t pa;
+       register mapping *mpv, *mp;
        spl_t s;
-       unsigned int sz, left, lop, csize;
-       int needtran, bothphys;
-       unsigned int pindex;
-       phys_entry *physent;
-       vm_prot_t prot;
 
-       map = (which & cppvKmap) ? kernel_map : current_map_fast();
+       if((size == 0) || ((source ^ (source + size - 1)) & -PAGE_SIZE)) return KERN_FAILURE;   /* We don't allow a source page crosser */
+       map = current_act()->map;                                               /* Get the current map */
 
-       if((which & (cppvPsrc | cppvPsnk)) == 0 ) {             /* Make sure that only one is virtual */
-               panic("copypv: no more than 1 parameter may be virtual\n");     /* Not allowed */
-       }
-       
-       bothphys = 1;                                                                   /* Assume both are physical */
+       while(size) {
+               s=splhigh();                                                            /* Don't bother me */
        
-       if(!(which & cppvPsnk)) {                                               /* Is there a virtual page here? */
-               vaddr = sink;                                                           /* Sink side is virtual */
-               bothphys = 0;                                                           /* Show both aren't physical */
-               prot = VM_PROT_READ | VM_PROT_WRITE;            /* Sink always must be read/write */
-       } else if(!(which & cppvPsrc)) {                                /* Source side is virtual */
-               vaddr = source;                                                         /* Source side is virtual */
-               bothphys = 0;                                                           /* Show both aren't physical */
-               prot = VM_PROT_READ;                                            /* Virtual source is always read only */
-       }
+               spaceid = map->pmap->pmapSegs[(unsigned int)sink >> 28];        /* Get space ID. Don't bother to clean top bits */
 
-       needtran = 1;                                                                   /* Show we need to map the virtual the first time */
-       s = splhigh();                                                                  /* Don't bother me */
+               mp = hw_lock_phys_vir(spaceid, sink);           /* Lock the physical entry for the sink */
+               if(!mp) {                                                                       /* Was it there? */
+                       splx(s);                                                                /* Restore the interrupt level */
+                       ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0);    /* Didn't find it, try to fault it in... */
+                       if (ret == KERN_SUCCESS) continue;              /* We got it in, try again to find it... */
 
-       while(size) {
+                       return KERN_FAILURE;                                    /* Didn't find any, return no good... */
+               }
+               if((unsigned int)mp&1) {                                        /* Did we timeout? */
+                       panic("copyp2v: timeout locking physical entry for virtual address (%08X)\n", sink);    /* Yeah, scream about it! */
+                       splx(s);                                                                /* Restore the interrupt level */
+                       return KERN_FAILURE;                                    /* Bad hair day, return FALSE... */
+               }
 
-               if(!bothphys && (needtran || !(vaddr & 4095LL))) {      /* If first time or we stepped onto a new page, we need to translate */
-                       if(!needtran) {                                                 /* If this is not the first translation, we need to drop the old busy */
-                               mapping_drop_busy(mp);                          /* Release the old mapping now */
-                       }
-                       needtran = 0;
-                       
-                       while(1) {
-                               mp = mapping_find(map->pmap, vaddr, &nextva, 1);        /* Find and busy the mapping */
-                               if(!mp) {                                                       /* Was it there? */
-                                       if(per_proc_info[cpu_number()].istackptr == 0)
-                                               panic("copypv: No valid mapping on memory %s %x", "RD", vaddr);
-
-                                       splx(s);                                                /* Restore the interrupt level */
-                                       ret = vm_fault(map, trunc_page_32((vm_offset_t)vaddr), prot, FALSE, NULL, 0);   /* Didn't find it, try to fault it in... */
-                               
-                                       if(ret != KERN_SUCCESS)return KERN_FAILURE;     /* Didn't find any, return no good... */
-                                       
-                                       s = splhigh();                                  /* Don't bother me */
-                                       continue;                                               /* Go try for the map again... */
-       
-                               }
-               
-                               /* Note that we have to have the destination writable.  So, if we already have it, or we are mapping the source,
-                                       we can just leave.
-                               */              
-                               if((which & cppvPsnk) || !(mp->mpVAddr & 1)) break;             /* We got it mapped R/W or the source is not virtual, leave... */
-                       
-                               mapping_drop_busy(mp);                          /* Go ahead and release the mapping for now */
-                               if(per_proc_info[cpu_number()].istackptr == 0)
-                                       panic("copypv: No valid mapping on memory %s %x", "RDWR", vaddr);
-                               splx(s);                                                        /* Restore the interrupt level */
-                               
-                               ret = vm_fault(map, trunc_page_32((vm_offset_t)vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0);   /* check for a COW area */
-                               if (ret != KERN_SUCCESS) return KERN_FAILURE;   /* We couldn't get it R/W, leave in disgrace... */
-                               s = splhigh();                                          /* Don't bother me */
-                       }
-                       paddr = ((addr64_t)mp->mpPAddr << 12) + (vaddr - (mp->mpVAddr & -4096LL));        /* construct the physical address... this calculation works */
-                                                                                                         /* properly on both single page and block mappings */
-                       if(which & cppvPsrc) sink = paddr;              /* If source is physical, then the sink is virtual */
-                       else source = paddr;                                    /* Otherwise the source is */
+               mpv = hw_cpv(mp);                                                       /* Convert mapping block to virtual */
+
+               if(mpv->PTEr & 1) {                                                     /* Are we write protected?  If so, it could indicate COW */
+                       hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);     /* Unlock the sink */
+                       splx(s);                                                                /* Restore the interrupt level */
+                       ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0);    /* check for a COW area */
+                       if (ret == KERN_SUCCESS) continue;              /* We got it in, try again to find it... */
+                       return KERN_FAILURE;                                    /* Didn't find any, return no good... */
                }
-                       
-               lop = (unsigned int)(4096LL - (sink & 4095LL));         /* Assume sink smallest */
-               if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL));  /* No, source is smaller */
-               
-               csize = size;                                                           /* Assume we can copy it all */
-               if(lop < size) csize = lop;                                     /* Nope, we can't do it all */
-               
-               if(which & cppvFsrc) flush_dcache64(source, csize, 1);  /* If requested, flush source before move */
-               if(which & cppvFsnk) flush_dcache64(sink, csize, 1);    /* If requested, flush sink before move */
+               left = PAGE_SIZE - (sink & PAGE_MASK);          /* Get amount left on sink page */
+
+               csize = size < left ? size : left;              /* Set amount to copy this pass */
+
+               pa = (vm_offset_t)((mpv->physent->pte1 & ~PAGE_MASK) | ((unsigned int)sink & PAGE_MASK));       /* Get physical address of sink */
+
+               bcopy_physvir((char *)source, (char *)pa, csize);       /* Do a physical copy, virtually */
+
+               hw_set_mod(mpv->physent);                                       /* Go set the change of the sink */
+
+               hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);     /* Unlock the sink */
+               splx(s);                                                                        /* Open up for interrupts */
+
+               sink += csize;                                                          /* Move up to start of next page */
+               source += csize;                                                        /* Move up source */
+               size -= csize;                                                          /* Set amount for next pass */
+       }
+       return KERN_SUCCESS;
+}
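/*
 * Illustrative sketch (hypothetical helper, not from the original source):
 * the guard at the top of copyp2v rejects any source run whose first and
 * last bytes sit on different pages.  The XOR trick generalizes like this
 * (len must be nonzero, which the size == 0 test above guarantees):
 */

static int crosses_page(vm_offset_t addr, unsigned int len) {

	return ((addr ^ (addr + len - 1)) & -PAGE_SIZE) != 0;	/* Nonzero iff the page numbers differ */
}

/*
 * With 4096-byte pages, crosses_page(0x1FF0, 0x20) is true (the last byte,
 * 0x200F, is on the next page), while crosses_page(0x1000, 0x1000) is false
 * (bytes 0x1000 through 0x1FFF share one page).
 */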
 
-               bcopy_physvir(source, sink, csize);                     /* Do a physical copy, virtually */
-               
-               if(which & cppvFsrc) flush_dcache64(source, csize, 1);  /* If requested, flush source after move */
-               if(which & cppvFsnk) flush_dcache64(sink, csize, 1);    /* If requested, flush sink after move */
 
 /*
- *             Note that for certain ram disk flavors, we may be copying outside of known memory.
- *             Therefore, before we try to mark it modified, we check if it exists.
+ * copy 'size' bytes from physical to physical address
+ * the caller must validate the physical ranges 
+ *
+ * if flush_action == 0, no cache flush necessary
+ * if flush_action == 1, flush the source
+ * if flush_action == 2, flush the dest
+ * if flush_action == 3, flush both source and dest
  */
 
-               if( !(which & cppvNoModSnk)) {
-                       physent = mapping_phys_lookup(sink >> 12, &pindex);     /* Get physical entry for sink */
-                       if(physent) mapping_set_mod((ppnum_t)(sink >> 12));             /* Make sure we know that it is modified */
-               }
-               if( !(which & cppvNoRefSrc)) {
-                       physent = mapping_phys_lookup(source >> 12, &pindex);   /* Get physical entry for source */
-                       if(physent) mapping_set_ref((ppnum_t)(source >> 12));           /* Make sure we know that it has been referenced */
-               }
-               size = size - csize;                                            /* Calculate what is left */
-               vaddr = vaddr + csize;                                          /* Move to next sink address */
-               source = source + csize;                                        /* Bump source to next physical address */
-               sink = sink + csize;                                            /* Bump sink to next physical address */
+kern_return_t copyp2p(vm_offset_t source, vm_offset_t dest, unsigned int size, unsigned int flush_action) {
+
+        switch(flush_action) {
+       case 1:
+               flush_dcache(source, size, 1);
+               break;
+       case 2:
+               flush_dcache(dest, size, 1);
+               break;
+       case 3:
+               flush_dcache(source, size, 1);
+               flush_dcache(dest, size, 1);
+               break;
+
        }
-       
-       if(!bothphys) mapping_drop_busy(mp);                    /* Go ahead and release the mapping of the virtual page if any */
-       splx(s);                                                                                /* Open up for interrupts */
+        bcopy_phys((char *)source, (char *)dest, size);        /* Do a physical copy */
+
+        switch(flush_action) {
+       case 1:
+               flush_dcache(source, size, 1);
+               break;
+       case 2:
+               flush_dcache(dest, size, 1);
+               break;
+       case 3:
+               flush_dcache(source, size, 1);
+               flush_dcache(dest, size, 1);
+               break;
 
-       return KERN_SUCCESS;
+       }
+       return KERN_SUCCESS;                                            /* Tell the caller all went well */
 }
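/*
 * Usage sketch (illustrative; src_pa and dst_pa are hypothetical, already
 * validated physical addresses): copying one page while flushing both the
 * source and destination caches around the move:
 *
 *	copyp2p(src_pa, dst_pa, PAGE_SIZE, 3);		flush_action 3 = flush both
 */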
 
 
+
+#if DEBUG
 /*
- *     Debug code 
+ *             Dumps out the mapping stuff associated with a virtual address
  */
+void dumpaddr(space_t space, vm_offset_t va) {
 
-void mapping_verify(void) {
+       mapping         *mp, *mpv;
+       vm_offset_t     pa;
+       spl_t           s;
 
-       spl_t           s;
-       mappingblok     *mb, *mbn;
-       int                     relncnt;
-       unsigned int    dumbodude;
+       s=splhigh();                                                                                    /* Don't bother me */
 
-       dumbodude = 0;
-       
-       s = splhigh();                                                                                  /* Don't bother from now on */
+       mp = hw_lock_phys_vir(space, va);                                               /* Lock the physical entry for this mapping */
+       if(!mp) {                                                                                               /* Did we find one? */
+               splx(s);                                                                                        /* Restore the interrupt level */
+               printf("dumpaddr: virtual address (%08X) not mapped\n", va);    
+               return;                                                                                         /* Didn't find any, return FALSE... */
+       }
+       if((unsigned int)mp&1) {                                                                /* Did we timeout? */
+               panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", va);     /* Yeah, scream about it! */
+               splx(s);                                                                                        /* Restore the interrupt level */
+               return;                                                                                         /* Bad hair day, return FALSE... */
+       }
+       printf("dumpaddr: space=%08X; vaddr=%08X\n", space, va);        /* Say what address we're dumping */
+       mpv = hw_cpv(mp);                                                                               /* Get virtual address of mapping */
+       dumpmapping(mpv);
+       if(mpv->physent) {
+               dumppca(mpv);
+               hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);     /* Unlock physical entry associated with mapping */
+       }
+       splx(s);                                                                                                /* Was there something you needed? */
+       return;                                                                                                 /* Tell them we did it */
+}
 
-       mbn = 0;                                                                                                /* Start with none */
-       for(mb = mapCtl.mapcnext; mb; mb = mb->nextblok) {              /* Walk the free chain */
-               if((mb->mapblokflags & 0x7FFFFFFF) != mb) {                     /* Is tag ok? */
-                       panic("mapping_verify: flags tag bad, free chain; mb = %08X, tag = %08X\n", mb, mb->mapblokflags);
-               }
-               mbn = mb;                                                                                       /* Remember the last one */
+
+
+/*
+ *             Prints out a mapping control block
+ *
+ */
+void dumpmapping(struct mapping *mp) {                                                 /* Dump out a mapping */
+
+       printf("Dump of mapping block: %08X\n", mp);                    /* Header */
+       printf("                 next: %08X\n", mp->next);                 
+       printf("             hashnext: %08X\n", mp->hashnext);                 
+       printf("              PTEhash: %08X\n", mp->PTEhash);                 
+       printf("               PTEent: %08X\n", mp->PTEent);                 
+       printf("              physent: %08X\n", mp->physent);                 
+       printf("                 PTEv: %08X\n", mp->PTEv);                 
+       printf("                 PTEr: %08X\n", mp->PTEr);                 
+       printf("                 pmap: %08X\n", mp->pmap);
+       
+       if(mp->physent) {                                                                       /* Print physent if it exists */
+               printf("Associated physical entry: %08X %08X\n", mp->physent->phys_link, mp->physent->pte1);
        }
-       
-       if(mapCtl.mapcnext && (mapCtl.mapclast != mbn)) {               /* Do we point to the last one? */
-               panic("mapping_verify: last pointer bad; mb = %08X, mapclast = %08X\n", mb, mapCtl.mapclast);
+       else {
+               printf("Associated physical entry: none\n");
        }
        
-       relncnt = 0;                                                                                    /* Clear count */
-       for(mb = mapCtl.mapcrel; mb; mb = mb->nextblok) {               /* Walk the release chain */
-               dumbodude |= mb->mapblokflags;                                          /* Just touch it to make sure it is mapped */
-               relncnt++;                                                                                      /* Count this one */
-       }
+       dumppca(mp);                                                                            /* Dump out the PCA information */
        
-       if(mapCtl.mapcreln != relncnt) {                                                        /* Is the count on release queue ok? */
-               panic("mapping_verify: bad release queue count; mapcreln = %d, cnt = %d, ignore this = %08X\n", mapCtl.mapcreln, relncnt, dumbodude);
-       }
+       return;
+}
 
-       splx(s);                                                                                                /* Restore 'rupts */
+/*
+ *             Prints out a PTEG control area
+ *
+ */
+void dumppca(struct mapping *mp) {                                             /* PCA */
+
+       PCA                             *pca;
+       unsigned int    *pteg;
+       
+       pca = (PCA *)((unsigned int)mp->PTEhash&-64);           /* Back up to the start of the PCA */
+       pteg=(unsigned int *)((unsigned int)pca-(((hash_table_base&0x0000FFFF)+1)<<16));
+       printf(" Dump of PCA: %08X\n", pca);            /* Header */
+       printf("     PCAlock: %08X\n", pca->PCAlock);                 
+       printf("     PCAallo: %08X\n", pca->flgs.PCAallo);                 
+       printf("     PCAhash: %08X %08X %08X %08X\n", pca->PCAhash[0], pca->PCAhash[1], pca->PCAhash[2], pca->PCAhash[3]);                 
+       printf("              %08X %08X %08X %08X\n", pca->PCAhash[4], pca->PCAhash[5], pca->PCAhash[6], pca->PCAhash[7]);                 
+       printf("Dump of PTEG: %08X\n", pteg);           /* Header */
+       printf("              %08X %08X %08X %08X\n", pteg[0], pteg[1], pteg[2], pteg[3]);                 
+       printf("              %08X %08X %08X %08X\n", pteg[4], pteg[5], pteg[6], pteg[7]);                 
+       printf("              %08X %08X %08X %08X\n", pteg[8], pteg[9], pteg[10], pteg[11]);                 
+       printf("              %08X %08X %08X %08X\n", pteg[12], pteg[13], pteg[14], pteg[15]);                 
+       return;
+}
 
+/*
+ *             Dumps starting with a physical entry
+ */
+void dumpphys(struct phys_entry *pp) {                                                 /* Dump from physent */
+
+       mapping                 *mp;
+       PCA                             *pca;
+       unsigned int    *pteg;
+
+       printf("Dump from physical entry %08X: %08X %08X\n", pp, pp->phys_link, pp->pte1);
+       mp = hw_cpv(pp->phys_link);
+       while(mp) {
+               dumpmapping(mp);
+               dumppca(mp);
+               mp = hw_cpv(mp->next);
+       }
+       
        return;
 }
 
-void mapping_phys_unused(ppnum_t pa) {
+#endif
 
-       unsigned int pindex;
-       phys_entry *physent;
 
-       physent = mapping_phys_lookup(pa, &pindex);                             /* Get physical entry */
-       if(!physent) return;                                                                    /* Did we find the physical page? */
+kern_return_t bmapvideo(vm_offset_t *info);
+kern_return_t bmapvideo(vm_offset_t *info) {
 
-       if(!(physent->ppLink & ~(ppLock | ppN | ppFlags))) return;      /* No one else is here */
-       
-       panic("mapping_phys_unused: physical page (%08X) in use, physent = %08X\n", pa, physent);
+       extern struct vc_info vinfo;
        
+       (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info));    /* Copy out the video info */
+       return KERN_SUCCESS;
 }
+
+kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
+kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {
        
+       pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr, 0);    /* Map it in */
+       return KERN_SUCCESS;
+}
+
+kern_return_t bmapmapr(vm_offset_t va);
+kern_return_t bmapmapr(vm_offset_t va) {
        
-       
-       
-       
-       
-       
-       
-       
-       
+       mapping_remove(current_act()->task->map->pmap, va);     /* Remove map */
+       return KERN_SUCCESS;
+}