git.saurik.com Git - apple/xnu.git blobdiff (release xnu-792.25.20) - osfmk/i386/pmap.c

diff --git a/osfmk/i386/pmap.c b/osfmk/i386/pmap.c
index 14edd73266741574b4f76d7454fbefa8aba3b418..7224204c91069e46c7e0570465dcb6cbdc6a2140 100644
--- a/osfmk/i386/pmap.c
+++ b/osfmk/i386/pmap.c
@@ -1,24 +1,21 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
- * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
  * 
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- * 
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
  * 
  * @APPLE_LICENSE_HEADER_END@
  */
@@ -85,8 +82,6 @@
  *     and to when physical maps must be made correct.
  */
 
-#include <cpus.h>
-
 #include <string.h>
 #include <norma_vm.h>
 #include <mach_kdb.h>
@@ -99,6 +94,7 @@
 #include <kern/zalloc.h>
 
 #include <kern/lock.h>
+#include <kern/kalloc.h>
 #include <kern/spl.h>
 
 #include <vm/pmap.h>
 #include <i386/misc_protos.h>
 
 #include <i386/cpuid.h>
+#include <i386/cpu_data.h>
+#include <i386/cpu_number.h>
+#include <i386/machine_cpu.h>
+#include <i386/mp_slave_boot.h>
+#include <i386/seg.h>
+#include <i386/cpu_capabilities.h>
 
 #if    MACH_KDB
 #include <ddb/db_command.h>
 
 #include <kern/xpr.h>
 
-#if NCPUS > 1
-#include <i386/AT386/mp/mp_events.h>
-#endif
+#include <vm/vm_protos.h>
+
+#include <i386/mp.h>
+#include <i386/mp_desc.h>
+
+#include <sys/kdebug.h>
+
+#ifdef IWANTTODEBUG
+#undef DEBUG
+#define DEBUG 1
+#define POSTCODE_DELAY 1
+#include <i386/postcode.h>
+#endif /* IWANTTODEBUG */
 
 /*
  * Forward declarations for internal functions.
  */
-void   pmap_expand(
+void           pmap_expand_pml4(
+                       pmap_t          map,
+                       vm_map_offset_t v);
+
+void           pmap_expand_pdpt(
                        pmap_t          map,
-                       vm_offset_t     v);
+                       vm_map_offset_t v);
 
-extern void    pmap_remove_range(
+void           pmap_expand(
+                       pmap_t          map,
+                       vm_map_offset_t v);
+
+static void    pmap_remove_range(
                        pmap_t          pmap,
-                       vm_offset_t     va,
+                       vm_map_offset_t va,
                        pt_entry_t      *spte,
                        pt_entry_t      *epte);
 
-void   phys_attribute_clear(
-                       vm_offset_t     phys,
+void           phys_attribute_clear(
+                       ppnum_t phys,
                        int             bits);
 
-boolean_t phys_attribute_test(
-                       vm_offset_t     phys,
+boolean_t      phys_attribute_test(
+                       ppnum_t phys,
                        int             bits);
 
-void pmap_set_modify(vm_offset_t       phys);
-
-void phys_attribute_set(
-                       vm_offset_t     phys,
+void           phys_attribute_set(
+                       ppnum_t phys,
                        int             bits);
 
+void           pmap_set_reference(
+                       ppnum_t pn);
+
+void           pmap_movepage(
+                       unsigned long   from,
+                       unsigned long   to,
+                       vm_size_t       size);
 
-#ifndef        set_dirbase
-void   set_dirbase(vm_offset_t dirbase);
-#endif /* set_dirbase */
+boolean_t      phys_page_exists(
+                       ppnum_t pn);
+
+#ifdef PMAP_DEBUG
+void dump_pmap(pmap_t);
+void dump_4GB_pdpt(pmap_t p);
+void dump_4GB_pdpt_thread(thread_t tp);
+#endif
 
-#define        PA_TO_PTE(pa)   (pa_to_pte((pa) - VM_MIN_KERNEL_ADDRESS))
 #define        iswired(pte)    ((pte) & INTEL_PTE_WIRED)
 
-pmap_t real_pmap[NCPUS];
+int nx_enabled = 1;                    /* enable no-execute protection */
+
+int cpu_64bit  = 0;
 
-#define        WRITE_PTE(pte_p, pte_entry)             *(pte_p) = (pte_entry);
-#define        WRITE_PTE_FAST(pte_p, pte_entry)        *(pte_p) = (pte_entry);
 
 /*
  *     Private data structures.
@@ -183,7 +212,7 @@ pmap_t      real_pmap[NCPUS];
 typedef struct pv_entry {
        struct pv_entry *next;          /* next pv_entry */
        pmap_t          pmap;           /* pmap where mapping lies */
-       vm_offset_t     va;             /* virtual address for mapping */
+       vm_map_offset_t va;             /* virtual address for mapping */
 } *pv_entry_t;
 
 #define PV_ENTRY_NULL  ((pv_entry_t) 0)
@@ -197,11 +226,21 @@ pv_entry_t        pv_head_table;          /* array of entries, one per page */
  */
 pv_entry_t     pv_free_list;           /* free list at SPLVM */
 decl_simple_lock_data(,pv_free_list_lock)
+int pv_free_count = 0;
+#define PV_LOW_WATER_MARK 5000
+#define PV_ALLOC_CHUNK 2000
+thread_call_t  mapping_adjust_call;
+static thread_call_data_t  mapping_adjust_call_data;
+int mappingrecurse = 0;
 
 #define        PV_ALLOC(pv_e) { \
        simple_lock(&pv_free_list_lock); \
        if ((pv_e = pv_free_list) != 0) { \
            pv_free_list = pv_e->next; \
+            pv_free_count--; \
+            if (pv_free_count < PV_LOW_WATER_MARK) \
+              if (hw_compare_and_store(0,1,(u_int *)&mappingrecurse)) \
+                thread_call_enter(mapping_adjust_call); \
        } \
        simple_unlock(&pv_free_list_lock); \
 }
@@ -210,11 +249,14 @@ decl_simple_lock_data(,pv_free_list_lock)
        simple_lock(&pv_free_list_lock); \
        pv_e->next = pv_free_list; \
        pv_free_list = pv_e; \
+        pv_free_count++; \
        simple_unlock(&pv_free_list_lock); \
 }
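/*
 * Editor's sketch (not part of the diff): how a mapping path typically
 * pairs these macros.  The zone fallback mirrors the pattern used when
 * the SPLVM free list is empty; pv_e is a caller-local here.
 */
	pv_entry_t	pv_e;

	PV_ALLOC(pv_e);				/* try the SPLVM free list first */
	if (pv_e == PV_ENTRY_NULL)
		pv_e = (pv_entry_t) zalloc(pv_list_zone);
	/* ... link pv_e onto the page's pv_head_table chain ... */
	PV_FREE(pv_e);				/* on unmap, give it back to the free list */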
 
 zone_t         pv_list_zone;           /* zone of pv_entry structures */
 
+static zone_t pdpt_zone;
+
 /*
  *     Each entry in the pv_head_table is locked by a bit in the
  *     pv_lock_table.  The lock bits are accessed by the physical
@@ -229,16 +271,19 @@ char      *pv_lock_table;         /* pointer to array of bits */
  *     for.  Initialized to zero so that pmap operations done before
  *     pmap_init won't touch any non-existent structures.
  */
-vm_offset_t    vm_first_phys = (vm_offset_t) 0;
-vm_offset_t    vm_last_phys  = (vm_offset_t) 0;
+pmap_paddr_t   vm_first_phys = (pmap_paddr_t) 0;
+pmap_paddr_t   vm_last_phys  = (pmap_paddr_t) 0;
 boolean_t      pmap_initialized = FALSE;/* Has pmap_init completed? */
 
+static struct vm_object kptobj_object_store;
+static vm_object_t kptobj;
+
 /*
  *     Index into pv_head table, its lock bits, and the modify/reference
  *     bits starting at vm_first_phys.
  */
 
-#define pa_index(pa)   (atop(pa - vm_first_phys))
+#define pa_index(pa)   (i386_btop(pa - vm_first_phys))
 
 #define pai_to_pvh(pai)                (&pv_head_table[pai])
 #define lock_pvh_pai(pai)      bit_lock(pai, (void *)pv_lock_table)
@@ -255,18 +300,14 @@ char      *pmap_phys_attributes;
  */
 #define        PHYS_MODIFIED   INTEL_PTE_MOD   /* page modified */
 #define        PHYS_REFERENCED INTEL_PTE_REF   /* page referenced */
+#define PHYS_NCACHE    INTEL_PTE_NCACHE
 
 /*
  *     Amount of virtual memory mapped by one
  *     page-directory entry.
  */
 #define        PDE_MAPPED_SIZE         (pdetova(1))
-
-/*
- *     We allocate page table pages directly from the VM system
- *     through this object.  It maps physical memory.
- */
-vm_object_t    pmap_object = VM_OBJECT_NULL;
+uint64_t pde_mapped_size;
 
 /*
  *     Locking and TLB invalidation
@@ -305,26 +346,23 @@ vm_object_t       pmap_object = VM_OBJECT_NULL;
  *     kernel_pmap can only be held at splhigh.
  */
 
-#if    NCPUS > 1
 /*
- *     We raise the interrupt level to splhigh, to block interprocessor
- *     interrupts during pmap operations.  We must take the CPU out of
- *     the cpus_active set while interrupts are blocked.
+ *     We raise the interrupt level to splvm, to block interprocessor
+ *     interrupts during pmap operations.  We mark the cpu's cr3 inactive
+ *     while interrupts are blocked.
  */
-#define SPLVM(spl)     { \
-       spl = splhigh(); \
-       mp_disable_preemption(); \
-       i_bit_clear(cpu_number(), &cpus_active); \
-       mp_enable_preemption(); \
+#define SPLVM(spl)     {                                               \
+       spl = splhigh();                                                \
+       CPU_CR3_MARK_INACTIVE();                                        \
 }
 
-#define SPLX(spl)      { \
-       mp_disable_preemption(); \
-       i_bit_set(cpu_number(), &cpus_active); \
-       mp_enable_preemption(); \
-       splx(spl); \
+#define SPLX(spl)      {                                               \
+       if (current_cpu_datap()->cpu_tlb_invalid)                       \
+           process_pmap_updates();                                     \
+       CPU_CR3_MARK_ACTIVE();                                          \
+       splx(spl);                                                      \
 }
-
+           
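/*
 * Editor's sketch of the intended bracketing, per the comment above.
 * Any TLB shootdown deferred while this cpu's cr3 was marked inactive
 * is picked up by process_pmap_updates() on the way out of SPLX.
 */
	spl_t	spl;

	SPLVM(spl);		/* splhigh + mark this cpu's cr3 inactive */
	/* ... walk or modify pmap structures ... */
	SPLX(spl);		/* run deferred invalidations, re-mark cr3 active */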
 /*
  *     Lock on pmap system
  */
@@ -361,89 +399,59 @@ lock_t    pmap_system_lock;
 
 #define UNLOCK_PVH(index)      unlock_pvh_pai(index)
 
-#define PMAP_FLUSH_TLBS()                                              \
-{                                                                      \
-       flush_tlb();                                                    \
-       i386_signal_cpus(MP_TLB_FLUSH);                                 \
-}
-
-#define        PMAP_RELOAD_TLBS()      {               \
-       i386_signal_cpus(MP_TLB_RELOAD);        \
-       set_cr3(kernel_pmap->pdirbase);         \
-}
-
-#define PMAP_INVALIDATE_PAGE(map, addr) {      \
-       if (map == kernel_pmap)                 \
-               invlpg((vm_offset_t) addr);     \
-       else                                    \
-               flush_tlb();                    \
-       i386_signal_cpus(MP_TLB_FLUSH);         \
-}
-
-#else  /* NCPUS > 1 */
-
-#if    MACH_RT
-#define SPLVM(spl)                     { (spl) = splhigh(); }
-#define SPLX(spl)                      splx (spl)
-#else  /* MACH_RT */
-#define SPLVM(spl)
-#define SPLX(spl)
-#endif /* MACH_RT */
+#if    USLOCK_DEBUG
+extern int     max_lock_loops;
+extern int     disableSerialOuput;
+#define LOOP_VAR                                                       \
+       unsigned int    loop_count;                                     \
+       loop_count = disableSerialOuput ? max_lock_loops                \
+                                       : max_lock_loops*100
+#define LOOP_CHECK(msg, pmap)                                          \
+       if (--loop_count == 0) {                                        \
+               mp_disable_preemption();                                \
+               kprintf("%s: cpu %d pmap %x\n",                         \
+                         msg, cpu_number(), pmap);                     \
+               Debugger("deadlock detection");                         \
+               mp_enable_preemption();                                 \
+               loop_count = max_lock_loops;                            \
+       }
+#else  /* USLOCK_DEBUG */
+#define LOOP_VAR
+#define LOOP_CHECK(msg, pmap)
+#endif /* USLOCK_DEBUG */
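/*
 * Editor's sketch of how LOOP_VAR/LOOP_CHECK are meant to wrap a lock
 * spin (illustrative only, not the exact PMAP_READ_LOCK expansion;
 * cpu_pause() stands in for whatever spin-wait hint the build uses).
 */
	LOOP_VAR;				/* no-op unless USLOCK_DEBUG */
	while (!simple_lock_try(&pmap->lock)) {
		LOOP_CHECK("pmap lock", pmap);	/* Debugger() after max_lock_loops spins */
		cpu_pause();
	}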
 
-#define PMAP_READ_LOCK(pmap, spl)      SPLVM(spl)
-#define PMAP_WRITE_LOCK(spl)           SPLVM(spl)
-#define PMAP_READ_UNLOCK(pmap, spl)    SPLX(spl)
-#define PMAP_WRITE_UNLOCK(spl)         SPLX(spl)
-#define PMAP_WRITE_TO_READ_LOCK(pmap)
 
-#if    MACH_RT
-#define LOCK_PVH(index)                        disable_preemption()
-#define UNLOCK_PVH(index)              enable_preemption()
-#else  /* MACH_RT */
-#define LOCK_PVH(index)
-#define UNLOCK_PVH(index)
-#endif /* MACH_RT */
+static void pmap_flush_tlbs(pmap_t pmap);
 
-#define        PMAP_FLUSH_TLBS()       flush_tlb()
-#define        PMAP_RELOAD_TLBS()      set_cr3(kernel_pmap->pdirbase)
-#define        PMAP_INVALIDATE_PAGE(map, addr) {       \
-               if (map == kernel_pmap)         \
-                       invlpg((vm_offset_t) addr);     \
-               else                            \
-                       flush_tlb();            \
-}
+#define PMAP_UPDATE_TLBS(pmap, s, e)                                   \
+       pmap_flush_tlbs(pmap)
 
-#endif /* NCPUS > 1 */
 
 #define MAX_TBIS_SIZE  32              /* > this -> TBIA */ /* XXX */
 
-#if    NCPUS > 1
-/*
- *     Structures to keep track of pending TLB invalidations
- */
-cpu_set                        cpus_active;
-cpu_set                        cpus_idle;
-volatile boolean_t     cpu_update_needed[NCPUS];
-
 
-#endif /* NCPUS > 1 */
+pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE];
 
 /*
  *     Other useful macros.
  */
-#define current_pmap()         (vm_map_pmap(current_act()->map))
-#define pmap_in_use(pmap, cpu) (((pmap)->cpus_using & (1 << (cpu))) != 0)
+#define current_pmap()         (vm_map_pmap(current_thread()->map))
 
 struct pmap    kernel_pmap_store;
 pmap_t         kernel_pmap;
 
+pd_entry_t    high_shared_pde;
+pd_entry_t    commpage64_pde;
+
 struct zone    *pmap_zone;             /* zone of pmap structures */
 
 int            pmap_debug = 0;         /* flag for debugging prints */
-int            ptes_per_vm_page;       /* number of hardware ptes needed
-                                          to map one VM page. */
+
 unsigned int   inuse_ptepages_count = 0;       /* debugging */
 
+addr64_t       kernel64_cr3;
+boolean_t      no_shared_cr3 = FALSE;  /* -no_shared_cr3 boot arg */
+
 /*
  *     Pmap cache.  Cache is threaded through ref_count field of pmap.
  *     Max will eventually be constant -- variable for experimentation.
@@ -454,21 +462,22 @@ pmap_t            pmap_cache_list;
 int            pmap_cache_count;
 decl_simple_lock_data(,pmap_cache_lock)
 
-extern vm_offset_t     hole_start, hole_end;
-
 extern char end;
 
-/*
- * Page directory for kernel.
- */
-pt_entry_t     *kpde = 0;      /* set by start.s - keep out of bss */
+static int nkpt;
+extern uint32_t lowGlo;
+extern void *version;
+
+pt_entry_t     *DMAP1, *DMAP2;
+caddr_t         DADDR1;
+caddr_t         DADDR2;
 
 #if  DEBUG_ALIAS
 #define PMAP_ALIAS_MAX 32
 struct pmap_alias {
         vm_offset_t rpc;
         pmap_t pmap;
-        vm_offset_t va;
+        vm_map_offset_t va;
         int cookie;
 #define PMAP_ALIAS_COOKIE 0xdeadbeef
 } pmap_aliasbuf[PMAP_ALIAS_MAX];
@@ -478,64 +487,182 @@ extern vm_offset_t get_rpc();
 #endif  /* DEBUG_ALIAS */
 
 /*
- *     Given an offset and a map, compute the address of the
- *     pte.  If the address is invalid with respect to the map
- *     then PT_ENTRY_NULL is returned (and the map may need to grow).
- *
- *     This is only used in machine-dependent code.
+ * for legacy, returns the address of the pde entry.
+ * for 64 bit, causes the pdpt page containing the pde entry to be mapped,
+ * then returns the mapped address of the pde entry in that page
  */
-
-pt_entry_t *
-pmap_pte(
-       register pmap_t         pmap,
-       register vm_offset_t    addr)
+pd_entry_t *
+pmap_pde(pmap_t m, vm_map_offset_t v)
 {
-       register pt_entry_t     *ptp;
-       register pt_entry_t     pte;
+  pd_entry_t *pde;
+       if (!cpu_64bit || (m == kernel_pmap)) {
+         pde = (&((m)->dirbase[(vm_offset_t)(v) >> PDESHIFT]));
+       } else {
+         assert(m);
+         assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+         pde = pmap64_pde(m, v);
+       }
+       return pde;
+}
 
-       pte = pmap->dirbase[pdenum(pmap, addr)];
-       if ((pte & INTEL_PTE_VALID) == 0)
-               return(PT_ENTRY_NULL);
-       ptp = (pt_entry_t *)ptetokv(pte);
-       return(&ptp[ptenum(addr)]);
 
+/*
+ * the single pml4 page per pmap is allocated at pmap create time and exists
+ * for the duration of the pmap. we allocate this page in kernel vm (to save us one
+ * level of page table dynamic mapping).
+ * this returns the address of the requested pml4 entry in the top level page.
+ */
+static inline
+pml4_entry_t *
+pmap64_pml4(pmap_t pmap, vm_map_offset_t vaddr)
+{
+  return ((pml4_entry_t *)pmap->pm_hold + ((vm_offset_t)((vaddr>>PML4SHIFT)&(NPML4PG-1))));
 }
 
-#define        pmap_pde(pmap, addr) (&(pmap)->dirbase[pdenum(pmap, addr)])
+/*
+ * maps in the pml4 page, if any, containing the pdpt entry requested
+ * and returns the address of the pdpt entry in that mapped page
+ */
+pdpt_entry_t *
+pmap64_pdpt(pmap_t pmap, vm_map_offset_t vaddr)
+{
+  pml4_entry_t newpf;
+  pml4_entry_t *pml4;
+  int i;
 
-#define DEBUG_PTE_PAGE 0
+  assert(pmap);
+  assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+  if ((vaddr > 0x00007FFFFFFFFFFFULL) && (vaddr < 0xFFFF800000000000ULL)) {
+    return(0);
+  }
 
-#if    DEBUG_PTE_PAGE
-void
-ptep_check(
-       ptep_t  ptep)
+  pml4 = pmap64_pml4(pmap, vaddr);
+
+       if (pml4 && ((*pml4 & INTEL_PTE_VALID))) {
+
+               newpf = *pml4 & PG_FRAME;
+
+
+               for (i=PMAP_PDPT_FIRST_WINDOW; i < PMAP_PDPT_FIRST_WINDOW+PMAP_PDPT_NWINDOWS; i++) {
+                 if (((*(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP)) & PG_FRAME) == newpf) {
+                 return((pdpt_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR) + 
+                        ((vm_offset_t)((vaddr>>PDPTSHIFT)&(NPDPTPG-1))));
+                 }
+               }
+
+                 current_cpu_datap()->cpu_pmap->pdpt_window_index++;
+                 if (current_cpu_datap()->cpu_pmap->pdpt_window_index > (PMAP_PDPT_FIRST_WINDOW+PMAP_PDPT_NWINDOWS-1))
+                   current_cpu_datap()->cpu_pmap->pdpt_window_index = PMAP_PDPT_FIRST_WINDOW;
+                 pmap_store_pte(
+                                (current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pdpt_window_index].prv_CMAP),
+                                newpf | INTEL_PTE_RW | INTEL_PTE_VALID);
+                 invlpg((u_int)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pdpt_window_index].prv_CADDR));
+                 return ((pdpt_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pdpt_window_index].prv_CADDR) +
+                         ((vm_offset_t)((vaddr>>PDPTSHIFT)&(NPDPTPG-1))));
+       }
+
+       return (0);
+}
+
+/*
+ * maps in the pdpt page, if any, containing the pde entry requested
+ * and returns the address of the pde entry in that mapped page
+ */
+pd_entry_t *
+pmap64_pde(pmap_t pmap, vm_map_offset_t vaddr)
 {
-       register pt_entry_t     *pte, *epte;
-       int                     ctu, ctw;
+  pdpt_entry_t newpf;
+  pdpt_entry_t *pdpt;
+  int i;
 
-       /* check the use and wired counts */
-       if (ptep == PTE_PAGE_NULL)
-               return;
-       pte = pmap_pte(ptep->pmap, ptep->va);
-       epte = pte + INTEL_PGBYTES/sizeof(pt_entry_t);
-       ctu = 0;
-       ctw = 0;
-       while (pte < epte) {
-               if (pte->pfn != 0) {
-                       ctu++;
-                       if (pte->wired)
-                               ctw++;
+  assert(pmap);
+  assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+  if ((vaddr > 0x00007FFFFFFFFFFFULL) && (vaddr < 0xFFFF800000000000ULL)) {
+    return(0);
+  }
+
+  /*  if (vaddr & (1ULL << 63)) panic("neg addr");*/
+  pdpt = pmap64_pdpt(pmap, vaddr);
+
+         if (pdpt && ((*pdpt & INTEL_PTE_VALID))) {
+
+               newpf = *pdpt & PG_FRAME;
+
+               for (i=PMAP_PDE_FIRST_WINDOW; i < PMAP_PDE_FIRST_WINDOW+PMAP_PDE_NWINDOWS; i++) {
+                 if (((*(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP)) & PG_FRAME) == newpf) {
+                 return((pd_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR) + 
+                        ((vm_offset_t)((vaddr>>PDSHIFT)&(NPDPG-1))));
+                 }
                }
-               pte += ptes_per_vm_page;
+
+                 current_cpu_datap()->cpu_pmap->pde_window_index++;
+                 if (current_cpu_datap()->cpu_pmap->pde_window_index > (PMAP_PDE_FIRST_WINDOW+PMAP_PDE_NWINDOWS-1))
+                   current_cpu_datap()->cpu_pmap->pde_window_index = PMAP_PDE_FIRST_WINDOW;
+                 pmap_store_pte(
+                                (current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pde_window_index].prv_CMAP),
+                                newpf | INTEL_PTE_RW | INTEL_PTE_VALID);
+                 invlpg((u_int)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pde_window_index].prv_CADDR));
+                 return ((pd_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pde_window_index].prv_CADDR) +
+                         ((vm_offset_t)((vaddr>>PDSHIFT)&(NPDPG-1))));
        }
 
-       if (ctu != ptep->use_count || ctw != ptep->wired_count) {
-               printf("use %d wired %d - actual use %d wired %d\n",
-                       ptep->use_count, ptep->wired_count, ctu, ctw);
-               panic("pte count");
+       return (0);
+}
+
+
+
+/*
+ * return address of mapped pte for vaddr va in pmap pmap.
+ * must be called with pre-emption or interrupts disabled
+ * if targeted pmap is not the kernel pmap
+ * since we may be passing back a virtual address that is
+ * associated with this cpu... pre-emption or interrupts
+ * must remain disabled until the caller is done using
+ * the pointer that was passed back.
+ *
+ * maps the pde page, if any, containing the pte in and returns
+ * the address of the pte in that mapped page
+ */
+pt_entry_t     *
+pmap_pte(pmap_t pmap, vm_map_offset_t vaddr)
+{
+        pd_entry_t     *pde;
+       pd_entry_t     newpf;
+       int i;
+
+       assert(pmap);
+       pde = pmap_pde(pmap,vaddr);
+
+       if (pde && ((*pde & INTEL_PTE_VALID))) {
+         if (pmap == kernel_pmap) {
+           return (vtopte(vaddr)); /* compat kernel still has pte's mapped */
+         }
+
+               assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
+               newpf = *pde & PG_FRAME;
+
+               for (i=PMAP_PTE_FIRST_WINDOW; i < PMAP_PTE_FIRST_WINDOW+PMAP_PTE_NWINDOWS; i++) {
+                 if (((*(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP)) & PG_FRAME) == newpf) {
+                 return((pt_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR) + 
+                        ((vm_offset_t)i386_btop(vaddr) & (NPTEPG-1)));
+                 }
+               }
+
+                 current_cpu_datap()->cpu_pmap->pte_window_index++;
+                 if (current_cpu_datap()->cpu_pmap->pte_window_index > (PMAP_PTE_FIRST_WINDOW+PMAP_PTE_NWINDOWS-1))
+                   current_cpu_datap()->cpu_pmap->pte_window_index = PMAP_PTE_FIRST_WINDOW;
+                 pmap_store_pte(
+                                (current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pte_window_index].prv_CMAP),
+                                newpf | INTEL_PTE_RW | INTEL_PTE_VALID);
+                 invlpg((u_int)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pte_window_index].prv_CADDR));
+                 return ((pt_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pte_window_index].prv_CADDR) +
+                         ((vm_offset_t)i386_btop(vaddr) & (NPTEPG-1)));
        }
+
+       return(0);
 }
-#endif /* DEBUG_PTE_PAGE */
+
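/*
 * Editor's note: pmap64_pdpt(), pmap64_pde() and pmap_pte() above all
 * repeat the same per-cpu mapping-window dance.  A condensed, hypothetical
 * helper (not in the source; the name, parameters and cpu_pmap_t shorthand
 * are made up) shows the idea; callers would add the entry index within
 * the returned page, as the three functions do:
 */
static inline caddr_t
pmap_window_lookup(int first, int nwindows, int *rr_index, pmap_paddr_t newpf)
{
	cpu_pmap_t	*cp = current_cpu_datap()->cpu_pmap;
	int		i;

	/* reuse a window that already maps this physical frame */
	for (i = first; i < first + nwindows; i++)
		if ((*cp->mapwindow[i].prv_CMAP & PG_FRAME) == newpf)
			return (cp->mapwindow[i].prv_CADDR);

	/* otherwise steal the next window round-robin, remap and flush it */
	if (++*rr_index > first + nwindows - 1)
		*rr_index = first;
	pmap_store_pte(cp->mapwindow[*rr_index].prv_CMAP,
		       newpf | INTEL_PTE_RW | INTEL_PTE_VALID);
	invlpg((u_int)cp->mapwindow[*rr_index].prv_CADDR);
	return (cp->mapwindow[*rr_index].prv_CADDR);
}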
 
 /*
  *     Map memory at initialization.  The physical addresses being
@@ -546,18 +673,20 @@ ptep_check(
  */
 vm_offset_t
 pmap_map(
-       register vm_offset_t    virt,
-       register vm_offset_t    start,
-       register vm_offset_t    end,
-       register vm_prot_t      prot)
+       vm_offset_t     virt,
+       vm_map_offset_t start_addr,
+       vm_map_offset_t end_addr,
+       vm_prot_t       prot,
+       unsigned int    flags)
 {
-       register int            ps;
+       int             ps;
 
        ps = PAGE_SIZE;
-       while (start < end) {
-               pmap_enter(kernel_pmap, virt, start, prot, 0, FALSE);
+       while (start_addr < end_addr) {
+               pmap_enter(kernel_pmap, (vm_map_offset_t)virt,
+                          (ppnum_t) i386_btop(start_addr), prot, flags, FALSE);
                virt += ps;
-               start += ps;
+               start_addr += ps;
        }
        return(virt);
 }
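/*
 * Editor's sketch of a call using the new signature; va and pa are
 * placeholder locals, and a flags value of 0 requests the default
 * cache attributes.
 */
	vm_offset_t	va;	/* a reserved kernel VA (placeholder) */
	vm_map_offset_t	pa;	/* physical start address (placeholder) */

	va = pmap_map(va, pa, pa + 2 * PAGE_SIZE,
		      VM_PROT_READ | VM_PROT_WRITE, 0);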
@@ -571,42 +700,187 @@ pmap_map(
  */
 vm_offset_t
 pmap_map_bd(
-       register vm_offset_t    virt,
-       register vm_offset_t    start,
-       register vm_offset_t    end,
-       vm_prot_t               prot)
+       vm_offset_t     virt,
+       vm_map_offset_t start_addr,
+       vm_map_offset_t end_addr,
+       vm_prot_t       prot,
+       unsigned int    flags)
 {
-       register pt_entry_t     template;
-       register pt_entry_t     *pte;
+       pt_entry_t      template;
+       pt_entry_t      *pte;
 
-       template = pa_to_pte(start)
-               | INTEL_PTE_NCACHE
+       template = pa_to_pte(start_addr)
                | INTEL_PTE_REF
                | INTEL_PTE_MOD
                | INTEL_PTE_WIRED
                | INTEL_PTE_VALID;
+
+       if(flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)) {
+           template |= INTEL_PTE_NCACHE;
+           if(!(flags & (VM_MEM_GUARDED | VM_WIMG_USE_DEFAULT)))
+                   template |= INTEL_PTE_PTA;
+       }
+
        if (prot & VM_PROT_WRITE)
            template |= INTEL_PTE_WRITE;
 
-       while (start < end) {
-               pte = pmap_pte(kernel_pmap, virt);
-               if (pte == PT_ENTRY_NULL)
+       while (start_addr < end_addr) {
+               pte = pmap_pte(kernel_pmap, (vm_map_offset_t)virt);
+               if (pte == PT_ENTRY_NULL) {
                        panic("pmap_map_bd: Invalid kernel address\n");
-               WRITE_PTE_FAST(pte, template)
+               }
+               pmap_store_pte(pte, template);
                pte_increment_pa(template);
                virt += PAGE_SIZE;
-               start += PAGE_SIZE;
+               start_addr += PAGE_SIZE;
        }
 
-       PMAP_FLUSH_TLBS();
-
+       flush_tlb();
        return(virt);
 }
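/*
 * Editor's note on the flag handling above: VM_MEM_NOT_CACHEABLE (or
 * VM_WIMG_USE_DEFAULT) sets INTEL_PTE_NCACHE, and INTEL_PTE_PTA (the PAT
 * select bit) is also set unless VM_MEM_GUARDED or VM_WIMG_USE_DEFAULT
 * was passed, letting the PAT choose a non write-back memory type.
 */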
 
-extern int             cnvmem;
 extern char            *first_avail;
 extern vm_offset_t     virtual_avail, virtual_end;
-extern vm_offset_t     avail_start, avail_end, avail_next;
+extern pmap_paddr_t    avail_start, avail_end;
+extern  vm_offset_t     etext;
+extern  void            *sectHIBB;
+extern  int             sectSizeHIB;
+
+
+vm_offset_t
+pmap_high_shared_remap(enum high_fixed_addresses e, vm_offset_t va, int sz)
+{
+  vm_offset_t ve = pmap_index_to_virt(e);
+  pt_entry_t *ptep;
+  pmap_paddr_t pa;
+  int i;
+
+  assert(0 == (va & PAGE_MASK));  /* expecting page aligned */
+  ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)ve);
+
+  for (i=0; i< sz; i++) {
+    pa = (pmap_paddr_t) kvtophys(va);
+    pmap_store_pte(ptep, (pa & PG_FRAME)
+                               | INTEL_PTE_VALID
+                               | INTEL_PTE_GLOBAL
+                               | INTEL_PTE_RW
+                               | INTEL_PTE_REF
+                               | INTEL_PTE_MOD);
+    va+= PAGE_SIZE;
+    ptep++;
+  }
+  return ve;
+}
+
+vm_offset_t
+pmap_cpu_high_shared_remap(int cpu, enum high_cpu_types e, vm_offset_t va, int sz)
+{ 
+  enum high_fixed_addresses    a = e + HIGH_CPU_END * cpu;
+  return pmap_high_shared_remap(HIGH_FIXED_CPUS_BEGIN + a, va, sz);
+}
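/*
 * Editor's worked example of the slot arithmetic above (HIGH_CPU_DESC is
 * a hypothetical enumerator): for cpu 2,
 *	a  = HIGH_CPU_DESC + HIGH_CPU_END * 2
 *	ve = pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a)
 * so every cpu owns a contiguous block of HIGH_CPU_END fixed slots above
 * HIGH_FIXED_CPUS_BEGIN.
 */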
+
+void pmap_init_high_shared(void);
+
+extern vm_offset_t gdtptr, idtptr;
+
+extern uint32_t low_intstack;
+
+extern struct fake_descriptor ldt_desc_pattern;
+extern struct fake_descriptor tss_desc_pattern;
+
+extern char hi_remap_text, hi_remap_etext;
+extern char t_zero_div;
+
+pt_entry_t *pte_unique_base;
+
+void
+pmap_init_high_shared(void)
+{
+
+       vm_offset_t haddr;
+        struct __gdt_desc_struct gdt_desc = {0,0,0};
+       struct __idt_desc_struct idt_desc = {0,0,0};
+#if MACH_KDB
+       struct i386_tss *ttss;
+#endif
+
+       kprintf("HIGH_MEM_BASE 0x%x fixed per-cpu begin 0x%x\n", 
+               HIGH_MEM_BASE,pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN));
+       pte_unique_base = pmap_pte(kernel_pmap, (vm_map_offset_t)pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN));
+
+       if (i386_btop(&hi_remap_etext - &hi_remap_text + 1) >
+                               HIGH_FIXED_TRAMPS_END - HIGH_FIXED_TRAMPS + 1)
+               panic("tramps too large");
+       haddr = pmap_high_shared_remap(HIGH_FIXED_TRAMPS,
+                                       (vm_offset_t) &hi_remap_text, 3);
+       kprintf("tramp: 0x%x, ",haddr);
+       printf("hi mem tramps at 0x%x\n",haddr);
+       /* map gdt up high and update ptr for reload */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_GDT,
+                                       (vm_offset_t) master_gdt, 1);
+       __asm__ __volatile__("sgdt %0": "=m" (gdt_desc): :"memory");
+       gdt_desc.address = haddr;
+       kprintf("GDT: 0x%x, ",haddr);
+       /* map ldt up high */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_LDT_BEGIN,
+                                       (vm_offset_t) master_ldt,
+                                       HIGH_FIXED_LDT_END - HIGH_FIXED_LDT_BEGIN + 1);
+       kprintf("LDT: 0x%x, ",haddr);
+       /* put new ldt addr into gdt */
+       master_gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
+       master_gdt[sel_idx(KERNEL_LDT)].offset = (vm_offset_t) haddr;
+       fix_desc(&master_gdt[sel_idx(KERNEL_LDT)], 1);
+       master_gdt[sel_idx(USER_LDT)] = ldt_desc_pattern;
+       master_gdt[sel_idx(USER_LDT)].offset = (vm_offset_t) haddr;
+       fix_desc(&master_gdt[sel_idx(USER_LDT)], 1);
+
+       /* map idt up high */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_IDT,
+                                       (vm_offset_t) master_idt, 1);
+       __asm__ __volatile__("sidt %0" : "=m" (idt_desc));
+       idt_desc.address = haddr;
+       kprintf("IDT: 0x%x, ", haddr);
+       /* remap ktss up high and put new high addr into gdt */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_KTSS,
+                                       (vm_offset_t) &master_ktss, 1);
+       master_gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
+       master_gdt[sel_idx(KERNEL_TSS)].offset = (vm_offset_t) haddr;
+       fix_desc(&master_gdt[sel_idx(KERNEL_TSS)], 1);
+       kprintf("KTSS: 0x%x, ",haddr);
+#if MACH_KDB
+       /* remap dbtss up high and put new high addr into gdt */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_DBTSS,
+                                       (vm_offset_t) &master_dbtss, 1);
+       master_gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
+       master_gdt[sel_idx(DEBUG_TSS)].offset = (vm_offset_t) haddr;
+       fix_desc(&master_gdt[sel_idx(DEBUG_TSS)], 1);
+       ttss = (struct i386_tss *)haddr;
+       kprintf("DBTSS: 0x%x, ",haddr);
+#endif /* MACH_KDB */
+
+       /* remap dftss up high and put new high addr into gdt */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_DFTSS,
+                                       (vm_offset_t) &master_dftss, 1);
+       master_gdt[sel_idx(DF_TSS)] = tss_desc_pattern;
+       master_gdt[sel_idx(DF_TSS)].offset = (vm_offset_t) haddr;
+       fix_desc(&master_gdt[sel_idx(DF_TSS)], 1);
+       kprintf("DFTSS: 0x%x\n",haddr);
+
+       /* remap mctss up high and put new high addr into gdt */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_DFTSS,
+                                       (vm_offset_t) &master_mctss, 1);
+       master_gdt[sel_idx(MC_TSS)] = tss_desc_pattern;
+       master_gdt[sel_idx(MC_TSS)].offset = (vm_offset_t) haddr;
+       fix_desc(&master_gdt[sel_idx(MC_TSS)], 1);
+       kprintf("MCTSS: 0x%x\n",haddr);
+
+       __asm__ __volatile__("lgdt %0": "=m" (gdt_desc));
+       __asm__ __volatile__("lidt %0": "=m" (idt_desc));
+       kprintf("gdt/idt reloaded, ");
+       set_tr(KERNEL_TSS);
+       kprintf("tr reset to KERNEL_TSS\n");
+}
+
 
 /*
  *     Bootstrap the system enough to run with virtual memory.
@@ -629,171 +903,197 @@ extern  vm_offset_t     avail_start, avail_end, avail_next;
 
 void
 pmap_bootstrap(
-       vm_offset_t     load_start)
+       __unused vm_offset_t    load_start,
+       boolean_t               IA32e)
 {
-       vm_offset_t     va, tva, paddr;
-       pt_entry_t      template;
-       pt_entry_t      *pde, *pte, *ptend;
-       vm_size_t       morevm;         /* VM space for kernel map */
-
-       /*
-        *      Set ptes_per_vm_page for general use.
-        */
-       ptes_per_vm_page = PAGE_SIZE / INTEL_PGBYTES;
-
+       vm_offset_t     va;
+       pt_entry_t      *pte;
+       int i;
+       int wpkernel, boot_arg;
+       pdpt_entry_t *pdpt;
+
+       vm_last_addr = VM_MAX_KERNEL_ADDRESS;   /* Set the highest address
+                                                * known to VM */
        /*
         *      The kernel's pmap is statically allocated so we don't
         *      have to use pmap_create, which is unlikely to work
         *      correctly at this part of the boot sequence.
         */
 
+
        kernel_pmap = &kernel_pmap_store;
+       kernel_pmap->ref_count = 1;
+       kernel_pmap->nx_enabled = FALSE;
+       kernel_pmap->pm_64bit = 0;
+       kernel_pmap->pm_obj = (vm_object_t) NULL;
+       kernel_pmap->dirbase = (pd_entry_t *)((unsigned int)IdlePTD | KERNBASE);
+       kernel_pmap->pdirbase = (pmap_paddr_t)((int)IdlePTD);
+       pdpt = (pd_entry_t *)((unsigned int)IdlePDPT | KERNBASE );
+       kernel_pmap->pm_pdpt = pdpt;
+       kernel_pmap->pm_cr3 = (pmap_paddr_t)((int)IdlePDPT);
+
+       va = (vm_offset_t)kernel_pmap->dirbase;
+       /* setup self referential mapping(s) */
+       for (i = 0; i< NPGPTD; i++, pdpt++) {
+         pmap_paddr_t pa;
+         pa = (pmap_paddr_t) kvtophys(va + i386_ptob(i));
+         pmap_store_pte(
+           (pd_entry_t *) (kernel_pmap->dirbase + PTDPTDI + i),
+           (pa & PG_FRAME) | INTEL_PTE_VALID | INTEL_PTE_RW | INTEL_PTE_REF |
+             INTEL_PTE_MOD | INTEL_PTE_WIRED) ;
+         pmap_store_pte(pdpt, pa | INTEL_PTE_VALID);
+       }
 
-#if    NCPUS > 1
-       lock_init(&pmap_system_lock,
-                 FALSE,                /* NOT a sleep lock */
-                 ETAP_VM_PMAP_SYS,
-                 ETAP_VM_PMAP_SYS_I);
-#endif /* NCPUS > 1 */
+       cpu_64bit = IA32e;
+       
+       lo_kernel_cr3 = kernel_pmap->pm_cr3;
+       current_cpu_datap()->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3;
 
-       simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL);
-       simple_lock_init(&pv_free_list_lock, ETAP_VM_PMAP_FREE);
+       /* save the value we stuff into created pmaps to share the gdts etc */
+       high_shared_pde = *pmap_pde(kernel_pmap, HIGH_MEM_BASE);
+       /* make sure G bit is on for high shared pde entry */
+       high_shared_pde |= INTEL_PTE_GLOBAL;
+       pmap_store_pte(pmap_pde(kernel_pmap, HIGH_MEM_BASE), high_shared_pde);
 
-       kernel_pmap->ref_count = 1;
+       nkpt = NKPT;
+       inuse_ptepages_count += NKPT;
 
-       /*
-        *      The kernel page directory has been allocated;
-        *      its virtual address is in kpde.
-        *
-        *      Enough kernel page table pages have been allocated
-        *      to map low system memory, kernel text, kernel data/bss,
-        *      kdb's symbols, and the page directory and page tables.
-        *
-        *      No other physical memory has been allocated.
-        */
+       virtual_avail = (vm_offset_t)VADDR(KPTDI,0) + (vm_offset_t)first_avail;
+       virtual_end = (vm_offset_t)(VM_MAX_KERNEL_ADDRESS);
 
        /*
-        * Start mapping virtual memory to physical memory, 1-1,
-        * at end of mapped memory.
+        * Reserve some special page table entries/VA space for temporary
+        * mapping of pages.
         */
+#define        SYSMAP(c, p, v, n)      \
+       v = (c)va; va += ((n)*INTEL_PGBYTES); p = pte; pte += (n)
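/*
 * Editor's note: written out, SYSMAP(caddr_t, DMAP1, DADDR1, 1) below
 * expands to roughly
 *	DADDR1 = (caddr_t)va;  va += 1 * INTEL_PGBYTES;
 *	DMAP1  = pte;          pte += 1;
 * i.e. it hands back one reserved kernel VA page (DADDR1) together with
 * the pte slot (DMAP1) that maps it.
 */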
 
-       virtual_avail = phystokv(avail_start);
-       virtual_end = phystokv(avail_end);
-
-       pde = kpde;
-       pde += pdenum(kernel_pmap, virtual_avail);
-
-       if (pte_to_pa(*pde) == 0) {
-           /* This pte has not been allocated */
-           pte = 0; ptend = 0;
-       }
-       else {
-           pte = (pt_entry_t *)ptetokv(*pde);
-                                               /* first pte of page */
-           ptend = pte+NPTES;                  /* last pte of page */
-           pte += ptenum(virtual_avail);       /* point to pte that
-                                                  maps first avail VA */
-           pde++;      /* point pde to first empty slot */
-       }
-
-       template = pa_to_pte(avail_start)
-               | INTEL_PTE_VALID
-               | INTEL_PTE_WRITE;
-
-       for (va = virtual_avail; va < virtual_end; va += INTEL_PGBYTES) {
-           if (pte >= ptend) {
-               pte = (pt_entry_t *)phystokv(virtual_avail);
-               ptend = pte + NPTES;
-               virtual_avail = (vm_offset_t)ptend;
-               if (virtual_avail == hole_start)
-                 virtual_avail = hole_end;
-               *pde = PA_TO_PTE((vm_offset_t) pte)
-                       | INTEL_PTE_VALID
-                       | INTEL_PTE_WRITE;
-               pde++;
-           }
-           WRITE_PTE_FAST(pte, template)
-           pte++;
-           pte_increment_pa(template);
-       }
+       va = virtual_avail;
+       pte = vtopte(va);
 
-       avail_start = virtual_avail - VM_MIN_KERNEL_ADDRESS;
-       avail_next = avail_start;
+        for (i=0; i<PMAP_NWINDOWS; i++) {
+            SYSMAP(caddr_t,
+                  (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP),
+                   (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR),
+                  1);
+            *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0;
+        }
 
-       /*
-        *      Figure out maximum kernel address.
-        *      Kernel virtual space is:
-        *              - at least three times physical memory
-        *              - at least VM_MIN_KERNEL_ADDRESS
-        *              - limited by VM_MAX_KERNEL_ADDRESS
-        */
+       /* DMAP user for debugger */
+       SYSMAP(caddr_t, DMAP1, DADDR1, 1);
+       SYSMAP(caddr_t, DMAP2, DADDR2, 1);  /* XXX temporary - can remove */
 
-       morevm = 3*avail_end;
-       if (virtual_end + morevm > VM_MAX_KERNEL_ADDRESS)
-         morevm = VM_MAX_KERNEL_ADDRESS - virtual_end + 1;
 
-/*
- *     startup requires additional virtual memory (for tables, buffers, 
- *     etc.).  The kd driver may also require some of that memory to
- *     access the graphics board.
- *
- */
-       *(int *)&template = 0;
+       lock_init(&pmap_system_lock,
+                 FALSE,                /* NOT a sleep lock */
+                 0, 0);
 
-       /*
-        * Leave room for kernel-loaded servers, which have been linked at
-        * addresses from VM_MIN_KERNEL_LOADED_ADDRESS to
-        * VM_MAX_KERNEL_LOADED_ADDRESS.
+       virtual_avail = va;
+
+       wpkernel = 1;
+       if (PE_parse_boot_arg("wpkernel", &boot_arg)) {
+               if (boot_arg == 0)
+                       wpkernel = 0;
+       }
+
+       /* Remap kernel text readonly unless the "wpkernel" boot-arg is present
+        * and set to 0.
         */
-       if (virtual_end + morevm < VM_MAX_KERNEL_LOADED_ADDRESS + 1)
-               morevm = VM_MAX_KERNEL_LOADED_ADDRESS + 1 - virtual_end;
-
-
-       virtual_end += morevm;
-       for (tva = va; tva < virtual_end; tva += INTEL_PGBYTES) {
-           if (pte >= ptend) {
-               pmap_next_page(&paddr);
-               pte = (pt_entry_t *)phystokv(paddr);
-               ptend = pte + NPTES;
-               *pde = PA_TO_PTE((vm_offset_t) pte)
-                       | INTEL_PTE_VALID
-                       | INTEL_PTE_WRITE;
-               pde++;
-           }
-           WRITE_PTE_FAST(pte, template)
-           pte++;
+       if (wpkernel)
+       {
+               vm_offset_t     myva;
+               pt_entry_t     *ptep;
+
+               for (myva = i386_round_page(MP_BOOT + MP_BOOTSTACK); myva < etext; myva += PAGE_SIZE) {
+                        if (myva >= (vm_offset_t)sectHIBB && myva < ((vm_offset_t)sectHIBB + sectSizeHIB))
+                                continue;
+                       ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
+                       if (ptep)
+                               pmap_store_pte(ptep, *ptep & ~INTEL_PTE_RW);
+               }
        }
 
-       virtual_avail = va;
+       /* no matter what,  kernel page zero is not accessible */
+       pte = pmap_pte(kernel_pmap, 0);
+       pmap_store_pte(pte, INTEL_PTE_INVALID);
 
-       /* Push the virtual avail address above hole_end */
-       if (virtual_avail < hole_end)
-               virtual_avail = hole_end;
+       /* map lowmem global page into fixed addr 0x2000 */
+       if (0 == (pte = pmap_pte(kernel_pmap,0x2000))) panic("lowmem pte");
 
-       /*
-        *      c.f. comment above
-        *
-        */
-       virtual_end = va + morevm;
-       while (pte < ptend)
-           *pte++ = 0;
+       pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)|INTEL_PTE_VALID|INTEL_PTE_REF|INTEL_PTE_MOD|INTEL_PTE_WIRED|INTEL_PTE_RW);
+       flush_tlb();
 
-       /*
-        *      invalidate user virtual addresses 
-        */
-       memset((char *)kpde,
-              0,
-              pdenum(kernel_pmap,VM_MIN_KERNEL_ADDRESS)*sizeof(pt_entry_t));
-       kernel_pmap->dirbase = kpde;
-       printf("Kernel virtual space from 0x%x to 0x%x.\n",
-                       VM_MIN_KERNEL_ADDRESS, virtual_end);
-
-       avail_start = avail_next;
-       printf("Available physical space from 0x%x to 0x%x\n",
-                       avail_start, avail_end);
+       simple_lock_init(&kernel_pmap->lock, 0);
+       simple_lock_init(&pv_free_list_lock, 0);
+
+        pmap_init_high_shared();
+
+       pde_mapped_size = PDE_MAPPED_SIZE;
+
+       if (cpu_64bit) {
+         pdpt_entry_t *ppdpt   = (pdpt_entry_t *)IdlePDPT;
+         pdpt_entry_t *ppdpt64 = (pdpt_entry_t *)IdlePDPT64;
+         pdpt_entry_t *ppml4   = (pdpt_entry_t *)IdlePML4;
+         int istate = ml_set_interrupts_enabled(FALSE);
+
+         /*
+          * Clone a new 64-bit 3rd-level page table directory, IdlePML4,
+          * with page bits set for the correct IA-32e operation and so that
+          * the legacy-mode IdlePDPT is retained for slave processor start-up.
+          * This is necessary due to the incompatible use of page bits between
+          * 64-bit and legacy modes.
+          */
+         kernel_pmap->pm_cr3 = (pmap_paddr_t)((int)IdlePML4); /* setup in start.s for us */
+         kernel_pmap->pm_pml4 = IdlePML4;
+         kernel_pmap->pm_pdpt = (pd_entry_t *)
+                                       ((unsigned int)IdlePDPT64 | KERNBASE );
+#define PAGE_BITS INTEL_PTE_VALID|INTEL_PTE_RW|INTEL_PTE_USER|INTEL_PTE_REF
+         pmap_store_pte(kernel_pmap->pm_pml4,
+                        (uint32_t)IdlePDPT64 | PAGE_BITS);
+         pmap_store_pte((ppdpt64+0), *(ppdpt+0) | PAGE_BITS);
+         pmap_store_pte((ppdpt64+1), *(ppdpt+1) | PAGE_BITS);
+         pmap_store_pte((ppdpt64+2), *(ppdpt+2) | PAGE_BITS);
+         pmap_store_pte((ppdpt64+3), *(ppdpt+3) | PAGE_BITS);
+
+         /*
+          * The kernel is also mapped in the uber-space, in the 4GB starting at
+          * 0xFFFFFF80:00000000. This is the highest entry in the 4th-level.
+          */
+         pmap_store_pte((ppml4+KERNEL_UBER_PML4_INDEX), *(ppml4+0));
+
+         kernel64_cr3 = (addr64_t) kernel_pmap->pm_cr3;
+         cpu_IA32e_enable(current_cpu_datap());
+         current_cpu_datap()->cpu_is64bit = TRUE;
+         /* welcome to a 64 bit world */
+
+         /* Re-initialize and load descriptors */
+         cpu_desc_init64(&cpu_data_master, TRUE);
+         cpu_desc_load64(&cpu_data_master);
+         fast_syscall_init64();
+
+         pde_mapped_size = 512*4096 ; 
+
+         ml_set_interrupts_enabled(istate);
+
+       }
+       kernel_pmap->pm_hold = (vm_offset_t)kernel_pmap->pm_pml4;
+
+       kprintf("Kernel virtual space from 0x%x to 0x%x.\n",
+                       VADDR(KPTDI,0), virtual_end);
+       printf("PAE enabled\n");
+       if (cpu_64bit){
+         printf("64 bit mode enabled\n");kprintf("64 bit mode enabled\n"); }
 
-       kernel_pmap->pdirbase = kvtophys((vm_offset_t)kernel_pmap->dirbase);
+       kprintf("Available physical space from 0x%llx to 0x%llx\n",
+                       avail_start, avail_end);
 
+       /*
+        * By default for 64-bit users loaded at 4GB, share kernel mapping.
+        * But this may be overridden by the -no_shared_cr3 boot-arg.
+        */
+       if (PE_parse_boot_arg("-no_shared_cr3", &no_shared_cr3)) {
+               kprintf("Shared kernel address space disabled\n");
+       }
 }
 
 void
@@ -816,14 +1116,17 @@ pmap_init(void)
        register long           npages;
        vm_offset_t             addr;
        register vm_size_t      s;
-       int                     i;
+       vm_map_offset_t         vaddr;
+       ppnum_t                 ppn;
 
        /*
         *      Allocate memory for the pv_head_table and its lock bits,
         *      the modify bit array, and the pte_page table.
         */
 
-       npages = atop(avail_end - avail_start);
+       /* zero bias all these arrays now instead of off avail_start
+          so we cover all memory */
+       npages = i386_btop(avail_end);
        s = (vm_size_t) (sizeof(struct pv_entry) * npages
                                + pv_lock_table_size(npages)
                                + npages);
@@ -853,6 +1156,8 @@ pmap_init(void)
        pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
        s = (vm_size_t) sizeof(struct pv_entry);
        pv_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */
+       s = 63;
+       pdpt_zone = zinit(s, 400*s, 4096, "pdpt"); /* XXX */
 
        /*
         *      Only now, when all of the data structures are allocated,
@@ -861,8 +1166,30 @@ pmap_init(void)
         *      data structures and blow up.
         */
 
-       vm_first_phys = avail_start;
+       /* zero bias this now so we cover all memory */
+       vm_first_phys = 0;
        vm_last_phys = avail_end;
+
+       kptobj = &kptobj_object_store;
+       _vm_object_allocate((vm_object_size_t)NKPDE, kptobj);
+       kernel_pmap->pm_obj = kptobj;
+
+       /* create pv entries for kernel pages mapped by low level
+          startup code.  these have to exist so we can pmap_remove()
+          e.g. kext pages from the middle of our addr space */
+
+       vaddr = (vm_map_offset_t)0;
+       for (ppn = 0; ppn < i386_btop(avail_start) ; ppn++ ) {
+         pv_entry_t    pv_e;
+
+         pv_e = pai_to_pvh(ppn);
+         pv_e->va = vaddr;
+         vaddr += PAGE_SIZE;
+         kernel_pmap->stats.resident_count++;
+         pv_e->pmap = kernel_pmap;
+         pv_e->next = PV_ENTRY_NULL;
+       }
+
        pmap_initialized = TRUE;
 
        /*
@@ -870,29 +1197,40 @@ pmap_init(void)
         */
        pmap_cache_list = PMAP_NULL;
        pmap_cache_count = 0;
-       simple_lock_init(&pmap_cache_lock, ETAP_VM_PMAP_CACHE);
+       simple_lock_init(&pmap_cache_lock, 0);
 }
 
+void
+x86_lowmem_free(void)
+{
+       /* free lowmem pages back to the vm system. we had to defer doing this
+          until the vm system was fully up.
+          the actual pages that are released are determined by which
+          pages the memory sizing code puts into the region table */
 
-#define        pmap_valid_page(x)      ((avail_start <= x) && (x < avail_end))
+       ml_static_mfree((vm_offset_t) i386_ptob(pmap_memory_regions[0].base),
+                       (vm_size_t) i386_ptob(pmap_memory_regions[0].end - pmap_memory_regions[0].base));
+}
 
 
 #define valid_page(x) (pmap_initialized && pmap_valid_page(x))
 
 boolean_t
 pmap_verify_free(
-       vm_offset_t     phys)
+                ppnum_t pn)
 {
+        pmap_paddr_t   phys;
        pv_entry_t      pv_h;
        int             pai;
        spl_t           spl;
        boolean_t       result;
 
-       assert(phys != vm_page_fictitious_addr);
+       assert(pn != vm_page_fictitious_addr);
+       phys = (pmap_paddr_t)i386_ptob(pn);
        if (!pmap_initialized)
                return(TRUE);
 
-       if (!pmap_valid_page(phys))
+       if (!pmap_valid_page(pn))
                return(FALSE);
 
        PMAP_WRITE_LOCK(spl);
@@ -920,10 +1258,20 @@ pmap_verify_free(
  */
 pmap_t
 pmap_create(
-       vm_size_t       size)
+           vm_map_size_t       sz,
+           boolean_t           is_64bit)
 {
-       register pmap_t                 p;
-       register pmap_statistics_t      stats;
+        register pmap_t                        p;
+       int             i;
+       vm_offset_t     va;
+       vm_size_t       size;
+       pdpt_entry_t    *pdpt;
+       pml4_entry_t    *pml4p;
+       int template;
+       pd_entry_t      *pdp;
+       spl_t s;
+
+       size = (vm_size_t) sz;
 
        /*
         *      A software use-only map doesn't even need a map.
@@ -933,86 +1281,169 @@ pmap_create(
                return(PMAP_NULL);
        }
 
-       /*
-        *      Try to get cached pmap, if this fails,
-        *      allocate a pmap struct from the pmap_zone.  Then allocate
-        *      the page descriptor table from the pd_zone.
-        */
+       p = (pmap_t) zalloc(pmap_zone);
+       if (PMAP_NULL == p)
+         panic("pmap_create zalloc");
+
+       /* init counts now since we'll be bumping some */
+       simple_lock_init(&p->lock, 0);
+       p->stats.resident_count = 0;
+       p->stats.wired_count = 0;
+       p->ref_count = 1;
+       p->nx_enabled = 1;
+       p->pm_64bit = is_64bit;
+       p->pm_kernel_cr3 = FALSE;
+       p->pm_shared = FALSE;
+
+       if (!cpu_64bit) {
+         /* legacy 32 bit setup */
+         /* in the legacy case the pdpt layer is hardwired to 4 entries and each
+          * entry covers 1GB of addr space */
+         if (KERN_SUCCESS != kmem_alloc_wired(kernel_map, (vm_offset_t *)(&p->dirbase), NBPTD))
+           panic("pmap_create kmem_alloc_wired");
+         p->pm_hold = (vm_offset_t)zalloc(pdpt_zone);
+         if ((vm_offset_t)NULL == p->pm_hold) {
+           panic("pdpt zalloc");
+         }
+         pdpt = (pdpt_entry_t *) (( p->pm_hold + 31) & ~31);
+         p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)pdpt);
+         if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPGPTD*NPTDPG))))
+           panic("pmap_create vm_object_allocate");
+
+         memset((char *)p->dirbase, 0, NBPTD);
+
+         va = (vm_offset_t)p->dirbase;
+         p->pdirbase = kvtophys(va);
+
+         template = cpu_64bit ? INTEL_PTE_VALID|INTEL_PTE_RW|INTEL_PTE_USER|INTEL_PTE_REF : INTEL_PTE_VALID;
+         for (i = 0; i< NPGPTD; i++, pdpt++) {
+           pmap_paddr_t pa;
+           pa = (pmap_paddr_t) kvtophys(va + i386_ptob(i));
+           pmap_store_pte(pdpt, pa | template);
+         }
+
+         /* map the high shared pde */
+         pmap_store_pte(pmap_pde(p, HIGH_MEM_BASE), high_shared_pde);
+
+       } else {
 
-       simple_lock(&pmap_cache_lock);
-       while ((p = pmap_cache_list) == PMAP_NULL) {
+         /* 64 bit setup  */
 
-               vm_offset_t             dirbases;
-               register int            i;
+         /* alloc the pml4 page in kernel vm */
+         if (KERN_SUCCESS != kmem_alloc_wired(kernel_map, (vm_offset_t *)(&p->pm_hold), PAGE_SIZE))
+           panic("pmap_create kmem_alloc_wired pml4");
 
-               simple_unlock(&pmap_cache_lock);
+         memset((char *)p->pm_hold, 0, PAGE_SIZE);
+         p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_hold);
+
+         inuse_ptepages_count++;
+         p->stats.resident_count++;
+         p->stats.wired_count++;
+
+       /* allocate the vm_objs to hold the pdpt, pde and pte pages */
+
+       if (NULL == (p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS))))
+         panic("pmap_create pml4 obj");
+
+       if (NULL == (p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS))))
+         panic("pmap_create pdpt obj");
+
+       if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS))))
+         panic("pmap_create pte obj");
+
+       /* uber space points to uber mapped kernel */
+       s = splhigh();
+       pml4p = pmap64_pml4(p, 0ULL);
+       pmap_store_pte((pml4p+KERNEL_UBER_PML4_INDEX),*kernel_pmap->pm_pml4);
+       if (!is_64bit) {
+         while ((pdp = pmap64_pde(p, (uint64_t)HIGH_MEM_BASE)) == PD_ENTRY_NULL) {
+           splx(s);
+           pmap_expand_pdpt(p, (uint64_t)HIGH_MEM_BASE); /* need room for another pde entry */
+           s = splhigh();
+         }
+         pmap_store_pte(pdp, high_shared_pde);
+       }
+
+       splx(s);
+       }
+
+       return(p);
+}
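/*
 * Editor's sketch of the create/destroy pairing with the new is_64bit
 * argument.  A non-zero size asks for a software-only map and simply
 * returns PMAP_NULL, so real hardware pmaps are created with size 0.
 */
	pmap_t	new_pmap;

	new_pmap = pmap_create((vm_map_size_t)0, TRUE);	/* TRUE => 64-bit user pmap */
	/* ... populate via pmap_enter(), attach to a vm_map, etc. ... */
	pmap_destroy(new_pmap);				/* drops the create reference */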
+
+void
+pmap_set_4GB_pagezero(pmap_t p)
+{
+       int             spl;
+       pdpt_entry_t    *user_pdptp;
+       pdpt_entry_t    *kern_pdptp;
+
+       assert(p->pm_64bit);
+
+       /* Kernel-shared cr3 may be disabled by boot arg. */
+       if (no_shared_cr3)
+               return;
 
-#if    NCPUS > 1
        /*
-        * XXX  NEEDS MP DOING ALLOC logic so that if multiple processors
-        * XXX  get here, only one allocates a chunk of pmaps.
-        * (for now we'll just let it go - safe but wasteful)
+        * Set the bottom 4 3rd-level pte's to be the kernel's.
         */
-#endif
+       spl = splhigh();
+       while ((user_pdptp = pmap64_pdpt(p, 0x0)) == PDPT_ENTRY_NULL) {
+               splx(spl);
+               pmap_expand_pml4(p, 0x0);
+               spl = splhigh();
+       }
+       kern_pdptp = kernel_pmap->pm_pdpt;
+       pmap_store_pte(user_pdptp+0, *(kern_pdptp+0));
+       pmap_store_pte(user_pdptp+1, *(kern_pdptp+1));
+       pmap_store_pte(user_pdptp+2, *(kern_pdptp+2));
+       pmap_store_pte(user_pdptp+3, *(kern_pdptp+3));
 
-               /*
-                *      Allocate a chunck of pmaps.  Single kmem_alloc_wired
-                *      operation reduces kernel map fragmentation.
-                */
+       p->pm_kernel_cr3 = TRUE;
 
-               if (kmem_alloc_wired(kernel_map, &dirbases,
-                                    pmap_alloc_chunk * INTEL_PGBYTES)
-                                                       != KERN_SUCCESS)
-                       panic("pmap_create.1");
+       splx(spl);
 
-               for (i = pmap_alloc_chunk; i > 0 ; i--) {
-                       p = (pmap_t) zalloc(pmap_zone);
-                       if (p == PMAP_NULL)
-                               panic("pmap_create.2");
+}
 
-                       /*
-                        *      Initialize pmap.  Don't bother with
-                        *      ref count as cache list is threaded
-                        *      through it.  It'll be set on cache removal.
-                        */
-                       p->dirbase = (pt_entry_t *) dirbases;
-                       dirbases += INTEL_PGBYTES;
-                       memcpy(p->dirbase, kpde, INTEL_PGBYTES);
-                       p->pdirbase = kvtophys((vm_offset_t)p->dirbase);
+void
+pmap_load_kernel_cr3(void)
+{
+       uint32_t        kernel_cr3;
 
-                       simple_lock_init(&p->lock, ETAP_VM_PMAP);
-                       p->cpus_using = 0;
+       assert(!ml_get_interrupts_enabled());
 
-                       /*
-                        *      Initialize statistics.
-                        */
-                       stats = &p->stats;
-                       stats->resident_count = 0;
-                       stats->wired_count = 0;
-                       
-                       /*
-                        *      Insert into cache
-                        */
-                       simple_lock(&pmap_cache_lock);
-                       p->ref_count = (int) pmap_cache_list;
-                       pmap_cache_list = p;
-                       pmap_cache_count++;
-                       simple_unlock(&pmap_cache_lock);
-               }
-               simple_lock(&pmap_cache_lock);
-       }
+       /*
+        * Reload cr3 with the true kernel cr3.
+        * Note: kernel's pml4 resides below 4GB physical.
+        */
+       kernel_cr3 = current_cpu_datap()->cpu_kernel_cr3;
+       set_cr3(kernel_cr3);
+       current_cpu_datap()->cpu_active_cr3 = kernel_cr3;
+       current_cpu_datap()->cpu_task_map = TASK_MAP_32BIT;
+       current_cpu_datap()->cpu_tlb_invalid = FALSE;
+       __asm__ volatile("mfence");
+}
 
-       assert(p->stats.resident_count == 0);
-       assert(p->stats.wired_count == 0);
-       p->stats.resident_count = 0;
-       p->stats.wired_count = 0;
+void
+pmap_clear_4GB_pagezero(pmap_t p)
+{
+       int             spl;
+       pdpt_entry_t    *user_pdptp;
 
-       pmap_cache_list = (pmap_t) p->ref_count;
-       p->ref_count = 1;
-       pmap_cache_count--;
-       simple_unlock(&pmap_cache_lock);
+       if (!p->pm_kernel_cr3)
+               return;
 
-       return(p);
+       spl = splhigh();
+       user_pdptp = pmap64_pdpt(p, 0x0);
+       pmap_store_pte(user_pdptp+0, 0);
+       pmap_store_pte(user_pdptp+1, 0);
+       pmap_store_pte(user_pdptp+2, 0);
+       pmap_store_pte(user_pdptp+3, 0);
+
+       p->pm_kernel_cr3 = FALSE;
+
+       pmap_load_kernel_cr3();
+
+       splx(spl);
 }
 
 /*
@@ -1025,38 +1456,29 @@ void
 pmap_destroy(
        register pmap_t p)
 {
-       register pt_entry_t     *pdep;
-       register vm_offset_t    pa;
        register int            c;
        spl_t                   s;
+#if 0
+       register pt_entry_t     *pdep;
        register vm_page_t      m;
+#endif
 
        if (p == PMAP_NULL)
                return;
-
        SPLVM(s);
        simple_lock(&p->lock);
        c = --p->ref_count;
        if (c == 0) {
-               register int    my_cpu;
-
-               mp_disable_preemption();
-               my_cpu = cpu_number();
-
                /* 
                 * If some cpu is not using the physical pmap pointer that it
                 * is supposed to be (see set_dirbase), we might be using the
                 * pmap that is being destroyed! Make sure we are
                 * physically on the right pmap:
                 */
+               PMAP_UPDATE_TLBS(p,
+                                VM_MIN_ADDRESS,
+                                VM_MAX_KERNEL_ADDRESS);
 
-
-               if (real_pmap[my_cpu] == p) {
-                       PMAP_CPU_CLR(p, my_cpu);
-                       real_pmap[my_cpu] = kernel_pmap;
-                       PMAP_RELOAD_TLBS();
-               }
-               mp_enable_preemption();
        }
        simple_unlock(&p->lock);
        SPLX(s);
@@ -1069,52 +1491,69 @@ pmap_destroy(
         *      Free the memory maps, then the
         *      pmap structure.
         */
-       pdep = p->dirbase;
-       while (pdep < &p->dirbase[pdenum(p, LINEAR_KERNEL_ADDRESS)]) {
+
+       if (!cpu_64bit) {
+#if 0
+       pdep = (pt_entry_t *)p->dirbase;
+
+       while (pdep < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)]) {
+           int ind;
+
            if (*pdep & INTEL_PTE_VALID) {
-               pa = pte_to_pa(*pdep);
-               vm_object_lock(pmap_object);
-               m = vm_page_lookup(pmap_object, pa);
-               if (m == VM_PAGE_NULL)
+               ind = pdep - (pt_entry_t *)&p->dirbase[0];
+
+               vm_object_lock(p->pm_obj);
+               m = vm_page_lookup(p->pm_obj, (vm_object_offset_t)ind);
+               if (m == VM_PAGE_NULL) {
                    panic("pmap_destroy: pte page not in object");
+               }
                vm_page_lock_queues();
                vm_page_free(m);
                inuse_ptepages_count--;
-               vm_object_unlock(pmap_object);
+
+               vm_object_unlock(p->pm_obj);
                vm_page_unlock_queues();
 
                /*
                 *      Clear pdes, this might be headed for the cache.
                 */
-               c = ptes_per_vm_page;
-               do {
-                   *pdep = 0;
-                   pdep++;
-               } while (--c > 0);
+               pmap_store_pte(pdep, 0);
+               pdep++;
            }
            else {
-               pdep += ptes_per_vm_page;
+             pmap_store_pte(pdep, 0);
+             pdep++;
            }
        
        }
-       assert(p->stats.resident_count == 0);
-       assert(p->stats.wired_count == 0);
+#else
+       inuse_ptepages_count -= p->pm_obj->resident_page_count;
+#endif
+       vm_object_deallocate(p->pm_obj);
+         kmem_free(kernel_map, (vm_offset_t)p->dirbase, NBPTD);
+         zfree(pdpt_zone, (void *)p->pm_hold);
+       } else {
+
+         /* 64 bit */
+
+         pmap_unmap_sharedpage(p);
+
+         /* free 64 bit mode structs */
+         inuse_ptepages_count--;
+         kmem_free(kernel_map, (vm_offset_t)p->pm_hold, PAGE_SIZE);
+
+         inuse_ptepages_count -= p->pm_obj_pml4->resident_page_count;
+         vm_object_deallocate(p->pm_obj_pml4);
+
+         inuse_ptepages_count -= p->pm_obj_pdpt->resident_page_count;
+         vm_object_deallocate(p->pm_obj_pdpt);
+
+         inuse_ptepages_count -= p->pm_obj->resident_page_count;
+         vm_object_deallocate(p->pm_obj);
 
-       /*
-        *      Add to cache if not already full
-        */
-       simple_lock(&pmap_cache_lock);
-       if (pmap_cache_count <= pmap_cache_max) {
-               p->ref_count = (int) pmap_cache_list;
-               pmap_cache_list = p;
-               pmap_cache_count++;
-               simple_unlock(&pmap_cache_lock);
-       }
-       else {
-               simple_unlock(&pmap_cache_lock);
-               kmem_free(kernel_map, (vm_offset_t)p->dirbase, INTEL_PGBYTES);
-               zfree(pmap_zone, (vm_offset_t) p);
        }
+
+       zfree(pmap_zone, p);
 }
 
 /*
@@ -1148,71 +1587,82 @@ pmap_reference(
  *     Assumes that the pte-page exists.
  */
 
-/* static */
-void
+static void
 pmap_remove_range(
        pmap_t                  pmap,
-       vm_offset_t             va,
+       vm_map_offset_t         start_vaddr,
        pt_entry_t              *spte,
        pt_entry_t              *epte)
 {
        register pt_entry_t     *cpte;
-       int                     num_removed, num_unwired;
+       int                     num_removed, num_unwired, num_found;
        int                     pai;
-       vm_offset_t             pa;
+       pmap_paddr_t            pa;
+       vm_map_offset_t         vaddr;
 
-#if    DEBUG_PTE_PAGE
-       if (pmap != kernel_pmap)
-               ptep_check(get_pte_page(spte));
-#endif /* DEBUG_PTE_PAGE */
        num_removed = 0;
        num_unwired = 0;
+       num_found = 0;
 
-       for (cpte = spte; cpte < epte;
-            cpte += ptes_per_vm_page, va += PAGE_SIZE) {
+       /* invalidate the PTEs first to "freeze" them */
+       for (cpte = spte, vaddr = start_vaddr;
+            cpte < epte;
+            cpte++, vaddr += PAGE_SIZE_64) {
 
            pa = pte_to_pa(*cpte);
            if (pa == 0)
                continue;
+           num_found++;
 
-           num_removed++;
            if (iswired(*cpte))
                num_unwired++;
 
-           if (!valid_page(pa)) {
+           if (!valid_page(i386_btop(pa))) {
 
                /*
                 *      Outside range of managed physical memory.
                 *      Just remove the mappings.
                 */
-               register int    i = ptes_per_vm_page;
-               register pt_entry_t     *lpte = cpte;
-               do {
-                   *lpte = 0;
-                   lpte++;
-               } while (--i > 0);
+               pmap_store_pte(cpte, 0);
                continue;
            }
 
+           /* invalidate the PTE */
+           pmap_update_pte(cpte, *cpte, (*cpte & ~INTEL_PTE_VALID));
+       }
+
+       if (0 == num_found) {
+         /* nothing was changed, we're done */
+         goto update_counts;
+       }
+
+       /* propagate the invalidates to other CPUs */
+
+       PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr);
+
+       for (cpte = spte, vaddr = start_vaddr;
+            cpte < epte;
+            cpte++, vaddr += PAGE_SIZE_64) {
+
+           pa = pte_to_pa(*cpte);
+           if (pa == 0)
+               continue;
+
            pai = pa_index(pa);
+
            LOCK_PVH(pai);
 
+           num_removed++;
+
            /*
-            *  Get the modify and reference bits.
+            *  Get the modify and reference bits, then
+            *  nuke the entry in the page table
             */
-           {
-               register int            i;
-               register pt_entry_t     *lpte;
-
-               i = ptes_per_vm_page;
-               lpte = cpte;
-               do {
-                   pmap_phys_attributes[pai] |=
-                       *lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
-                   *lpte = 0;
-                   lpte++;
-               } while (--i > 0);
-           }
+           /* remember reference and change */
+           pmap_phys_attributes[pai] |=
+             (char)(*cpte & (PHYS_MODIFIED|PHYS_REFERENCED));
+           /* completely invalidate the PTE */
+           pmap_store_pte(cpte, 0);
 
            /*
             *  Remove the mapping from the pvlist for
@@ -1225,7 +1675,7 @@ pmap_remove_range(
                if (pv_h->pmap == PMAP_NULL) {
                    panic("pmap_remove: null pv_list!");
                }
-               if (pv_h->va == va && pv_h->pmap == pmap) {
+               if (pv_h->va == vaddr && pv_h->pmap == pmap) {
                    /*
                     * Header is the pv_entry.  Copy the next one
                     * to header and free the next one (we cannot
@@ -1245,9 +1695,9 @@ pmap_remove_range(
                    do {
                        prev = cur;
                        if ((cur = prev->next) == PV_ENTRY_NULL) {
-                           panic("pmap-remove: mapping not in pv_list!");
+                         panic("pmap-remove: mapping not in pv_list!");
                        }
-                   } while (cur->va != va || cur->pmap != pmap);
+                   } while (cur->va != vaddr || cur->pmap != pmap);
                    prev->next = cur->next;
                    PV_FREE(cur);
                }
@@ -1255,6 +1705,7 @@ pmap_remove_range(
            }
        }
 
+ update_counts:
        /*
         *      Update the counts
         */
@@ -1262,6 +1713,7 @@ pmap_remove_range(
        pmap->stats.resident_count -= num_removed;
        assert(pmap->stats.wired_count >= num_unwired);
        pmap->stats.wired_count -= num_unwired;
+       return;
 }
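
The removal now runs in two passes: the first pass only clears the valid bit so no CPU can load a fresh TLB entry for the range, one PMAP_UPDATE_TLBS() call then shoots down any stale entries, and only the second pass harvests the ref/mod bits, zeroes the PTEs for good and trims the PV lists. A standalone toy sketch of that freeze-flush-reap pattern (toy types and bit values, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_VALID 0x001ULL
    #define PTE_REF   0x020ULL
    #define PTE_MOD   0x040ULL

    static void toy_tlb_flush(void) { /* stands in for PMAP_UPDATE_TLBS() */ }

    static void remove_range(uint64_t *spte, uint64_t *epte, uint8_t *attrs)
    {
        uint64_t *cpte;
        int found = 0;

        /* Pass 1: clear the valid bit so the mappings are "frozen". */
        for (cpte = spte; cpte < epte; cpte++) {
            if (*cpte == 0)
                continue;
            found++;
            *cpte &= ~PTE_VALID;
        }
        if (found == 0)
            return;                 /* nothing mapped, nothing to flush */

        toy_tlb_flush();            /* one shootdown for the whole range */

        /* Pass 2: collect ref/mod bits, then zero the entries for good. */
        for (cpte = spte; cpte < epte; cpte++) {
            if (*cpte == 0)
                continue;
            *attrs |= (uint8_t)(*cpte & (PTE_REF | PTE_MOD));
            *cpte = 0;
        }
    }

    int main(void)
    {
        uint64_t ptes[4] = { 0x1000 | PTE_VALID | PTE_MOD, 0,
                             0x2000 | PTE_VALID, 0 };
        uint8_t attrs = 0;

        remove_range(ptes, ptes + 4, &attrs);
        printf("collected attributes: 0x%x\n", (unsigned)attrs);
        return 0;
    }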
 
 /*
@@ -1270,15 +1722,14 @@ pmap_remove_range(
  */
 void
 pmap_remove_some_phys(
-       pmap_t          map,
-       vm_offset_t     phys_addr)
+       __unused pmap_t         map,
+       __unused ppnum_t         pn)
 {
 
 /* Implement to support working set code */
 
 }
 
-
 /*
  *     Remove the given range of addresses
  *     from the specified map.
@@ -1287,39 +1738,41 @@ pmap_remove_some_phys(
  *     rounded to the hardware page size.
  */
 
+
 void
 pmap_remove(
        pmap_t          map,
-       vm_offset_t     s,
-       vm_offset_t     e)
+       addr64_t        s64,
+       addr64_t        e64)
 {
        spl_t                   spl;
        register pt_entry_t     *pde;
        register pt_entry_t     *spte, *epte;
-       vm_offset_t             l;
+       addr64_t                l64;
+       addr64_t                orig_s64;
 
-       if (map == PMAP_NULL)
+       if (map == PMAP_NULL || s64 == e64)
                return;
 
        PMAP_READ_LOCK(map, spl);
 
-       pde = pmap_pde(map, s);
-
-       while (s < e) {
-           l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
-           if (l > e)
-               l = e;
-           if (*pde & INTEL_PTE_VALID) {
-               spte = (pt_entry_t *)ptetokv(*pde);
-               spte = &spte[ptenum(s)];
-               epte = &spte[intel_btop(l-s)];
-               pmap_remove_range(map, s, spte, epte);
+       orig_s64 = s64;
+
+       while (s64 < e64) {
+           l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size-1);
+           if (l64 > e64)
+               l64 = e64;
+           pde = pmap_pde(map, s64);
+           if (pde && (*pde & INTEL_PTE_VALID)) {
+               spte = (pt_entry_t *)pmap_pte(map, (s64 & ~(pde_mapped_size-1)));
+               spte = &spte[ptenum(s64)];
+               epte = &spte[intel_btop(l64-s64)];
+               pmap_remove_range(map, s64, spte, epte);
            }
-           s = l;
+           s64 = l64;
            pde++;
        }
-
-       PMAP_FLUSH_TLBS();
+       PMAP_UPDATE_TLBS(map, orig_s64, e64);
 
        PMAP_READ_UNLOCK(map, spl);
 }
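
The loop walks the range one page-directory span at a time: (s64 + pde_mapped_size) & ~(pde_mapped_size-1) rounds s64 up to the next PDE boundary, so each pmap_remove_range() call stays inside a single page table, and a single TLB shootdown covers the whole original range at the end. A small standalone sketch of that stepping, assuming a 2 MB pde_mapped_size (it is 4 MB without PAE):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pde_mapped_size = 2ULL << 20;      /* assumed: 2 MB spans */
        uint64_t s = 0x003ff000ULL, e = 0x00601000ULL, l;

        while (s < e) {
            /* next page-directory boundary strictly above s */
            l = (s + pde_mapped_size) & ~(pde_mapped_size - 1);
            if (l > e)
                l = e;
            printf("chunk [0x%llx, 0x%llx) stays inside one page table\n",
                   (unsigned long long)s, (unsigned long long)l);
            s = l;
        }
        return 0;
    }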
@@ -1333,7 +1786,7 @@ pmap_remove(
  */
 void
 pmap_page_protect(
-       vm_offset_t     phys,
+        ppnum_t         pn,
        vm_prot_t       prot)
 {
        pv_entry_t              pv_h, prev;
@@ -1343,9 +1796,11 @@ pmap_page_protect(
        register pmap_t         pmap;
        spl_t                   spl;
        boolean_t               remove;
+       pmap_paddr_t            phys;
 
-       assert(phys != vm_page_fictitious_addr);
-       if (!valid_page(phys)) {
+       assert(pn != vm_page_fictitious_addr);
+
+       if (!valid_page(pn)) {
            /*
             *  Not a managed page.
             */
@@ -1366,17 +1821,17 @@ pmap_page_protect(
                remove = TRUE;
                break;
        }
+       phys = (pmap_paddr_t)i386_ptob(pn);
+       pai = pa_index(phys);
+       pv_h = pai_to_pvh(pai);
+
 
        /*
         *      Lock the pmap system first, since we will be changing
         *      several pmaps.
         */
-
        PMAP_WRITE_LOCK(spl);
 
-       pai = pa_index(phys);
-       pv_h = pai_to_pvh(pai);
-
        /*
         * Walk down PV list, changing or removing all mappings.
         * We do not have to lock the pv_list because we have
@@ -1384,106 +1839,113 @@ pmap_page_protect(
         */
        if (pv_h->pmap != PMAP_NULL) {
 
-           prev = pv_e = pv_h;
-           do {
-               pmap = pv_e->pmap;
-               /*
-                * Lock the pmap to block pmap_extract and similar routines.
-                */
-               simple_lock(&pmap->lock);
+               prev = pv_e = pv_h;
 
-               {
-                   register vm_offset_t va;
+               do {
+                       register vm_map_offset_t vaddr;
 
-                   va = pv_e->va;
-                   pte = pmap_pte(pmap, va);
+                       pmap = pv_e->pmap;
+                       /*
+                        * Lock the pmap to block pmap_extract and similar routines.
+                        */
+                       simple_lock(&pmap->lock);
 
-                   /*
-                    * Consistency checks.
-                    */
-                   /* assert(*pte & INTEL_PTE_VALID); XXX */
-                   /* assert(pte_to_phys(*pte) == phys); */
+                       vaddr = pv_e->va;
+                       pte = pmap_pte(pmap, vaddr);
+                       if(0 == pte) {
+                         kprintf("pmap_page_protect pmap 0x%x pn 0x%x vaddr 0x%llx\n",pmap, pn, vaddr);
+                         panic("pmap_page_protect");
+                       }
+                       /*
+                        * Consistency checks.
+                        */
+                       /* assert(*pte & INTEL_PTE_VALID); XXX */
+                       /* assert(pte_to_phys(*pte) == phys); */
 
-                   /*
-                    * Invalidate TLBs for all CPUs using this mapping.
-                    */
-                   PMAP_INVALIDATE_PAGE(pmap, va);
-               }
 
-               /*
-                * Remove the mapping if new protection is NONE
-                * or if write-protecting a kernel mapping.
-                */
-               if (remove || pmap == kernel_pmap) {
-                   /*
-                    * Remove the mapping, collecting any modify bits.
-                    */
-                   {
-                       register int    i = ptes_per_vm_page;
+                       /*
+                        * Remove the mapping if new protection is NONE
+                        * or if write-protecting a kernel mapping.
+                        */
+                       if (remove || pmap == kernel_pmap) {
+                               /*
+                                * Remove the mapping, collecting any modify bits.
+                                */
+                               pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
 
-                       do {
-                           pmap_phys_attributes[pai] |=
-                               *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-                           *pte++ = 0;
-                       } while (--i > 0);
-                   }
+                               PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
 
-                   assert(pmap->stats.resident_count >= 1);
-                   pmap->stats.resident_count--;
+                               pmap_phys_attributes[pai] |= *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
 
-                   /*
-                    * Remove the pv_entry.
-                    */
-                   if (pv_e == pv_h) {
-                       /*
-                        * Fix up head later.
-                        */
-                       pv_h->pmap = PMAP_NULL;
-                   }
-                   else {
-                       /*
-                        * Delete this entry.
-                        */
-                       prev->next = pv_e->next;
-                       PV_FREE(pv_e);
-                   }
-               }
-               else {
-                   /*
-                    * Write-protect.
-                    */
-                   register int i = ptes_per_vm_page;
+                               pmap_store_pte(pte, 0);
 
-                   do {
-                       *pte &= ~INTEL_PTE_WRITE;
-                       pte++;
-                   } while (--i > 0);
 
-                   /*
-                    * Advance prev.
-                    */
-                   prev = pv_e;
-               }
+                               //XXX breaks DEBUG build                    assert(pmap->stats.resident_count >= 1);
+                               pmap->stats.resident_count--;
 
-               simple_unlock(&pmap->lock);
+                               /*
+                                * Remove the pv_entry.
+                                */
+                               if (pv_e == pv_h) {
+                                       /*
+                                        * Fix up head later.
+                                        */
+                                       pv_h->pmap = PMAP_NULL;
+                               }
+                               else {
+                                       /*
+                                        * Delete this entry.
+                                        */
+                                       prev->next = pv_e->next;
+                                       PV_FREE(pv_e);
+                               }
+                       } else {
+                               /*
+                                * Write-protect.
+                                */
+                               pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WRITE));
+                               PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
+                               /*
+                                * Advance prev.
+                                */
+                               prev = pv_e;
+                       }
 
-           } while ((pv_e = prev->next) != PV_ENTRY_NULL);
+                       simple_unlock(&pmap->lock);
 
-           /*
-            * If pv_head mapping was removed, fix it up.
-            */
-           if (pv_h->pmap == PMAP_NULL) {
-               pv_e = pv_h->next;
-               if (pv_e != PV_ENTRY_NULL) {
-                   *pv_h = *pv_e;
-                   PV_FREE(pv_e);
+               } while ((pv_e = prev->next) != PV_ENTRY_NULL);
+
+               /*
+                * If pv_head mapping was removed, fix it up.
+                */
+               if (pv_h->pmap == PMAP_NULL) {
+                       pv_e = pv_h->next;
+
+                       if (pv_e != PV_ENTRY_NULL) {
+                               *pv_h = *pv_e;
+                               PV_FREE(pv_e);
+                       }
                }
-           }
        }
-
        PMAP_WRITE_UNLOCK(spl);
 }
 
+/*
+ *     Routine:
+ *             pmap_disconnect
+ *
+ *     Function:
+ *             Disconnect all mappings for this page and return reference and change status
+ *             in generic format.
+ *
+ */
+unsigned int pmap_disconnect(
+       ppnum_t pa)
+{
+       pmap_page_protect(pa, 0);                               /* disconnect the page */
+       return (pmap_get_refmod(pa));                   /* return ref/chg status */
+}
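
A caller-side sketch of the contract: once pmap_disconnect() returns, the page has no mappings left, and the returned word tells the VM layer whether the page was referenced or modified while it was mapped. The flag values below are toy stand-ins for this sketch, not the kernel's:

    #include <stdio.h>

    #define TOY_MEM_MODIFIED   0x1      /* assumed values for the sketch */
    #define TOY_MEM_REFERENCED 0x2

    static unsigned int toy_disconnect(void)
    {
        /* stands in for pmap_disconnect(): unmap everywhere, report status */
        return TOY_MEM_MODIFIED | TOY_MEM_REFERENCED;
    }

    int main(void)
    {
        unsigned int refmod = toy_disconnect();

        if (refmod & TOY_MEM_MODIFIED)
            printf("page is dirty: clean it before reusing the frame\n");
        if (refmod & TOY_MEM_REFERENCED)
            printf("page was recently referenced\n");
        return 0;
    }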
+
 /*
  *     Set the physical protection on the
  *     specified range of this map as requested.
@@ -1492,78 +1954,92 @@ pmap_page_protect(
 void
 pmap_protect(
        pmap_t          map,
-       vm_offset_t     s,
-       vm_offset_t     e,
+       vm_map_offset_t sva,
+       vm_map_offset_t eva,
        vm_prot_t       prot)
 {
        register pt_entry_t     *pde;
        register pt_entry_t     *spte, *epte;
-       vm_offset_t             l;
+       vm_map_offset_t         lva;
+       vm_map_offset_t         orig_sva;
        spl_t           spl;
-
+       boolean_t       set_NX;
+       int num_found = 0;
 
        if (map == PMAP_NULL)
                return;
 
-       /*
-        * Determine the new protection.
-        */
-       switch (prot) {
-           case VM_PROT_READ:
-           case VM_PROT_READ|VM_PROT_EXECUTE:
-               break;
-           case VM_PROT_READ|VM_PROT_WRITE:
-           case VM_PROT_ALL:
-               return; /* nothing to do */
-           default:
-               pmap_remove(map, s, e);
+       if (prot == VM_PROT_NONE) {
+               pmap_remove(map, sva, eva);
                return;
        }
 
-       /*
-        * If write-protecting in the kernel pmap,
-        * remove the mappings; the i386 ignores
-        * the write-permission bit in kernel mode.
-        *
-        * XXX should be #if'd for i386
-        */
-
-       if (cpuid_family == CPUID_FAMILY_386)
-           if (map == kernel_pmap) {
-                   pmap_remove(map, s, e);
-                   return;
-           }
+       if ( (prot & VM_PROT_EXECUTE) || !nx_enabled || !map->nx_enabled )
+               set_NX = FALSE;
+       else
+               set_NX = TRUE;
 
        SPLVM(spl);
        simple_lock(&map->lock);
 
-
-       pde = pmap_pde(map, s);
-       while (s < e) {
-           l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
-           if (l > e)
-               l = e;
-           if (*pde & INTEL_PTE_VALID) {
-               spte = (pt_entry_t *)ptetokv(*pde);
-               spte = &spte[ptenum(s)];
-               epte = &spte[intel_btop(l-s)];
+       orig_sva = sva;
+       while (sva < eva) {
+           lva = (sva + pde_mapped_size) & ~(pde_mapped_size-1);
+           if (lva > eva)
+               lva = eva;
+           pde = pmap_pde(map, sva);
+           if (pde && (*pde & INTEL_PTE_VALID)) {
+               spte = (pt_entry_t *)pmap_pte(map, (sva & ~(pde_mapped_size-1)));
+               spte = &spte[ptenum(sva)];
+               epte = &spte[intel_btop(lva-sva)];
 
                while (spte < epte) {
-                   if (*spte & INTEL_PTE_VALID)
-                       *spte &= ~INTEL_PTE_WRITE;
+                   if (*spte & INTEL_PTE_VALID) {
+                     
+                       if (prot & VM_PROT_WRITE)
+                         pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_WRITE));
+                       else
+                         pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_WRITE));
+
+                       if (set_NX == TRUE)
+                         pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_NX));
+                       else
+                         pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_NX));
+
+                       num_found++;
+
+                   }
                    spte++;
                }
            }
-           s = l;
-           pde++;
+           sva = lva;
        }
-
-       PMAP_FLUSH_TLBS();
+       if (num_found)
+         PMAP_UPDATE_TLBS(map, orig_sva, eva);
 
        simple_unlock(&map->lock);
        SPLX(spl);
 }
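
pmap_protect() now also manages the execute-disable bit: NX is applied only when the new protection lacks VM_PROT_EXECUTE and NX is enabled both globally (nx_enabled) and for this pmap. A standalone sketch of just that decision:

    #include <stdbool.h>
    #include <stdio.h>

    static bool want_nx(bool prot_execute, bool global_nx, bool pmap_nx)
    {
        if (prot_execute || !global_nx || !pmap_nx)
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", want_nx(false, true,  true));   /* 1: data-only mapping  */
        printf("%d\n", want_nx(true,  true,  true));   /* 0: executable mapping */
        printf("%d\n", want_nx(false, false, true));   /* 0: NX disabled globally */
        return 0;
    }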
 
+/* Map a (possibly) autogenned block */
+void
+pmap_map_block(
+       pmap_t          pmap, 
+       addr64_t        va,
+       ppnum_t         pa,
+       uint32_t        size,
+       vm_prot_t       prot,
+       int             attr,
+       __unused unsigned int   flags)
+{
+        uint32_t page;
+
+       for (page = 0; page < size; page++) {
+               pmap_enter(pmap, va, pa, prot, attr, TRUE);
+               va += PAGE_SIZE;
+               pa++;
+       }
+}
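
pmap_map_block() is a plain per-page loop: the virtual address advances by one page and the physical page number by one, `size` times. A toy standalone sketch of the same stepping (the stand-in function below is hypothetical, not the kernel's pmap_enter()):

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096ULL

    static void toy_enter(uint64_t va, uint32_t pn)
    {
        printf("map va 0x%llx -> physical page %u\n",
               (unsigned long long)va, pn);
    }

    int main(void)
    {
        uint64_t va   = 0x10000000ULL;  /* hypothetical start address     */
        uint32_t pn   = 0x1200;         /* hypothetical first page number */
        uint32_t size = 4;              /* block length, in pages         */

        for (uint32_t page = 0; page < size; page++) {
            toy_enter(va, pn);
            va += TOY_PAGE_SIZE;
            pn++;
        }
        return 0;
    }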
 
 
 /*
@@ -1581,57 +2057,40 @@ pmap_protect(
 void
 pmap_enter(
        register pmap_t         pmap,
-       vm_offset_t             v,
-       register vm_offset_t    pa,
+       vm_map_offset_t         vaddr,
+       ppnum_t                 pn,
        vm_prot_t               prot,
        unsigned int            flags,
        boolean_t               wired)
 {
        register pt_entry_t     *pte;
        register pv_entry_t     pv_h;
-       register int            i, pai;
+       register int            pai;
        pv_entry_t              pv_e;
        pt_entry_t              template;
        spl_t                   spl;
-       vm_offset_t             old_pa;
-
-       XPR(0x80000000, "%x/%x: pmap_enter %x/%x/%x\n",
-           current_thread()->top_act,
+       pmap_paddr_t            old_pa;
+       pmap_paddr_t            pa = (pmap_paddr_t)i386_ptob(pn);
+       boolean_t               need_tlbflush = FALSE;
+       boolean_t               set_NX;
+       char                    oattr;
+
+       XPR(0x80000000, "%x/%x: pmap_enter %x/%qx/%x\n",
+           current_thread(),
            current_thread(), 
-           pmap, v, pa);
+           pmap, vaddr, pn);
 
-       assert(pa != vm_page_fictitious_addr);
+       assert(pn != vm_page_fictitious_addr);
        if (pmap_debug)
-               printf("pmap(%x, %x)\n", v, pa);
+               printf("pmap(%qx, %x)\n", vaddr, pn);
        if (pmap == PMAP_NULL)
                return;
 
-       if (cpuid_family == CPUID_FAMILY_386)
-       if (pmap == kernel_pmap && (prot & VM_PROT_WRITE) == 0
-           && !wired /* hack for io_wire */ ) {
-           /*
-            *  Because the 386 ignores write protection in kernel mode,
-            *  we cannot enter a read-only kernel mapping, and must
-            *  remove an existing mapping if changing it.
-            *
-            *  XXX should be #if'd for i386
-            */
-           PMAP_READ_LOCK(pmap, spl);
-
-           pte = pmap_pte(pmap, v);
-           if (pte != PT_ENTRY_NULL && pte_to_pa(*pte) != 0) {
-               /*
-                *      Invalidate the translation buffer,
-                *      then remove the mapping.
-                */
-               PMAP_INVALIDATE_PAGE(pmap, v);
-               pmap_remove_range(pmap, v, pte,
-                                 pte + ptes_per_vm_page);
-           }
-           PMAP_READ_UNLOCK(pmap, spl);
-           return;
-       }
-
+       if ( (prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled )
+               set_NX = FALSE;
+       else
+               set_NX = TRUE;
+       
        /*
         *      Must allocate a new pvlist entry while we're unlocked;
         *      zalloc may cause pageout (which will lock the pmap system).
@@ -1640,7 +2099,7 @@ pmap_enter(
         *      the allocated entry later (if we no longer need it).
         */
        pv_e = PV_ENTRY_NULL;
-Retry:
+
        PMAP_READ_LOCK(pmap, spl);
 
        /*
@@ -1649,13 +2108,13 @@ Retry:
         *      pages to map one VM page.
         */
 
-       while ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL) {
+       while ((pte = pmap_pte(pmap, vaddr)) == PT_ENTRY_NULL) {
                /*
                 *      Must unlock to expand the pmap.
                 */
                PMAP_READ_UNLOCK(pmap, spl);
 
-               pmap_expand(pmap, v);
+               pmap_expand(pmap, vaddr); /* going to grow pde level page(s) */
 
                PMAP_READ_LOCK(pmap, spl);
        }
@@ -1668,12 +2127,23 @@ Retry:
            /*
             *  May be changing its wired attribute or protection
             */
-               
+       
            template = pa_to_pte(pa) | INTEL_PTE_VALID;
+
+           if(VM_MEM_NOT_CACHEABLE == (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT))) {
+               if(!(flags & VM_MEM_GUARDED))
+                       template |= INTEL_PTE_PTA;
+               template |= INTEL_PTE_NCACHE;
+           }
+
            if (pmap != kernel_pmap)
                template |= INTEL_PTE_USER;
            if (prot & VM_PROT_WRITE)
                template |= INTEL_PTE_WRITE;
+
+           if (set_NX == TRUE)
+               template |= INTEL_PTE_NX;
+
            if (wired) {
                template |= INTEL_PTE_WIRED;
                if (!iswired(*pte))
@@ -1686,17 +2156,9 @@ Retry:
                }
            }
 
-           PMAP_INVALIDATE_PAGE(pmap, v);
-
-           i = ptes_per_vm_page;
-           do {
-               if (*pte & INTEL_PTE_MOD)
-                   template |= INTEL_PTE_MOD;
-               WRITE_PTE(pte, template)
-               pte++;
-               pte_increment_pa(template);
-           } while (--i > 0);
-
+           /* store modified PTE and preserve RC bits */
+           pmap_update_pte(pte, *pte, template | (*pte & (INTEL_PTE_REF | INTEL_PTE_MOD)));
+           need_tlbflush = TRUE;
            goto Done;
        }
 
@@ -1707,6 +2169,7 @@ Retry:
         *         2) Add pvlist entry for new mapping
         *         3) Enter new mapping.
         *
+        *      SHARING FAULTS IS HORRIBLY BROKEN
         *      SHARING_FAULTS complicates this slightly in that it cannot
         *      replace the mapping, but must remove it (because adding the
         *      pvlist entry for the new mapping may remove others), and
@@ -1718,14 +2181,7 @@ Retry:
         *      managed, step 2) is skipped.
         */
 
-       if (old_pa != (vm_offset_t) 0) {
-
-           PMAP_INVALIDATE_PAGE(pmap, v);
-
-#if    DEBUG_PTE_PAGE
-           if (pmap != kernel_pmap)
-               ptep_check(get_pte_page(pte));
-#endif /* DEBUG_PTE_PAGE */
+       if (old_pa != (pmap_paddr_t) 0) {
 
            /*
             *  Don't do anything to pages outside valid memory here.
@@ -1733,7 +2189,16 @@ Retry:
             *  to overwrite the old one.
             */
 
-           if (valid_page(old_pa)) {
+         /* invalidate the PTE */
+         pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
+         /* propagate the invalidate everywhere */
+         PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
+         /* remember reference and change */
+         oattr = (char)(*pte & (PHYS_MODIFIED | PHYS_REFERENCED));
+         /* completely invalidate the PTE */
+         pmap_store_pte(pte,0);
+
+           if (valid_page(i386_btop(old_pa))) {
 
                pai = pa_index(old_pa);
                LOCK_PVH(pai);
@@ -1744,21 +2209,8 @@ Retry:
                    assert(pmap->stats.wired_count >= 1);
                    pmap->stats.wired_count--;
                }
-               i = ptes_per_vm_page;
-               do {
-                   pmap_phys_attributes[pai] |=
-                       *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-                   WRITE_PTE(pte, 0)
-                   pte++;
-                   pte_increment_pa(template);
-               } while (--i > 0);
-
-               /*
-                * Put pte back to beginning of page since it'll be
-                * used later to enter the new page.
-                */
-               pte -= ptes_per_vm_page;
 
+               pmap_phys_attributes[pai] |= oattr;
                /*
                 *      Remove the mapping from the pvlist for
                 *      this physical page.
@@ -1770,7 +2222,8 @@ Retry:
                    if (pv_h->pmap == PMAP_NULL) {
                        panic("pmap_enter: null pv_list!");
                    }
-                   if (pv_h->va == v && pv_h->pmap == pmap) {
+
+                   if (pv_h->va == vaddr && pv_h->pmap == pmap) {
                        /*
                         * Header is the pv_entry.  Copy the next one
                         * to header and free the next one (we cannot
@@ -1792,7 +2245,7 @@ Retry:
                            if ((cur = prev->next) == PV_ENTRY_NULL) {
                                panic("pmap_enter: mapping not in pv_list!");
                            }
-                       } while (cur->va != v || cur->pmap != pmap);
+                       } while (cur->va != vaddr || cur->pmap != pmap);
                        prev->next = cur->next;
                        pv_e = cur;
                    }
@@ -1806,17 +2259,18 @@ Retry:
                 *      at Step 3) will enter new mapping (overwriting old
                 *      one).  Do removal part of accounting.
                 */
-               old_pa = (vm_offset_t) 0;
-               assert(pmap->stats.resident_count >= 1);
-               pmap->stats.resident_count--;
+               old_pa = (pmap_paddr_t) 0;
+
                if (iswired(*pte)) {
                    assert(pmap->stats.wired_count >= 1);
                    pmap->stats.wired_count--;
                }
            }
+           need_tlbflush = TRUE;
+        
        }
 
-       if (valid_page(pa)) {
+       if (valid_page(i386_btop(pa))) {
 
            /*
             *  Step 2) Enter the mapping in the PV list for this
@@ -1826,7 +2280,7 @@ Retry:
            pai = pa_index(pa);
 
 
-#if SHARING_FAULTS
+#if SHARING_FAULTS /* this is horribly broken , do not enable */
 RetryPvList:
            /*
             * We can return here from the sharing fault code below
@@ -1841,7 +2295,7 @@ RetryPvList:
                /*
                 *      No mappings yet
                 */
-               pv_h->va = v;
+               pv_h->va = vaddr;
                pv_h->pmap = pmap;
                pv_h->next = PV_ENTRY_NULL;
            }
@@ -1854,13 +2308,13 @@ RetryPvList:
                     */
                    pv_entry_t  e = pv_h;
                    while (e != PV_ENTRY_NULL) {
-                       if (e->pmap == pmap && e->va == v)
+                       if (e->pmap == pmap && e->va == vaddr)
                             panic("pmap_enter: already in pv_list");
                        e = e->next;
                    }
                }
 #endif /* DEBUG */
-#if SHARING_FAULTS
+#if SHARING_FAULTS /* broken, do not enable */
                 {
                     /*
                      * do sharing faults.
@@ -1884,9 +2338,11 @@ RetryPvList:
                             *  Invalidate the translation buffer,
                             *  then remove the mapping.
                             */
-                            PMAP_INVALIDATE_PAGE(pmap, e->va);
                              pmap_remove_range(pmap, e->va, opte,
-                                                      opte + ptes_per_vm_page);
+                                                      opte + 1);
+
+                            PMAP_UPDATE_TLBS(pmap, e->va, e->va + PAGE_SIZE);
+
                             /*
                              * We could have remove the head entry,
                              * so there could be no more entries
@@ -1938,7 +2394,7 @@ RetryPvList:
                             if (logit) {
                                 pma = &pmap_aliasbuf[pmap_alias_index];
                                 pma->pmap = pmap;
-                                pma->va = v;
+                                pma->va = vaddr;
                                 pma->rpc = rpc;
                                 pma->cookie = PMAP_ALIAS_COOKIE;
                                 if (++pmap_alias_index >= PMAP_ALIAS_MAX)
@@ -1955,17 +2411,10 @@ RetryPvList:
                if (pv_e == PV_ENTRY_NULL) {
                    PV_ALLOC(pv_e);
                    if (pv_e == PV_ENTRY_NULL) {
-                       UNLOCK_PVH(pai);
-                       PMAP_READ_UNLOCK(pmap, spl);
-
-                       /*
-                        * Refill from zone.
-                        */
-                       pv_e = (pv_entry_t) zalloc(pv_list_zone);
-                       goto Retry;
+                     panic("pmap no pv_e's");
                    }
                }
-               pv_e->va = v;
+               pv_e->va = vaddr;
                pv_e->pmap = pmap;
                pv_e->next = pv_h->next;
                pv_h->next = pv_e;
@@ -1975,36 +2424,51 @@ RetryPvList:
                pv_e = PV_ENTRY_NULL;
            }
            UNLOCK_PVH(pai);
+
+           /*
+            * only count the mapping
+            * for 'managed memory'
+            */
+           pmap->stats.resident_count++;
        }
 
        /*
-        * Step 3) Enter and count the mapping.
+        * Step 3) Enter the mapping.
         */
 
-       pmap->stats.resident_count++;
 
        /*
         *      Build a template to speed up entering -
         *      only the pfn changes.
         */
        template = pa_to_pte(pa) | INTEL_PTE_VALID;
+
+       if(flags & VM_MEM_NOT_CACHEABLE) {
+               if(!(flags & VM_MEM_GUARDED))
+                       template |= INTEL_PTE_PTA;
+               template |= INTEL_PTE_NCACHE;
+       }
+
        if (pmap != kernel_pmap)
                template |= INTEL_PTE_USER;
        if (prot & VM_PROT_WRITE)
                template |= INTEL_PTE_WRITE;
+
+       if (set_NX == TRUE)
+               template |= INTEL_PTE_NX;
+
        if (wired) {
                template |= INTEL_PTE_WIRED;
                pmap->stats.wired_count++;
        }
-       i = ptes_per_vm_page;
-       do {
-               WRITE_PTE(pte, template)
-               pte++;
-               pte_increment_pa(template);
-       } while (--i > 0);
+       pmap_store_pte(pte, template);
+
 Done:
+       if (need_tlbflush == TRUE)
+               PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
+
        if (pv_e != PV_ENTRY_NULL) {
-           PV_FREE(pv_e);
+               PV_FREE(pv_e);
        }
 
        PMAP_READ_UNLOCK(pmap, spl);
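
The entry path builds a PTE "template" once and then stores it: the page-aligned physical address plus VALID, USER for non-kernel pmaps, WRITE, NX and WIRED as requested, with the cache-control bits added for VM_MEM_NOT_CACHEABLE requests. A standalone sketch of assembling such a word; the permission bits follow the x86 PTE layout, while the wired bit here is simply one of the software-available bits, an assumption made for the sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define T_VALID  0x0000000000000001ULL
    #define T_WRITE  0x0000000000000002ULL
    #define T_USER   0x0000000000000004ULL
    #define T_WIRED  0x0000000000000200ULL  /* software-available bit (assumed) */
    #define T_NX     0x8000000000000000ULL

    int main(void)
    {
        uint64_t pa = 0x12345000ULL;        /* page-aligned physical address */
        uint64_t template = pa | T_VALID;

        int user = 1, writable = 1, wired = 0, set_nx = 1;

        if (user)     template |= T_USER;
        if (writable) template |= T_WRITE;
        if (set_nx)   template |= T_NX;
        if (wired)    template |= T_WIRED;

        printf("pte = 0x%016llx\n", (unsigned long long)template);
        return 0;
    }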
@@ -2020,21 +2484,20 @@ Done:
 void
 pmap_change_wiring(
        register pmap_t map,
-       vm_offset_t     v,
+       vm_map_offset_t vaddr,
        boolean_t       wired)
 {
        register pt_entry_t     *pte;
-       register int            i;
        spl_t                   spl;
 
-#if 0
+#if 1
        /*
         *      We must grab the pmap system lock because we may
         *      change a pte_page queue.
         */
        PMAP_READ_LOCK(map, spl);
 
-       if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
+       if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL)
                panic("pmap_change_wiring: pte missing");
 
        if (wired && !iswired(*pte)) {
@@ -2042,10 +2505,8 @@ pmap_change_wiring(
             *  wiring down mapping
             */
            map->stats.wired_count++;
-           i = ptes_per_vm_page;
-           do {
-               *pte++ |= INTEL_PTE_WIRED;
-           } while (--i > 0);
+           pmap_update_pte(pte, *pte, (*pte | INTEL_PTE_WIRED));
+           pte++;
        }
        else if (!wired && iswired(*pte)) {
            /*
@@ -2053,10 +2514,8 @@ pmap_change_wiring(
             */
            assert(map->stats.wired_count >= 1);
            map->stats.wired_count--;
-           i = ptes_per_vm_page;
-           do {
-               *pte++ &= ~INTEL_PTE_WIRED;
-           } while (--i > 0);
+           pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WIRED));
+           pte++;
        }
 
        PMAP_READ_UNLOCK(map, spl);
@@ -2067,108 +2526,122 @@ pmap_change_wiring(
 
 }
 
+ppnum_t
+pmap_find_phys(pmap_t pmap, addr64_t va)
+{
+       pt_entry_t     *ptp;
+       ppnum_t         ppn;
+
+       mp_disable_preemption();
+
+       ptp = pmap_pte(pmap, va);
+       if (PT_ENTRY_NULL == ptp) {
+               ppn = 0;
+       } else {
+               ppn = (ppnum_t) i386_btop(pte_to_pa(*ptp));
+       }
+       mp_enable_preemption();
+
+       return ppn;
+}
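
pmap_find_phys() deliberately returns a physical page number rather than a byte address, so the result stays correct even for pages above 4 GB. A standalone sketch of the page-number/byte conversions that i386_btop()/i386_ptob() amount to with 4 KB pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint64_t pte_pa = 0x000000012345f000ULL;  /* above 4 GB, still fits a ppn */
        uint32_t ppn    = (uint32_t)(pte_pa >> PAGE_SHIFT);   /* i386_btop() */
        uint64_t back   = (uint64_t)ppn << PAGE_SHIFT;        /* i386_ptob() */

        printf("ppn = 0x%x, back to bytes = 0x%llx\n",
               (unsigned)ppn, (unsigned long long)back);
        return 0;
    }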
+
 /*
  *     Routine:        pmap_extract
  *     Function:
  *             Extract the physical page address associated
  *             with the given map/virtual_address pair.
+ *     Changed to a shim for backwards compatibility; it will not work
+ *     on 64-bit systems, since a 32-bit vm_offset_t cannot hold
+ *     physical addresses at or above 4 GB.  Some old drivers that we
+ *     cannot change still need this.
  */
 
 vm_offset_t
 pmap_extract(
        register pmap_t pmap,
-       vm_offset_t     va)
+       vm_map_offset_t vaddr)
 {
-       register pt_entry_t     *pte;
-       register vm_offset_t    pa;
-       spl_t                   spl;
+        ppnum_t ppn;
+       vm_offset_t paddr;
 
-       SPLVM(spl);
-       simple_lock(&pmap->lock);
-       if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
-           pa = (vm_offset_t) 0;
-       else if (!(*pte & INTEL_PTE_VALID))
-           pa = (vm_offset_t) 0;
-       else
-           pa = pte_to_pa(*pte) + (va & INTEL_OFFMASK);
-       simple_unlock(&pmap->lock);
-       SPLX(spl);
-       return(pa);
+       paddr = (vm_offset_t)0;
+       ppn = pmap_find_phys(pmap, vaddr);
+       if (ppn) {
+               paddr = ((vm_offset_t)i386_ptob(ppn)) | (vaddr & INTEL_OFFMASK);
+       }
+       return (paddr);
 }
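
The shim's limitation in concrete terms: the page number itself is fine, but folding it back into a 32-bit vm_offset_t silently truncates any physical address at or above 4 GB. A standalone sketch, with a toy typedef standing in for the 32-bit vm_offset_t:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t toy_vm_offset_t;   /* 32-bit vm_offset_t, as on i386 */

    int main(void)
    {
        uint32_t ppn  = 0x100000;                    /* page 1M = 4 GB physical */
        uint64_t full = (uint64_t)ppn << 12;         /* what i386_ptob() yields */
        toy_vm_offset_t truncated = (toy_vm_offset_t)full;

        printf("full physical address:     0x%llx\n", (unsigned long long)full);
        printf("what a 32-bit caller sees: 0x%x\n", (unsigned)truncated);  /* 0 */
        return 0;
    }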
 
-/*
- *     Routine:        pmap_expand
- *
- *     Expands a pmap to be able to map the specified virtual address.
- *
- *     Allocates new virtual memory for the P0 or P1 portion of the
- *     pmap, then re-maps the physical pages that were in the old
- *     pmap to be in the new pmap.
- *
- *     Must be called with the pmap system and the pmap unlocked,
- *     since these must be unlocked to use vm_allocate or vm_deallocate.
- *     Thus it must be called in a loop that checks whether the map
- *     has been expanded enough.
- *     (We won't loop forever, since page tables aren't shrunk.)
- */
 void
-pmap_expand(
-       register pmap_t         map,
-       register vm_offset_t    v)
+pmap_expand_pml4(
+                pmap_t map,
+                vm_map_offset_t vaddr)
 {
-       pt_entry_t              *pdp;
        register vm_page_t      m;
-       register vm_offset_t    pa;
-       register int            i;
+       register pmap_paddr_t   pa;
+       uint64_t                i;
        spl_t                   spl;
+       ppnum_t                 pn;
+       pml4_entry_t            *pml4p;
 
-       if (map == kernel_pmap)
-           panic("pmap_expand");
+       if (kernel_pmap == map) panic("expand kernel pml4");
 
-       /*
-        *      We cannot allocate the pmap_object in pmap_init,
-        *      because it is called before the zone package is up.
-        *      Allocate it now if it is missing.
-        */
-       if (pmap_object == VM_OBJECT_NULL)
-           pmap_object = vm_object_allocate(avail_end);
+       spl = splhigh();
+         pml4p = pmap64_pml4(map, vaddr);
+         splx(spl);
+         if (PML4_ENTRY_NULL == pml4p) panic("pmap_expand_pml4 no pml4p");
 
        /*
-        *      Allocate a VM page for the level 2 page table entries.
+        *      Allocate a VM page for the pml4 page
         */
        while ((m = vm_page_grab()) == VM_PAGE_NULL)
                VM_PAGE_WAIT();
 
        /*
-        *      Map the page to its physical address so that it
+        *      put the page into the pmap's obj list so it
         *      can be found later.
         */
-       pa = m->phys_addr;
-       vm_object_lock(pmap_object);
-       vm_page_insert(m, pmap_object, pa);
+       pn = m->phys_page;
+       pa = i386_ptob(pn);
+       i = pml4idx(map, vaddr);
+
+       vm_object_lock(map->pm_obj_pml4);
+#if 0 /* DEBUG */
+       if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i)) {
+         kprintf("pmap_expand_pml4: obj_pml4 not empty, pmap 0x%x pm_obj_pml4 0x%x vaddr 0x%llx i 0x%llx\n",
+                 map, map->pm_obj_pml4, vaddr, i);
+       }
+#endif
+       vm_page_insert(m, map->pm_obj_pml4, (vm_object_offset_t)i);
+
        vm_page_lock_queues();
        vm_page_wire(m);
-       inuse_ptepages_count++;
-       vm_object_unlock(pmap_object);
+
        vm_page_unlock_queues();
+       vm_object_unlock(map->pm_obj_pml4);
+       inuse_ptepages_count++;
+       map->stats.resident_count++;
+       map->stats.wired_count++;
 
        /*
         *      Zero the page.
         */
-       memset((void *)phystokv(pa), 0, PAGE_SIZE);
+       pmap_zero_page(pn);
 
        PMAP_READ_LOCK(map, spl);
        /*
         *      See if someone else expanded us first
         */
-       if (pmap_pte(map, v) != PT_ENTRY_NULL) {
+       if (pmap64_pdpt(map, vaddr) != PDPT_ENTRY_NULL) {
                PMAP_READ_UNLOCK(map, spl);
-               vm_object_lock(pmap_object);
+               vm_object_lock(map->pm_obj_pml4);
                vm_page_lock_queues();
                vm_page_free(m);
                inuse_ptepages_count--;
+               map->stats.resident_count--;
+               map->stats.wired_count--;
+
                vm_page_unlock_queues();
-               vm_object_unlock(pmap_object);
+               vm_object_unlock(map->pm_obj_pml4);
                return;
        }
 
@@ -2178,42 +2651,258 @@ pmap_expand(
         *      set several page directory entries.
         */
 
-       i = ptes_per_vm_page;
-       pdp = &map->dirbase[pdenum(map, v) & ~(i-1)];
-       do {
-           *pdp = pa_to_pte(pa)
-               | INTEL_PTE_VALID
-               | INTEL_PTE_USER
-               | INTEL_PTE_WRITE;
-           pdp++;
-           pa += INTEL_PGBYTES;
-       } while (--i > 0);
+       pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */
+
+       pmap_store_pte(pml4p, pa_to_pte(pa)
+                               | INTEL_PTE_VALID
+                               | INTEL_PTE_USER
+                               | INTEL_PTE_WRITE);
 
        PMAP_READ_UNLOCK(map, spl);
+
        return;
+
 }
 
-/*
- *     Copy the range specified by src_addr/len
- *     from the source map to the range dst_addr/len
- *     in the destination map.
- *
- *     This routine is only advisory and need not do anything.
- */
-#if    0
 void
-pmap_copy(
-       pmap_t          dst_pmap,
-       pmap_t          src_pmap,
-       vm_offset_t     dst_addr,
-       vm_size_t       len,
-       vm_offset_t     src_addr)
+pmap_expand_pdpt(
+                pmap_t map,
+                vm_map_offset_t vaddr)
 {
-#ifdef lint
-       dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
-#endif /* lint */
+       register vm_page_t      m;
+       register pmap_paddr_t   pa;
+       uint64_t                i;
+       spl_t                   spl;
+       ppnum_t                 pn;
+       pdpt_entry_t            *pdptp;
+
+       if (kernel_pmap == map) panic("expand kernel pdpt");
+
+       spl = splhigh();
+         while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) {
+           splx(spl);
+           pmap_expand_pml4(map, vaddr); /* need room for another pdpt entry */
+           spl = splhigh();
+         }
+         splx(spl);
+
+
+       /*
+        *      Allocate a VM page for the pdpt page
+        */
+       while ((m = vm_page_grab()) == VM_PAGE_NULL)
+               VM_PAGE_WAIT();
+
+       /*
+        *      put the page into the pmap's obj list so it
+        *      can be found later.
+        */
+       pn = m->phys_page;
+       pa = i386_ptob(pn);
+       i = pdptidx(map, vaddr);
+
+       vm_object_lock(map->pm_obj_pdpt);
+#if 0 /* DEBUG */
+       if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i)) {
+         kprintf("pmap_expand_pdpt: obj_pdpt not empty, pmap 0x%x pm_obj_pdpt 0x%x vaddr 0x%llx i 0x%llx\n",
+                 map, map->pm_obj_pdpt, vaddr, i);
+       }
+#endif
+       vm_page_insert(m, map->pm_obj_pdpt, (vm_object_offset_t)i);
+
+       vm_page_lock_queues();
+       vm_page_wire(m);
+
+       vm_page_unlock_queues();
+       vm_object_unlock(map->pm_obj_pdpt);
+       inuse_ptepages_count++;
+       map->stats.resident_count++;
+       map->stats.wired_count++;
+
+       /*
+        *      Zero the page.
+        */
+       pmap_zero_page(pn);
+
+       PMAP_READ_LOCK(map, spl);
+       /*
+        *      See if someone else expanded us first
+        */
+       if (pmap64_pde(map, vaddr) != PD_ENTRY_NULL) {
+               PMAP_READ_UNLOCK(map, spl);
+               vm_object_lock(map->pm_obj_pdpt);
+               vm_page_lock_queues();
+               vm_page_free(m);
+               inuse_ptepages_count--;
+               map->stats.resident_count--;
+               map->stats.wired_count--;
+
+               vm_page_unlock_queues();
+               vm_object_unlock(map->pm_obj_pdpt);
+               return;
+       }
+
+       /*
+        *      Set the page directory entry for this page table.
+        *      If we have allocated more than one hardware page,
+        *      set several page directory entries.
+        */
+
+       pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */
+
+       pmap_store_pte(pdptp, pa_to_pte(pa)
+                               | INTEL_PTE_VALID
+                               | INTEL_PTE_USER
+                               | INTEL_PTE_WRITE);
+
+       PMAP_READ_UNLOCK(map, spl);
+
+       return;
+
+}
+
+
+
+/*
+ *     Routine:        pmap_expand
+ *
+ *     Expands a pmap to be able to map the specified virtual address.
+ *
+ *     Allocates new virtual memory for the P0 or P1 portion of the
+ *     pmap, then re-maps the physical pages that were in the old
+ *     pmap to be in the new pmap.
+ *
+ *     Must be called with the pmap system and the pmap unlocked,
+ *     since these must be unlocked to use vm_allocate or vm_deallocate.
+ *     Thus it must be called in a loop that checks whether the map
+ *     has been expanded enough.
+ *     (We won't loop forever, since page tables aren't shrunk.)
+ */
+void
+pmap_expand(
+       pmap_t          map,
+       vm_map_offset_t vaddr)
+{
+       pt_entry_t              *pdp;
+       register vm_page_t      m;
+       register pmap_paddr_t   pa;
+       uint64_t                 i;
+       spl_t                   spl;
+       ppnum_t                 pn;
+
+       /*
+        * If this is not the kernel map and the cpu is 64-bit capable
+        * (while the kernel itself still runs in compatibility mode),
+        * propagate the expansion upward to the pdpt level first.
+        */
+
+       if (cpu_64bit && (map != kernel_pmap)) {
+         spl = splhigh();
+         while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) {
+           splx(spl);
+           pmap_expand_pdpt(map, vaddr); /* need room for another pde entry */
+           spl = splhigh();
+         }
+         splx(spl);
+       } else {
+         pdp = pmap_pde(map, vaddr);
+       }
+
+
+       /*
+        *      Allocate a VM page for the pde entries.
+        */
+       while ((m = vm_page_grab()) == VM_PAGE_NULL)
+               VM_PAGE_WAIT();
+
+       /*
+        *      put the page into the pmap's obj list so it
+        *      can be found later.
+        */
+       pn = m->phys_page;
+       pa = i386_ptob(pn);
+       i = pdeidx(map, vaddr);
+
+       vm_object_lock(map->pm_obj);
+#if 0 /* DEBUG */
+       if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i)) {
+         kprintf("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n",
+                 map, map->pm_obj, vaddr, i);
+       }
+#endif
+       vm_page_insert(m, map->pm_obj, (vm_object_offset_t)i);
+
+       vm_page_lock_queues();
+       vm_page_wire(m);
+       inuse_ptepages_count++;
+
+       vm_page_unlock_queues();
+       vm_object_unlock(map->pm_obj);
+
+       /*
+        *      Zero the page.
+        */
+       pmap_zero_page(pn);
+
+       PMAP_READ_LOCK(map, spl);
+       /*
+        *      See if someone else expanded us first
+        */
+       if (pmap_pte(map, vaddr) != PT_ENTRY_NULL) {
+               PMAP_READ_UNLOCK(map, spl);
+               vm_object_lock(map->pm_obj);
+
+               vm_page_lock_queues();
+               vm_page_free(m);
+               inuse_ptepages_count--;
+
+               vm_page_unlock_queues();
+               vm_object_unlock(map->pm_obj);
+               return;
+       }
+
+       pdp = pmap_pde(map, vaddr); /* refetch while locked */
+
+       /*
+        *      Set the page directory entry for this page table.
+        *      If we have allocated more than one hardware page,
+        *      set several page directory entries.
+        */
+
+       pmap_store_pte(pdp, pa_to_pte(pa)
+                               | INTEL_PTE_VALID
+                               | INTEL_PTE_USER
+                               | INTEL_PTE_WRITE);
+           
+
+       PMAP_READ_UNLOCK(map, spl);
+
+       return;
+}
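
On 64-bit pmaps the expansion cascades upward: pmap_expand() needs a page-directory entry, which may require pmap_expand_pdpt(), which may in turn require pmap_expand_pml4(); each level grabs one page, files it in that level's object at its slot index, zeroes it, and hooks it in with VALID|USER|WRITE. The slot indices correspond to the canonical 9/9/9/9/12 split of a 4 KB-page x86-64 virtual address, sketched standalone below (the source's pml4idx()/pdptidx()/pdeidx() also fold in per-pmap bases, so this shows only the hardware-level split):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t vaddr = 0x00007f1234567000ULL;   /* hypothetical user address */

        unsigned pml4 = (vaddr >> 39) & 0x1ff;    /* selects a PDPT            */
        unsigned pdpt = (vaddr >> 30) & 0x1ff;    /* selects a page directory  */
        unsigned pde  = (vaddr >> 21) & 0x1ff;    /* selects a page table      */
        unsigned pte  = (vaddr >> 12) & 0x1ff;    /* selects the 4 KB page     */

        printf("pml4=%u pdpt=%u pde=%u pte=%u offset=0x%llx\n",
               pml4, pdpt, pde, pte,
               (unsigned long long)(vaddr & 0xfff));
        return 0;
    }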
+
+
+/*
+ * pmap_sync_page_data_phys(ppnum_t pa)
+ * 
+ * Invalidates all of the instruction cache on a physical page and
+ * pushes any dirty data from the data cache for the same physical page
+ * Not required in i386.
+ */
+void
+pmap_sync_page_data_phys(__unused ppnum_t pa)
+{
+       return;
+}
+
+/*
+ * pmap_sync_page_attributes_phys(ppnum_t pa)
+ * 
+ * Write back and invalidate all cachelines on a physical page.
+ */
+void
+pmap_sync_page_attributes_phys(ppnum_t pa)
+{
+       cache_flush_page_phys(pa);
 }
-#endif/*       0 */
 
 int    collect_ref;
 int    collect_unref;
@@ -2235,7 +2924,6 @@ pmap_collect(
 {
        register pt_entry_t     *pdp, *ptp;
        pt_entry_t              *eptp;
-       vm_offset_t             pa;
        int                     wired;
        spl_t                   spl;
 
@@ -2249,21 +2937,19 @@ pmap_collect(
         *      Garbage collect map.
         */
        PMAP_READ_LOCK(p, spl);
-       PMAP_FLUSH_TLBS();
 
-       for (pdp = p->dirbase;
-            pdp < &p->dirbase[pdenum(p, LINEAR_KERNEL_ADDRESS)];
-            pdp += ptes_per_vm_page)
+       for (pdp = (pt_entry_t *)p->dirbase;
+            pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)];
+            pdp++)
        {
-           if (*pdp & INTEL_PTE_VALID) 
+          if (*pdp & INTEL_PTE_VALID) {
              if(*pdp & INTEL_PTE_REF) {
-               *pdp &= ~INTEL_PTE_REF;
+               pmap_store_pte(pdp, *pdp & ~INTEL_PTE_REF);
                collect_ref++;
              } else {
                collect_unref++;
-               pa = pte_to_pa(*pdp);
-               ptp = (pt_entry_t *)phystokv(pa);
-               eptp = ptp + NPTES*ptes_per_vm_page;
+               ptp = pmap_pte(p, pdetova(pdp - (pt_entry_t *)p->dirbase));
+               eptp = ptp + NPTEPG;
 
                /*
                 * If the pte page has any wired mappings, we cannot
@@ -2284,21 +2970,15 @@ pmap_collect(
                     * Remove the virtual addresses mapped by this pte page.
                     */
                    pmap_remove_range(p,
-                               pdetova(pdp - p->dirbase),
+                               pdetova(pdp - (pt_entry_t *)p->dirbase),
                                ptp,
                                eptp);
 
                    /*
                     * Invalidate the page directory pointer.
                     */
-                   {
-                       register int i = ptes_per_vm_page;
-                       register pt_entry_t *pdep = pdp;
-                       do {
-                           *pdep++ = 0;
-                       } while (--i > 0);
-                   }
-
+                   pmap_store_pte(pdp, 0x0);
+                
                    PMAP_READ_UNLOCK(p, spl);
 
                    /*
@@ -2307,82 +2987,40 @@ pmap_collect(
                    {
                        register vm_page_t m;
 
-                       vm_object_lock(pmap_object);
-                       m = vm_page_lookup(pmap_object, pa);
+                       vm_object_lock(p->pm_obj);
+                       m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]));
                        if (m == VM_PAGE_NULL)
                            panic("pmap_collect: pte page not in object");
                        vm_page_lock_queues();
                        vm_page_free(m);
                        inuse_ptepages_count--;
                        vm_page_unlock_queues();
-                       vm_object_unlock(pmap_object);
+                       vm_object_unlock(p->pm_obj);
                    }
 
                    PMAP_READ_LOCK(p, spl);
                }
-           }
+             }
+          }
        }
+       PMAP_UPDATE_TLBS(p, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
+
        PMAP_READ_UNLOCK(p, spl);
        return;
 
 }
 
-/*
- *     Routine:        pmap_kernel
- *     Function:
- *             Returns the physical map handle for the kernel.
- */
-#if    0
-pmap_t
-pmap_kernel(void)
-{
-       return (kernel_pmap);
-}
-#endif/*       0 */
 
-/*
- *     pmap_zero_page zeros the specified (machine independent) page.
- *     See machine/phys.c or machine/phys.s for implementation.
- */
-#if    0
 void
-pmap_zero_page(
-       register vm_offset_t    phys)
+pmap_copy_page(ppnum_t src, ppnum_t dst)
 {
-       register int    i;
-
-       assert(phys != vm_page_fictitious_addr);
-       i = PAGE_SIZE / INTEL_PGBYTES;
-       phys = intel_pfn(phys);
-
-       while (i--)
-               zero_phys(phys++);
+        bcopy_phys((addr64_t)i386_ptob(src),
+                  (addr64_t)i386_ptob(dst),
+                  PAGE_SIZE);
 }
-#endif/*       0 */
-
-/*
- *     pmap_copy_page copies the specified (machine independent) page.
- *     See machine/phys.c or machine/phys.s for implementation.
- */
-#if    0
-void
-pmap_copy_page(
-       vm_offset_t     src,
-       vm_offset_t     dst)
-{
-       int     i;
-
-       assert(src != vm_page_fictitious_addr);
-       assert(dst != vm_page_fictitious_addr);
-       i = PAGE_SIZE / INTEL_PGBYTES;
 
-       while (i--) {
-               copy_phys(intel_pfn(src), intel_pfn(dst));
-               src += INTEL_PGBYTES;
-               dst += INTEL_PGBYTES;
-       }
-}
-#endif/*       0 */
 
 /*
  *     Routine:        pmap_pageable
@@ -2400,13 +3038,13 @@ pmap_copy_page(
  */
 void
 pmap_pageable(
-       pmap_t          pmap,
-       vm_offset_t     start,
-       vm_offset_t     end,
-       boolean_t       pageable)
+       __unused pmap_t         pmap,
+       __unused vm_map_offset_t        start_addr,
+       __unused vm_map_offset_t        end_addr,
+       __unused boolean_t      pageable)
 {
 #ifdef lint
-       pmap++; start++; end++; pageable++;
+       pmap++; start_addr++; end_addr++; pageable++;
 #endif /* lint */
 }
 
@@ -2415,7 +3053,7 @@ pmap_pageable(
  */
 void
 phys_attribute_clear(
-       vm_offset_t     phys,
+       ppnum_t pn,
        int             bits)
 {
        pv_entry_t              pv_h;
@@ -2424,9 +3062,10 @@ phys_attribute_clear(
        int                     pai;
        register pmap_t         pmap;
        spl_t                   spl;
+       pmap_paddr_t            phys;
 
-       assert(phys != vm_page_fictitious_addr);
-       if (!valid_page(phys)) {
+       assert(pn != vm_page_fictitious_addr);
+       if (!valid_page(pn)) {
            /*
             *  Not a managed page.
             */
@@ -2439,7 +3078,7 @@ phys_attribute_clear(
         */
 
        PMAP_WRITE_LOCK(spl);
-
+       phys = i386_ptob(pn);
        pai = pa_index(phys);
        pv_h = pai_to_pvh(pai);
 
@@ -2461,35 +3100,27 @@ phys_attribute_clear(
                simple_lock(&pmap->lock);
 
                {
-                   register vm_offset_t va;
+                   register vm_map_offset_t va;
 
                    va = pv_e->va;
-                   pte = pmap_pte(pmap, va);
 
-#if    0
                    /*
-                    * Consistency checks.
+                    * First make sure any processor actively
+                    * using this pmap flushes its TLB state.
                     */
-                   assert(*pte & INTEL_PTE_VALID);
-                   /* assert(pte_to_phys(*pte) == phys); */
-#endif
 
-                   /*
-                    * Invalidate TLBs for all CPUs using this mapping.
-                    */
-                   PMAP_INVALIDATE_PAGE(pmap, va);
-               }
+                   PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
 
                /*
                 * Clear modify or reference bits.
                 */
-               {
-                   register int        i = ptes_per_vm_page;
-                   do {
-                       *pte++ &= ~bits;
-                   } while (--i > 0);
+
+                   pte = pmap_pte(pmap, va);
+                   pmap_update_pte(pte, *pte, (*pte & ~bits));
+
                }
                simple_unlock(&pmap->lock);
+
            }
        }
 
@@ -2503,7 +3134,7 @@ phys_attribute_clear(
  */
 boolean_t
 phys_attribute_test(
-       vm_offset_t     phys,
+       ppnum_t pn,
        int             bits)
 {
        pv_entry_t              pv_h;
@@ -2512,25 +3143,35 @@ phys_attribute_test(
        int                     pai;
        register pmap_t         pmap;
        spl_t                   spl;
+       pmap_paddr_t            phys;
 
-       assert(phys != vm_page_fictitious_addr);
-       if (!valid_page(phys)) {
+       assert(pn != vm_page_fictitious_addr);
+       if (!valid_page(pn)) {
            /*
             *  Not a managed page.
             */
            return (FALSE);
        }
 
+       phys = i386_ptob(pn);
+       pai = pa_index(phys);
+       /*
+        * Fast check: if the bits have already been collected,
+        * there is no need to take any locks.
+        * If they are not set, recheck after taking the lock in
+        * case they were pulled in while we were waiting for it.
+        */
+       if (pmap_phys_attributes[pai] & bits)
+           return (TRUE);
+       pv_h = pai_to_pvh(pai);
+
        /*
         *      Lock the pmap system first, since we will be checking
         *      several pmaps.
         */
-
        PMAP_WRITE_LOCK(spl);
 
-       pai = pa_index(phys);
-       pv_h = pai_to_pvh(pai);
-
        if (pmap_phys_attributes[pai] & bits) {
            PMAP_WRITE_UNLOCK(spl);
            return (TRUE);
@@ -2554,7 +3195,7 @@ phys_attribute_test(
                simple_lock(&pmap->lock);
 
                {
-                   register vm_offset_t va;
+                   register vm_map_offset_t va;
 
                    va = pv_e->va;
                    pte = pmap_pte(pmap, va);
@@ -2572,15 +3213,11 @@ phys_attribute_test(
                 * Check modify or reference bits.
                 */
                {
-                   register int        i = ptes_per_vm_page;
-
-                   do {
                        if (*pte++ & bits) {
                            simple_unlock(&pmap->lock);
                            PMAP_WRITE_UNLOCK(spl);
                            return (TRUE);
                        }
-                   } while (--i > 0);
                }
                simple_unlock(&pmap->lock);
            }
@@ -2594,13 +3231,14 @@ phys_attribute_test(
  */
 void
 phys_attribute_set(
-       vm_offset_t     phys,
+       ppnum_t pn,
        int             bits)
 {
        int                     spl;
+       pmap_paddr_t   phys;
 
-       assert(phys != vm_page_fictitious_addr);
-       if (!valid_page(phys)) {
+       assert(pn != vm_page_fictitious_addr);
+       if (!valid_page(pn)) {
            /*
             *  Not a managed page.
             */
@@ -2612,7 +3250,7 @@ phys_attribute_set(
         *      the phys attributes array.  Don't need to bother with
         *      ptes because the test routine looks here first.
         */
-
+       phys = i386_ptob(pn);
        PMAP_WRITE_LOCK(spl);
        pmap_phys_attributes[pa_index(phys)] |= bits;
        PMAP_WRITE_UNLOCK(spl);
@@ -2623,9 +3261,9 @@ phys_attribute_set(
  */
 
 void pmap_set_modify(
-       register vm_offset_t    phys)
+                    ppnum_t pn)
 {
-       phys_attribute_set(phys, PHYS_MODIFIED);
+       phys_attribute_set(pn, PHYS_MODIFIED);
 }
 
 /*
@@ -2634,9 +3272,9 @@ void pmap_set_modify(
 
 void
 pmap_clear_modify(
-       register vm_offset_t    phys)
+                 ppnum_t pn)
 {
-       phys_attribute_clear(phys, PHYS_MODIFIED);
+       phys_attribute_clear(pn, PHYS_MODIFIED);
 }
 
 /*
@@ -2648,9 +3286,9 @@ pmap_clear_modify(
 
 boolean_t
 pmap_is_modified(
-       register vm_offset_t    phys)
+                ppnum_t pn)
 {
-       return (phys_attribute_test(phys, PHYS_MODIFIED));
+       return (phys_attribute_test(pn, PHYS_MODIFIED));
 }
 
 /*
@@ -2661,9 +3299,15 @@ pmap_is_modified(
 
 void
 pmap_clear_reference(
-       vm_offset_t     phys)
+                    ppnum_t pn)
+{
+       phys_attribute_clear(pn, PHYS_REFERENCED);
+}
+
+void
+pmap_set_reference(ppnum_t pn)
 {
-       phys_attribute_clear(phys, PHYS_REFERENCED);
+       phys_attribute_set(pn, PHYS_REFERENCED);
 }
 
 /*
@@ -2675,9 +3319,36 @@ pmap_clear_reference(
 
 boolean_t
 pmap_is_referenced(
-       vm_offset_t     phys)
+                  ppnum_t pn)
 {
-       return (phys_attribute_test(phys, PHYS_REFERENCED));
+       return (phys_attribute_test(pn, PHYS_REFERENCED));
+}
+
+/*
+ * pmap_get_refmod(phys)
+ *  returns the referenced and modified bits of the specified
+ *  physical page.
+ */
+unsigned int
+pmap_get_refmod(ppnum_t pa)
+{
+       return (   ((phys_attribute_test(pa,   PHYS_MODIFIED))?   VM_MEM_MODIFIED : 0)
+                        | ((phys_attribute_test(pa, PHYS_REFERENCED))? VM_MEM_REFERENCED : 0));
+}
+
+/*
+ * pmap_clear_refmod(phys, mask)
+ *  clears the referenced and modified bits as specified by the mask
+ *  of the specified physical page.
+ */
+void
+pmap_clear_refmod(ppnum_t pa, unsigned int mask)
+{
+       unsigned int  x86Mask;
+
+       x86Mask = (   ((mask &   VM_MEM_MODIFIED)?   PHYS_MODIFIED : 0)
+                   | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0));
+       phys_attribute_clear(pa, x86Mask);
 }
 
 /*
@@ -2690,122 +3361,76 @@ pmap_is_referenced(
 void
 pmap_modify_pages(
        pmap_t          map,
-       vm_offset_t     s,
-       vm_offset_t     e)
+       vm_map_offset_t sva,
+       vm_map_offset_t eva)
 {
        spl_t                   spl;
        register pt_entry_t     *pde;
        register pt_entry_t     *spte, *epte;
-       vm_offset_t             l;
+       vm_map_offset_t         lva;
+       vm_map_offset_t         orig_sva;
 
        if (map == PMAP_NULL)
                return;
 
        PMAP_READ_LOCK(map, spl);
 
-       pde = pmap_pde(map, s);
-       while (s && s < e) {
-           l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
-           if (l > e)
-               l = e;
-           if (*pde & INTEL_PTE_VALID) {
-               spte = (pt_entry_t *)ptetokv(*pde);
-               if (l) {
-                  spte = &spte[ptenum(s)];
-                  epte = &spte[intel_btop(l-s)];
+       orig_sva = sva;
+       while (sva && sva < eva) {
+           lva = (sva + pde_mapped_size) & ~(pde_mapped_size-1);
+           if (lva > eva)
+               lva = eva;
+           pde = pmap_pde(map, sva);
+           if (pde && (*pde & INTEL_PTE_VALID)) {
+             spte = (pt_entry_t *)pmap_pte(map, (sva & ~(pde_mapped_size-1)));
+               if (lva) {
+                  spte = &spte[ptenum(sva)];
+                  epte = &spte[intel_btop(lva-sva)];
                } else {
-                  epte = &spte[intel_btop(PDE_MAPPED_SIZE)];
-                  spte = &spte[ptenum(s)];
+                  epte = &spte[intel_btop(pde_mapped_size)];
+                  spte = &spte[ptenum(sva)];
                }
                while (spte < epte) {
                    if (*spte & INTEL_PTE_VALID) {
-                       *spte |= (INTEL_PTE_MOD | INTEL_PTE_WRITE);
+                       pmap_store_pte(spte, *spte
+                                               | INTEL_PTE_MOD
+                                               | INTEL_PTE_WRITE);
                    }
                    spte++;
                }
            }
-           s = l;
+           sva = lva;
            pde++;
        }
-       PMAP_FLUSH_TLBS();
+       PMAP_UPDATE_TLBS(map, orig_sva, eva);
+
        PMAP_READ_UNLOCK(map, spl);
 }
 
 
 void 
-invalidate_icache(vm_offset_t addr, unsigned cnt, int phys)
+invalidate_icache(__unused vm_offset_t addr,
+                 __unused unsigned     cnt,
+                 __unused int          phys)
 {
        return;
 }
 void 
-flush_dcache(vm_offset_t addr, unsigned count, int phys)
+flush_dcache(__unused vm_offset_t      addr,
+            __unused unsigned          count,
+            __unused int               phys)
 {
        return;
 }
 
-#if    NCPUS > 1
-
-void inline
-pmap_wait_for_clear()
-{
-       register int            my_cpu;
-       spl_t                   s;
-       register pmap_t         my_pmap;
-
-       mp_disable_preemption();
-       my_cpu = cpu_number();
-       
-
-       my_pmap = real_pmap[my_cpu];
-
-       if (!(my_pmap && pmap_in_use(my_pmap, my_cpu)))
-               my_pmap = kernel_pmap;
-
-       /*
-        *      Raise spl to splhigh (above splip) to block out pmap_extract
-        *      from IO code (which would put this cpu back in the active
-        *      set).
-        */
-       s = splhigh();
-
-       /*
-        *      Wait for any pmap updates in progress, on either user
-        *      or kernel pmap.
-        */
-        while (*(volatile hw_lock_t)&my_pmap->lock.interlock ||
-         *(volatile hw_lock_t)&kernel_pmap->lock.interlock) {
-               continue;
-       }
-
-       splx(s);
-       mp_enable_preemption();
-}
-
-void
-pmap_flush_tlb_interrupt(void) {
-       pmap_wait_for_clear();
-
-       flush_tlb();
-}
-
-void
-pmap_reload_tlb_interrupt(void) {
-       pmap_wait_for_clear();
-
-       set_cr3(kernel_pmap->pdirbase);
-}
-
-       
-#endif /* NCPUS > 1 */
-
 #if    MACH_KDB
 
 /* show phys page mappings and attributes */
 
-extern void    db_show_page(vm_offset_t pa);
+extern void    db_show_page(pmap_paddr_t pa);
 
 void
-db_show_page(vm_offset_t pa)
+db_show_page(pmap_paddr_t pa)
 {
        pv_entry_t      pv_h;
        int             pai;
@@ -2842,7 +3467,7 @@ void
 db_kvtophys(
        vm_offset_t     vaddr)
 {
-       db_printf("0x%x", kvtophys(vaddr));
+       db_printf("0x%qx", kvtophys(vaddr));
 }
 
 /*
@@ -2853,7 +3478,7 @@ db_show_vaddrs(
        pt_entry_t      *dirbase)
 {
        pt_entry_t      *ptep, *pdep, tmp;
-       int             x, y, pdecnt, ptecnt;
+       unsigned int    x, y, pdecnt, ptecnt;
 
        if (dirbase == 0) {
                dirbase = kernel_pmap->dirbase;
@@ -2862,20 +3487,20 @@ db_show_vaddrs(
                db_printf("need a dirbase...\n");
                return;
        }
-       dirbase = (pt_entry_t *) ((unsigned long) dirbase & ~INTEL_OFFMASK);
+       dirbase = (pt_entry_t *) (int) ((unsigned long) dirbase & ~INTEL_OFFMASK);
 
        db_printf("dirbase: 0x%x\n", dirbase);
 
        pdecnt = ptecnt = 0;
        pdep = &dirbase[0];
-       for (y = 0; y < NPDES; y++, pdep++) {
+       for (y = 0; y < NPDEPG; y++, pdep++) {
                if (((tmp = *pdep) & INTEL_PTE_VALID) == 0) {
                        continue;
                }
                pdecnt++;
                ptep = (pt_entry_t *) ((*pdep) & ~INTEL_OFFMASK);
                db_printf("dir[%4d]: 0x%x\n", y, *pdep);
-               for (x = 0; x < NPTES; x++, ptep++) {
+               for (x = 0; x < NPTEPG; x++, ptep++) {
                        if (((tmp = *ptep) & INTEL_PTE_VALID) == 0) {
                                continue;
                        }
@@ -2899,95 +3524,685 @@ db_show_vaddrs(
 
 int
 pmap_list_resident_pages(
-       register pmap_t         pmap,
-       register vm_offset_t    *listp,
-       register int            space)
+       __unused pmap_t         pmap,
+       __unused vm_offset_t    *listp,
+       __unused int            space)
 {
        return 0;
 }
 #endif /* MACH_VM_DEBUG */
 
-#ifdef MACH_BSD
+
+
+/* temporary workaround */
+boolean_t
+coredumpok(__unused vm_map_t map, __unused vm_offset_t va)
+{
+#if 0
+       pt_entry_t     *ptep;
+
+       ptep = pmap_pte(map->pmap, va);
+       if (0 == ptep)
+               return FALSE;
+       return ((*ptep & (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE | INTEL_PTE_WIRED));
+#else
+       return TRUE;
+#endif
+}
+
+
+boolean_t
+phys_page_exists(
+                ppnum_t pn)
+{
+       pmap_paddr_t     phys;
+
+       assert(pn != vm_page_fictitious_addr);
+
+       if (!pmap_initialized)
+               return (TRUE);
+       phys = (pmap_paddr_t) i386_ptob(pn);
+       if (!pmap_valid_page(pn))
+               return (FALSE);
+
+       return TRUE;
+}
+
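+/*
+ * mapping_free_prime()
+ *
+ * Prime the pv_entry free list at startup by allocating a batch of
+ * entries from the pv_list zone and placing them on the free list.
+ */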
+void
+mapping_free_prime()
+{
+       int             i;
+       pv_entry_t      pv_e;
+
+       for (i = 0; i < (5 * PV_ALLOC_CHUNK); i++) {
+               pv_e = (pv_entry_t) zalloc(pv_list_zone);
+               PV_FREE(pv_e);
+       }
+}
+
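+/*
+ * mapping_adjust()
+ *
+ * Thread-call target that replenishes the pv_entry free list when it
+ * drops below PV_LOW_WATER_MARK; also registers the thread call itself
+ * on first invocation.
+ */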
+void
+mapping_adjust()
+{
+       pv_entry_t      pv_e;
+       int             i;
+       int             spl;
+
+       if (mapping_adjust_call == NULL) {
+               thread_call_setup(&mapping_adjust_call_data,
+                                 (thread_call_func_t) mapping_adjust,
+                                 (thread_call_param_t) NULL);
+               mapping_adjust_call = &mapping_adjust_call_data;
+       }
+       /* XXX  rethink best way to do locking here */
+       if (pv_free_count < PV_LOW_WATER_MARK) {
+               for (i = 0; i < PV_ALLOC_CHUNK; i++) {
+                       pv_e = (pv_entry_t) zalloc(pv_list_zone);
+                       SPLVM(spl);
+                       PV_FREE(pv_e);
+                       SPLX(spl);
+               }
+       }
+       mappingrecurse = 0;
+}
+
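+/*
+ * pmap_commpage32_init()
+ *
+ * Alias the kernel's 32-bit commpage pages into the user-visible
+ * commpage range: copy each kernel pte, mark it user + global, and
+ * strip the write bit so the user mapping is read-only.
+ */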
+void
+pmap_commpage32_init(vm_offset_t kernel_commpage, vm_offset_t user_commpage, int cnt)
+{
+  int i;
+  pt_entry_t *opte, *npte;
+  pt_entry_t pte;
+
+
+  for (i = 0; i < cnt; i++) {
+    opte = pmap_pte(kernel_pmap, (vm_map_offset_t)kernel_commpage);
+    if (0 == opte) panic("kernel_commpage");
+    pte = *opte | INTEL_PTE_USER|INTEL_PTE_GLOBAL;
+    pte &= ~INTEL_PTE_WRITE; // ensure read only
+    npte = pmap_pte(kernel_pmap, (vm_map_offset_t)user_commpage);
+    if (0 == npte) panic("user_commpage");
+    pmap_store_pte(npte, pte);
+    kernel_commpage += INTEL_PGBYTES;
+    user_commpage += INTEL_PGBYTES;
+  }
+}
+
+#define PMAP_COMMPAGE64_CNT  (_COMM_PAGE64_AREA_USED/PAGE_SIZE)
+pt_entry_t pmap_commpage64_ptes[PMAP_COMMPAGE64_CNT];
+
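+/*
+ * pmap_commpage64_init()
+ *
+ * Capture the kernel's 64-bit commpage ptes (made read-only and
+ * user-accessible) into pmap_commpage64_ptes[] so they can later be
+ * installed into 64-bit task pmaps by pmap_map_sharedpage().
+ */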
+void
+pmap_commpage64_init(vm_offset_t kernel_commpage, __unused vm_map_offset_t user_commpage, int cnt)
+{
+  spl_t s;
+  int i;
+  pt_entry_t *kptep;
+
+  s = splhigh();
+  for (i = 0; i< cnt; i++) {
+    kptep = pmap_pte(kernel_pmap, (uint64_t)kernel_commpage + (i*PAGE_SIZE));
+    if ((0 == kptep) || (0 == (*kptep & INTEL_PTE_VALID))) panic("pmap_commpage64_init pte");
+    pmap_commpage64_ptes[i] = ((*kptep & ~INTEL_PTE_WRITE) | INTEL_PTE_USER);
+  }
+  splx(s);
+
+}
+
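+/*
+ * pmap_map_sharedpage()
+ *
+ * For a 64-bit pmap, expand it as needed to cover the 64-bit commpage
+ * base address and install the ptes captured by pmap_commpage64_init().
+ */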
+void
+pmap_map_sharedpage(__unused task_t task, pmap_t p)
+{
+  pt_entry_t *ptep;
+  spl_t s;
+  int i;
+
+  if (!p->pm_64bit) return;
+  /* setup high 64 bit commpage */
+  s = splhigh();  
+  while ((ptep = pmap_pte(p, (uint64_t)_COMM_PAGE64_BASE_ADDRESS)) == PD_ENTRY_NULL) {
+    splx(s);
+    pmap_expand(p, (uint64_t)_COMM_PAGE64_BASE_ADDRESS);
+    s = splhigh();
+  }
+
+  for (i = 0; i< PMAP_COMMPAGE64_CNT; i++) {
+    ptep = pmap_pte(p, (uint64_t)_COMM_PAGE64_BASE_ADDRESS + (i*PAGE_SIZE));
+    if (0 == ptep) panic("pmap_map_sharedpage");
+    pmap_store_pte(ptep, pmap_commpage64_ptes[i]);
+  }
+  splx(s);
+
+}
+
+void
+pmap_unmap_sharedpage(pmap_t pmap)
+{
+  spl_t s;
+  pt_entry_t *ptep;
+  int i;
+
+  if (!pmap->pm_64bit) return;
+  s = splhigh();
+  for (i = 0; i< PMAP_COMMPAGE64_CNT; i++) {
+    ptep = pmap_pte(pmap, (uint64_t)_COMM_PAGE64_BASE_ADDRESS + (i*PAGE_SIZE));
+  if (ptep) pmap_store_pte(ptep, 0);
+  }
+  splx(s);
+}
+
+static cpu_pmap_t              cpu_pmap_master;
+
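+/*
+ * pmap_cpu_alloc()
+ *
+ * Allocate and initialize the per-cpu pmap data: the cpu_pmap structure
+ * itself (the boot cpu uses the static cpu_pmap_master) and the
+ * temporary copy/zero mapping windows, each backed by a kernel pte
+ * reserved here.
+ */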
+struct cpu_pmap *
+pmap_cpu_alloc(boolean_t is_boot_cpu)
+{
+       int                     ret;
+       int                     i;
+       cpu_pmap_t              *cp;
+       vm_offset_t             address;
+       vm_map_address_t        mapaddr;
+       vm_map_entry_t          entry;
+       pt_entry_t              *pte;
+       
+       if (is_boot_cpu) {
+               cp = &cpu_pmap_master;
+       } else {
+               /*
+                * The per-cpu pmap data structure itself.
+                */
+               ret = kmem_alloc(kernel_map,
+                                (vm_offset_t *) &cp, sizeof(cpu_pmap_t));
+               if (ret != KERN_SUCCESS) {
+                       printf("pmap_cpu_alloc() failed ret=%d\n", ret);
+                       return NULL;
+               }
+               bzero((void *)cp, sizeof(cpu_pmap_t));
+
+               /*
+                * The temporary windows used for copy/zero - see loose_ends.c
+                */
+               ret = vm_map_find_space(kernel_map,
+                   &mapaddr, PMAP_NWINDOWS*PAGE_SIZE, (vm_map_offset_t)0, 0, &entry);
+               if (ret != KERN_SUCCESS) {
+                       printf("pmap_cpu_alloc() "
+                               "vm_map_find_space ret=%d\n", ret);
+                       pmap_cpu_free(cp);
+                       return NULL;
+               }
+               address = (vm_offset_t)mapaddr;
+
+               for (i = 0; i < PMAP_NWINDOWS; i++, address += PAGE_SIZE) {
+                       while ((pte = pmap_pte(kernel_pmap, (vm_map_offset_t)address)) == 0)
+                               pmap_expand(kernel_pmap, (vm_map_offset_t)address);
+                       * (int *) pte = 0; 
+                       cp->mapwindow[i].prv_CADDR = (caddr_t) address;
+                       cp->mapwindow[i].prv_CMAP = pte;
+               }
+               vm_map_unlock(kernel_map);
+       }
+
+       cp->pdpt_window_index = PMAP_PDPT_FIRST_WINDOW;
+       cp->pde_window_index = PMAP_PDE_FIRST_WINDOW;
+       cp->pte_window_index = PMAP_PTE_FIRST_WINDOW;
+
+       return cp;
+}
+
+void
+pmap_cpu_free(struct cpu_pmap *cp)
+{
+       if (cp != NULL && cp != &cpu_pmap_master) {
+               kfree((void *) cp, sizeof(cpu_pmap_t));
+       }
+}
+
+
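+/*
+ * pmap_get_mapwindow()
+ *
+ * Claim a free per-cpu mapping window (one whose CMAP pte is zero) and
+ * install the supplied pte in it; returns NULL if no window is free.
+ * The caller is presumably responsible for clearing the window's pte
+ * when finished with it.
+ */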
+mapwindow_t *
+pmap_get_mapwindow(pt_entry_t pentry)
+{
+    mapwindow_t *mp;
+    int i;
+    boolean_t  istate;
+
+    /*
+     * can be called from hardware interrupt context
+     * so we need to protect the lookup process
+     */
+    istate = ml_set_interrupts_enabled(FALSE);
+
+    /*
+     * Note: 0th map reserved for pmap_pte()
+     */
+    for (i = PMAP_NWINDOWS_FIRSTFREE; i < PMAP_NWINDOWS; i++) {
+            mp = &current_cpu_datap()->cpu_pmap->mapwindow[i];
+
+           if (*mp->prv_CMAP == 0) {
+                   *mp->prv_CMAP = pentry;
+                   break;
+           }
+    }
+    if (i >= PMAP_NWINDOWS)
+            mp = NULL;
+    (void) ml_set_interrupts_enabled(istate);
+    
+    return (mp);
+}
+
+
 /*
- * pmap_pagemove
+ *     kern_return_t pmap_nest(grand, subord, vstart, nstart, size)
+ *
+ *     grand  = the pmap that we will nest subord into
+ *     subord = the pmap that goes into the grand
+ *     vstart = start of the range in grand where subord is inserted
+ *     nstart = start of the corresponding range in the nested (subord) pmap
+ *     size   = size of the nested area (up to 16TB)
+ *
+ *     Inserts a pmap into another.  This is used to implement shared segments.
  *
- * BSD support routine to reassign virtual addresses.
+ *     On x86 this is currently very limited: the range must be exactly one 256MB segment.
+ *
+ *     Note that we depend upon higher-level VM locks to ensure that things don't change while
+ *     we are doing this.  For example, the VM should not be doing any pmap enters while it is
+ *     nesting, nor should it perform two nests at once.
  */
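+/*
+ *     Illustrative call (purely hypothetical names and addresses), nesting a
+ *     single 256MB segment of a shared submap's pmap into a task pmap:
+ *
+ *             kr = pmap_nest(task_pmap, shared_pmap,
+ *                            (addr64_t)0x90000000ULL,     vstart in grand
+ *                            (addr64_t)0x90000000ULL,     nstart in subord
+ *                            (uint64_t)0x10000000ULL);    exactly one 256MB segment
+ */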
 
+
+kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size) {
+               
+        vm_map_offset_t        vaddr, nvaddr;
+       pd_entry_t      *pde,*npde;
+       unsigned int    i, need_flush;
+       unsigned int    num_pde;
+       spl_t           s;
+
+       // do validity tests
+
+       if(size & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;     /* We can only do this for multiples of 256MB */
+       if((size >> 28) > 65536)  return KERN_INVALID_VALUE;    /* Max size we can nest is 16TB */
+       if(vstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;   /* We can only do this aligned to 256MB */
+       if(nstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;   /* We can only do this aligned to 256MB */
+       if(size == 0) {    
+               panic("pmap_nest: size is invalid - %016llX\n", size);
+       }
+       if ((size >> 28) != 1) panic("pmap_nest: size 0x%llx must be 0x%x", size, NBPDE);
+
+       subord->pm_shared = TRUE;
+
+       // prepopulate subord pmap pde's if necessary
+
+       if (cpu_64bit) {
+         s = splhigh();
+         while (PD_ENTRY_NULL == (npde = pmap_pde(subord, nstart))) {
+           splx(s);
+           pmap_expand(subord, nstart);
+           s = splhigh();
+         }
+         splx(s);
+       }
+
+       PMAP_READ_LOCK(subord,s);
+       nvaddr = (vm_map_offset_t)nstart;
+       need_flush = 0;
+       num_pde = size >> PDESHIFT;
+
+       for (i=0;i<num_pde;i++) {
+         npde = pmap_pde(subord, nvaddr);
+         if ((0 == npde) || (*npde++ & INTEL_PTE_VALID) == 0) {
+           PMAP_READ_UNLOCK(subord,s);
+           pmap_expand(subord, nvaddr); // pmap_expand handles races
+           PMAP_READ_LOCK(subord,s);
+           need_flush++;
+         }
+         nvaddr += NBPDE;
+       }
+
+       if (need_flush) {
+         nvaddr = (vm_map_offset_t)nstart;
+         PMAP_UPDATE_TLBS(subord, nvaddr, nvaddr + (1 << 28) -1 );
+       }
+       PMAP_READ_UNLOCK(subord,s);
+
+       // copy pde's from subord pmap into grand pmap
+
+       if (cpu_64bit) {
+         s = splhigh();
+         while (PD_ENTRY_NULL == (pde = pmap_pde(grand, vstart))) {
+           splx(s);
+           pmap_expand(grand, vstart);
+           s = splhigh();
+         }
+         splx(s);
+       }
+
+       PMAP_READ_LOCK(grand,s);
+       vaddr = (vm_map_offset_t)vstart;
+       for (i=0;i<num_pde;i++,pde++) {
+         pd_entry_t tpde;
+         npde = pmap_pde(subord, nstart);
+         if (npde == 0) panic("pmap_nest: no npde, subord 0x%x nstart 0x%llx", subord, nstart);
+         tpde = *npde;
+         nstart += NBPDE;
+         pde = pmap_pde(grand, vaddr);
+         if (pde == 0) panic("pmap_nest: no pde, grand  0x%x vaddr 0x%llx", grand, vaddr);
+         vaddr += NBPDE;
+         pmap_store_pte(pde, tpde);
+       }
+       PMAP_UPDATE_TLBS(grand, vaddr, vaddr + (1 << 28) -1 );
+
+       PMAP_READ_UNLOCK(grand,s);
+
+       return KERN_SUCCESS;
+}
+
+/*
+ *     kern_return_t pmap_unnest(grand, vaddr)
+ *
+ *     grand  = the pmap from which the nested range is removed
+ *     vaddr  = start of the range in grand to be unnested
+ *
+ *     Removes a nested pmap range from another pmap.  This is used to implement
+ *     shared segments.  As with pmap_nest, this is limited to a single 256MB-aligned,
+ *     segment-sized range.
+ */
+
+kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr) {
+                       
+       spl_t s;
+       pd_entry_t *pde;
+       unsigned int i;
+       unsigned int num_pde;
+
+       PMAP_READ_LOCK(grand,s);
+
+       // invalidate all pdes for segment at vaddr in pmap grand
+
+       num_pde = (1<<28) >> PDESHIFT;
+
+       for (i=0;i<num_pde;i++,pde++) {
+         pde = pmap_pde(grand, (vm_map_offset_t)vaddr);
+         if (pde == 0) panic("pmap_unnest: no pde, grand 0x%x vaddr 0x%llx\n", grand, vaddr);
+         pmap_store_pte(pde, (pd_entry_t)0);
+         vaddr += NBPDE;
+       }
+       PMAP_UPDATE_TLBS(grand, vaddr, vaddr + (1<<28) -1 );
+
+       PMAP_READ_UNLOCK(grand,s);
+               
+       return KERN_SUCCESS;                                                            /* Bye, bye, butterfly... */
+}
+
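+/*
+ * pmap_switch()
+ *
+ * Switch the current cpu onto the given pmap's address space by loading
+ * its page directory base, with interrupts blocked for the duration.
+ */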
+void
+pmap_switch(pmap_t tpmap)
+{
+        spl_t  s;
+       int     my_cpu;
+
+       s = splhigh();          /* Make sure interruptions are disabled */
+       my_cpu = cpu_number();
+
+       set_dirbase(tpmap, my_cpu);
+
+       splx(s);
+}
+
+
+/*
+ * disable no-execute capability on
+ * the specified pmap
+ */
+void pmap_disable_NX(pmap_t pmap) {
+  
+        pmap->nx_enabled = 0;
+}
+
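+/*
+ * pt_fake_zone_info()
+ *
+ * Report page-table page usage in the style of zone statistics,
+ * presumably so zone reporting tools can display it alongside real zones.
+ */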
 void
-pmap_movepage(unsigned long from, unsigned long to, vm_size_t size)
+pt_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
+                 vm_size_t *alloc_size, int *collectable, int *exhaustable)
 {
-       spl_t   spl;
-       pt_entry_t      *pte, saved_pte;
-       /* Lock the kernel map */
+        *count      = inuse_ptepages_count;
+       *cur_size   = PAGE_SIZE * inuse_ptepages_count;
+       *max_size   = PAGE_SIZE * (inuse_ptepages_count + vm_page_inactive_count + vm_page_active_count + vm_page_free_count);
+       *elem_size  = PAGE_SIZE;
+       *alloc_size = PAGE_SIZE;
+
+       *collectable = 1;
+       *exhaustable = 0;
+}
 
+vm_offset_t pmap_cpu_high_map_vaddr(int cpu, enum high_cpu_types e)
+{
+  enum high_fixed_addresses a;
+  a = e + HIGH_CPU_END * cpu;
+  return pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a);
+}
+
+vm_offset_t pmap_high_map_vaddr(enum high_cpu_types e)
+{
+  return pmap_cpu_high_map_vaddr(cpu_number(), e);
+}
+
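+/*
+ * pmap_high_map()
+ *
+ * Install the given pte into this cpu's high fixed-address window for
+ * the requested use, invalidate the TLB entry for that page on this
+ * cpu, and return the window's virtual address.
+ */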
+vm_offset_t pmap_high_map(pt_entry_t pte, enum high_cpu_types e)
+{
+  enum high_fixed_addresses a;
+  vm_offset_t vaddr;
+
+  a = e + HIGH_CPU_END * cpu_number();
+  vaddr = (vm_offset_t)pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a);
+  *(pte_unique_base + a) = pte;
 
-       while (size > 0) {
-               PMAP_READ_LOCK(kernel_pmap, spl);
-               pte = pmap_pte(kernel_pmap, from);
-               if (pte == NULL)
-                       panic("pmap_pagemove from pte NULL");
-               saved_pte = *pte;
-               PMAP_READ_UNLOCK(kernel_pmap, spl);
+  /* TLB flush for this page for this  cpu */
+  invlpg((uintptr_t)vaddr);
 
-               pmap_enter(kernel_pmap, to, i386_trunc_page(*pte),
-                       VM_PROT_READ|VM_PROT_WRITE, 0, *pte & INTEL_PTE_WIRED);
+  return  vaddr;
+}
 
-               pmap_remove(kernel_pmap, from, from+PAGE_SIZE);
 
-               PMAP_READ_LOCK(kernel_pmap, spl);
-               pte = pmap_pte(kernel_pmap, to);
-               if (pte == NULL)
-                       panic("pmap_pagemove 'to' pte NULL");
+/*
+ * Called with pmap locked, we:
+ *  - scan through per-cpu data to see which other cpus need to flush
+ *  - send an IPI to each non-idle cpu to be flushed
+ *  - wait for all to signal back that they are inactive or we see that
+ *    they are in an interrupt handler or at a safe point
+ *  - flush the local tlb if it is active for this pmap
+ *  - return ... the caller will unlock the pmap
+ */
+void
+pmap_flush_tlbs(pmap_t pmap)
+{
+       unsigned int    cpu;
+       unsigned int    cpu_bit;
+       cpu_set         cpus_to_signal;
+       unsigned int    my_cpu = cpu_number();
+       pmap_paddr_t    pmap_cr3 = pmap->pm_cr3;
+       boolean_t       flush_self = FALSE;
+       uint64_t        deadline;
 
-               *pte = saved_pte;
-               PMAP_READ_UNLOCK(kernel_pmap, spl);
+       assert(!ml_get_interrupts_enabled());
 
-               from += PAGE_SIZE;
-               to += PAGE_SIZE;
-               size -= PAGE_SIZE;
+       /*
+        * Scan other cpus for matching active or task CR3.
+        * For idle cpus (with no active map) we mark them invalid but
+        * don't signal -- they'll check as they go busy.
+        * Note: for the kernel pmap we look for 64-bit shared address maps.
+        */
+       cpus_to_signal = 0;
+       for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
+               if (!cpu_datap(cpu)->cpu_running)
+                       continue;
+               if ((cpu_datap(cpu)->cpu_task_cr3   == pmap_cr3) ||
+                   (CPU_GET_ACTIVE_CR3(cpu) == pmap_cr3) ||
+                   (pmap->pm_shared) ||
+                   ((pmap == kernel_pmap) &&
+                    (!CPU_CR3_IS_ACTIVE(cpu) ||
+                     cpu_datap(cpu)->cpu_task_map == TASK_MAP_64BIT_SHARED))) {
+                       if (cpu == my_cpu) {
+                               flush_self = TRUE;
+                               continue;
+                       }
+                       cpu_datap(cpu)->cpu_tlb_invalid = TRUE;
+                       __asm__ volatile("mfence");
+
+                       if (CPU_CR3_IS_ACTIVE(cpu)) {
+                               cpus_to_signal |= cpu_bit;
+                               i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
+                       }
+               }
        }
 
-       /* Get the processors to update the TLBs */
-       PMAP_FLUSH_TLBS();
+       if (cpus_to_signal) {
+               KERNEL_DEBUG(0xef800024 | DBG_FUNC_START, cpus_to_signal, 0, 0, 0, 0);
+
+               deadline = mach_absolute_time() + LockTimeOut;
+               /*
+                * Wait for those other cpus to acknowledge
+                */
+               for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
+                       while ((cpus_to_signal & cpu_bit) != 0) {
+                               if (!cpu_datap(cpu)->cpu_running ||
+                                   cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
+                                   !CPU_CR3_IS_ACTIVE(cpu)) {
+                                       cpus_to_signal &= ~cpu_bit;
+                                       break;
+                               }
+                               if (mach_absolute_time() > deadline)
+                                       panic("pmap_flush_tlbs() "
+                                             "timeout pmap=%p cpus_to_signal=%p",
+                                             pmap, cpus_to_signal);
+                               cpu_pause();
+                       }
+                       if (cpus_to_signal == 0)
+                               break;
+               }
+               KERNEL_DEBUG(0xef800024 | DBG_FUNC_END, cpus_to_signal, 0, 0, 0, 0);
+       }
+
+       /*
+        * Flush local tlb if required.
+        * We need this flush even if the pmap being changed
+        * is the user map... in case we do a copyin/out
+        * before returning to user mode.
+        */
+       if (flush_self)
+               flush_tlb();
 
 }
 
-kern_return_t bmapvideo(vm_offset_t *info);
-kern_return_t bmapvideo(vm_offset_t *info) {
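+/*
+ * process_pmap_updates()
+ *
+ * Handle a deferred TLB shootdown on this cpu: flush the local tlb,
+ * clear this cpu's tlb-invalid flag, and fence so the cpu that requested
+ * the shootdown can observe the acknowledgement.
+ */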
+void
+process_pmap_updates(void)
+{
+       flush_tlb();
 
-       extern struct vc_info vinfo;
-#ifdef NOTIMPLEMENTED
-       (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info));    /* Copy out the video info */
-#endif
-       return KERN_SUCCESS;
+       current_cpu_datap()->cpu_tlb_invalid = FALSE;
+       __asm__ volatile("mfence");
 }
 
-kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
-kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {
-       
-#ifdef NOTIMPLEMENTED
-       pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr);       /* Map it in */
-#endif
-       return KERN_SUCCESS;
+void
+pmap_update_interrupt(void)
+{
+        KERNEL_DEBUG(0xef800028 | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+       assert(!ml_get_interrupts_enabled());
+
+       process_pmap_updates();
+
+        KERNEL_DEBUG(0xef800028 | DBG_FUNC_END, 0, 0, 0, 0, 0);
 }
 
-kern_return_t bmapmapr(vm_offset_t va);
-kern_return_t bmapmapr(vm_offset_t va) {
-       
-#ifdef NOTIMPLEMENTED
-       mapping_remove(current_act()->task->map->pmap, va);     /* Remove map */
-#endif
-       return KERN_SUCCESS;
+
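+/*
+ * pmap_cache_attributes()
+ *
+ * Return the cache mode to use for a physical page: pages outside
+ * managed memory are treated as I/O (uncached); all others default
+ * to copyback.
+ */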
+unsigned int pmap_cache_attributes(ppnum_t pn) {
+
+       if (!pmap_valid_page(pn))
+               return (VM_WIMG_IO);
+
+       return (VM_WIMG_COPYBACK);
 }
-#endif
 
-/* temporary workaround */
-boolean_t
-coredumpok(vm_map_t map, vm_offset_t va)
+#ifdef PMAP_DEBUG
+void
+pmap_dump(pmap_t p)
 {
-  pt_entry_t *ptep;
-  ptep = pmap_pte(map->pmap, va);
-  if (0 == ptep) return FALSE;
-  return ((*ptep & (INTEL_PTE_NCACHE|INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE|INTEL_PTE_WIRED));
+  int i;
+
+  kprintf("pmap 0x%x\n",p);
+
+  kprintf("  pm_cr3 0x%llx\n",p->pm_cr3);
+  kprintf("  pm_pml4 0x%x\n",p->pm_pml4);
+  kprintf("  pm_pdpt 0x%x\n",p->pm_pdpt);
+
+  kprintf("    pml4[0] 0x%llx\n",*p->pm_pml4);
+  for (i=0;i<8;i++)
+    kprintf("    pdpt[%d] 0x%llx\n",i, p->pm_pdpt[i]);
 }
+
+void pmap_dump_wrap(void)
+{
+  pmap_dump(current_cpu_datap()->cpu_active_thread->task->map->pmap);
+}
+
+void
+dump_4GB_pdpt(pmap_t p)
+{
+       int             spl;
+       pdpt_entry_t    *user_pdptp;
+       pdpt_entry_t    *kern_pdptp;
+       pdpt_entry_t    *pml4p;
+
+       spl = splhigh();
+       while ((user_pdptp = pmap64_pdpt(p, 0x0)) == PDPT_ENTRY_NULL) {
+               splx(spl);
+               pmap_expand_pml4(p, 0x0);
+               spl = splhigh();
+       }
+       kern_pdptp = kernel_pmap->pm_pdpt;
+       if (kern_pdptp == NULL)
+               panic("kern_pdptp == NULL");
+       kprintf("dump_4GB_pdpt(%p)\n"
+               "kern_pdptp=%p (phys=0x%016llx)\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "user_pdptp=%p (phys=0x%016llx)\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n",
+               p, kern_pdptp, kvtophys(kern_pdptp),
+               kern_pdptp+0, *(kern_pdptp+0),
+               kern_pdptp+1, *(kern_pdptp+1),
+               kern_pdptp+2, *(kern_pdptp+2),
+               kern_pdptp+3, *(kern_pdptp+3),
+               kern_pdptp+4, *(kern_pdptp+4),
+               user_pdptp, kvtophys(user_pdptp),
+               user_pdptp+0, *(user_pdptp+0),
+               user_pdptp+1, *(user_pdptp+1),
+               user_pdptp+2, *(user_pdptp+2),
+               user_pdptp+3, *(user_pdptp+3),
+               user_pdptp+4, *(user_pdptp+4));
+       kprintf("user pm_cr3=0x%016llx pm_hold=0x%08x pm_pml4=0x%08x\n",
+               p->pm_cr3, p->pm_hold, p->pm_pml4);
+       pml4p = (pdpt_entry_t *)p->pm_hold;
+       if (pml4p == NULL)
+               panic("user pml4p == NULL");
+       kprintf("\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n",
+               pml4p+0, *(pml4p),
+               pml4p+KERNEL_UBER_PML4_INDEX, *(pml4p+KERNEL_UBER_PML4_INDEX));
+       kprintf("kern pm_cr3=0x%016llx pm_hold=0x%08x pm_pml4=0x%08x\n",
+               kernel_pmap->pm_cr3, kernel_pmap->pm_hold, kernel_pmap->pm_pml4);
+       pml4p = (pdpt_entry_t *)kernel_pmap->pm_hold;
+       if (pml4p == NULL)
+               panic("kern pml4p == NULL");
+       kprintf("\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n",
+               pml4p+0, *(pml4p),
+               pml4p+511, *(pml4p+511));
+       splx(spl);
+}
+
+void dump_4GB_pdpt_thread(thread_t tp)
+{
+       dump_4GB_pdpt(tp->map->pmap);
+}
+
+
+#endif