diff --git a/osfmk/i386/pmap.c b/osfmk/i386/pmap.c
index e43dac464f457e5ed6a7dae88dcfb65806864467..36dae2f3ef138edd719cd43106ceea56865a6a75 100644
--- a/osfmk/i386/pmap.c
+++ b/osfmk/i386/pmap.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
 #include <mach_kdb.h>
 #include <mach_ldebug.h>
 
+#include <libkern/OSAtomic.h>
+
 #include <mach/machine/vm_types.h>
 
 #include <mach/boolean.h>
 #include <kern/thread.h>
 #include <kern/zalloc.h>
+#include <kern/queue.h>
 
 #include <kern/lock.h>
 #include <kern/kalloc.h>
 #include <i386/cpu_number.h>
 #include <i386/machine_cpu.h>
 #include <i386/mp_slave_boot.h>
+#include <i386/seg.h>
+#include <i386/serial_io.h>
+#include <i386/cpu_capabilities.h>
+#include <i386/machine_routines.h>
+#include <i386/proc_reg.h>
+#include <i386/tsc.h>
 
 #if    MACH_KDB
 #include <ddb/db_command.h>
 #include <ddb/db_print.h>
 #endif /* MACH_KDB */
 
-#include <kern/xpr.h>
-
 #include <vm/vm_protos.h>
 
 #include <i386/mp.h>
+#include <i386/mp_desc.h>
+
+#include <sys/kdebug.h>
+
+/* #define DEBUGINTERRUPTS 1  uncomment to ensure pmap callers have interrupts enabled */
+#ifdef DEBUGINTERRUPTS
+#define pmap_intr_assert() {if (processor_avail_count > 1 && !ml_get_interrupts_enabled()) panic("pmap interrupt assert %s, %d",__FILE__, __LINE__);}
+#else
+#define pmap_intr_assert()
+#endif
+
+#ifdef IWANTTODEBUG
+#undef DEBUG
+#define DEBUG 1
+#define POSTCODE_DELAY 1
+#include <i386/postcode.h>
+#endif /* IWANTTODEBUG */
+
+//#define PMAP_TRACES 1
+#ifdef PMAP_TRACES
+boolean_t      pmap_trace = FALSE;
+#define PMAP_TRACE(x,a,b,c,d,e)                                                \
+       if (pmap_trace) {                                               \
+               KERNEL_DEBUG_CONSTANT(x,a,b,c,d,e);                     \
+       }
+#else
+#define PMAP_TRACE(x,a,b,c,d,e)        KERNEL_DEBUG(x,a,b,c,d,e)
+#endif /* PMAP_TRACES */
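
As a usage illustration (not part of this diff), a call site could bracket a traced operation with start and end events. The subclass code and the _example names below are assumptions; only PMAP_TRACE, KDBG_CODE, DBG_MACH and DBG_FUNC_START/DBG_FUNC_END come from the kernel headers.

/* Hypothetical trace points around a pmap operation; 0x80 is an assumed
 * subclass code, not one defined by this file. */
#define DBG_PMAP_EXAMPLE_CODE(code)	KDBG_CODE(DBG_MACH, 0x80, (code))

static void
pmap_traced_op_example(pmap_t pmap, vm_map_offset_t vaddr)
{
	PMAP_TRACE(DBG_PMAP_EXAMPLE_CODE(1) | DBG_FUNC_START,
		   (int) pmap, (int) (vaddr >> 32), (int) vaddr, 0, 0);
	/* ... the operation being traced ... */
	PMAP_TRACE(DBG_PMAP_EXAMPLE_CODE(1) | DBG_FUNC_END,
		   (int) pmap, 0, 0, 0, 0);
}
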
 
 /*
  * Forward declarations for internal functions.
  */
-void           pmap_expand(
+void           pmap_expand_pml4(
+                       pmap_t          map,
+                       vm_map_offset_t v);
+
+void           pmap_expand_pdpt(
                        pmap_t          map,
-                       vm_offset_t     v);
+                       vm_map_offset_t v);
 
-extern void    pmap_remove_range(
+void   pmap_remove_range(
                        pmap_t          pmap,
-                       vm_offset_t     va,
+                       vm_map_offset_t va,
                        pt_entry_t      *spte,
                        pt_entry_t      *epte);
 
 void           phys_attribute_clear(
-                       ppnum_t phys,
+                       ppnum_t         phys,
                        int             bits);
 
-boolean_t      phys_attribute_test(
-                       ppnum_t phys,
+int            phys_attribute_test(
+                       ppnum_t         phys,
                        int             bits);
 
 void           phys_attribute_set(
-                       ppnum_t phys,
+                       ppnum_t         phys,
                        int             bits);
 
-void           pmap_growkernel(
-                       vm_offset_t addr);
-
 void           pmap_set_reference(
                        ppnum_t pn);
 
@@ -166,24 +208,35 @@ void              pmap_movepage(
                        unsigned long   to,
                        vm_size_t       size);
 
-pt_entry_t *   pmap_mapgetpte(
-                       vm_map_t        map,
-                       vm_offset_t     v);
-
 boolean_t      phys_page_exists(
                        ppnum_t pn);
 
-#ifndef        set_dirbase
-void           set_dirbase(vm_offset_t dirbase);
-#endif /* set_dirbase */
+
+#ifdef PMAP_DEBUG
+void dump_pmap(pmap_t);
+void dump_4GB_pdpt(pmap_t p);
+void dump_4GB_pdpt_thread(thread_t tp);
+#endif
 
 #define        iswired(pte)    ((pte) & INTEL_PTE_WIRED)
 
-#define        WRITE_PTE(pte_p, pte_entry)             *(pte_p) = (pte_entry);
-#define        WRITE_PTE_FAST(pte_p, pte_entry)        *(pte_p) = (pte_entry);
+int nx_enabled = 1;                    /* enable no-execute protection */
+int allow_data_exec  = VM_ABI_32;      /* 32-bit apps may execute data by default, 64-bit apps may not */
+int allow_stack_exec = 0;              /* No apps may execute from the stack by default */
+
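
A minimal sketch of how such a policy default might be overridden from a boot argument, in the same way this file parses other boot-args further down. The "nx" boot-arg name and the _example helper are assumptions for illustration only, not taken from this diff.

static void
pmap_nx_bootarg_example(void)
{
	int	boot_arg;

	/* hypothetical: let an "nx" boot-arg turn no-execute protection off */
	if (PE_parse_boot_arg("nx", &boot_arg))
		nx_enabled = boot_arg ? 1 : 0;
}
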
+int cpu_64bit  = 0;
+
+/*
+ * when spinning through pmap_remove
+ * ensure that we don't spend too much
+ * time with preemption disabled.
+ * I'm setting the current threshold
+ * to 20us
+ */
+#define MAX_PREEMPTION_LATENCY_NS 20000
+
+uint64_t max_preemption_latency_tsc = 0;
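
A minimal sketch of how this threshold can be used, assuming the tmrCvt()/tscFCvtn2t conversion helpers from the tsc code and the PMAP_LOCK()/PMAP_UNLOCK() macros defined further down in this diff: convert the 20us budget to TSC ticks once, then periodically open a preemption window while spinning through a long remove. The _example name is hypothetical.

static void
pmap_preemption_window_example(pmap_t map, uint64_t *deadline)
{
	/* convert the nanosecond budget to TSC ticks once */
	if (max_preemption_latency_tsc == 0)
		max_preemption_latency_tsc =
		    tmrCvt((uint64_t) MAX_PREEMPTION_LATENCY_NS, tscFCvtn2t);

	if (*deadline == 0)
		*deadline = rdtsc64() + max_preemption_latency_tsc;

	if (rdtsc64() > *deadline) {
		/* caller holds the pmap lock: drop it briefly so preemption
		 * can occur, then re-arm the deadline for the next batch */
		PMAP_UNLOCK(map);
		PMAP_LOCK(map);
		*deadline = rdtsc64() + max_preemption_latency_tsc;
	}
}
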
 
-#define value_64bit(value)  ((value) & 0xFFFFFFFF00000000LL)
-#define low32(x) ((unsigned int)((x) & 0x00000000ffffffffLL))
 
 /*
  *     Private data structures.
@@ -192,59 +245,204 @@ void             set_dirbase(vm_offset_t dirbase);
 /*
  *     For each vm_page_t, there is a list of all currently
  *     valid virtual mappings of that page.  An entry is
- *     a pv_entry_t; the list is the pv_table.
+ *     a pv_rooted_entry_t; the list is the pv_table.
+ *
+ *      N.B.  with the new combo rooted/hashed scheme it is
+ *      only possible to remove individual non-rooted entries
+ *      if they are found via the hashed chains as there is no
+ *      way to unlink the singly linked hashed entries if navigated to
+ *      via the queue list off the rooted entries.  Think of it as
+ *      hash/walk/pull, keeping track of the prev pointer while walking
+ *      the singly linked hash list.  All of this is to save memory and
+ *      keep both types of pv_entries as small as possible.
  */
 
+/*
+
+PV HASHING Changes - JK 1/2007
+
+Pve's establish physical to virtual mappings.  These are used for aliasing of a 
+physical page to (potentially many) virtual addresses within pmaps. In the previous 
+implementation the structure of the pv_entries (each 16 bytes in size) was
+
 typedef struct pv_entry {
-       struct pv_entry *next;          /* next pv_entry */
-       pmap_t          pmap;           /* pmap where mapping lies */
-       vm_offset_t     va;             /* virtual address for mapping */
+    struct pv_entry_t    next;
+    pmap_t                    pmap;
+    vm_map_offset_t   va;
 } *pv_entry_t;
 
-#define PV_ENTRY_NULL  ((pv_entry_t) 0)
+An initial array of these is created at boot time, one per physical page of memory, 
+indexed by the physical page number. Additionally, a pool of entries is created from a 
+pv_zone to be used as needed by pmap_enter() when it is creating new mappings.  
+Originally, we kept this pool around because the code in pmap_enter() was unable to 
+block if it needed an entry and none were available - we'd panic.  Some time ago I 
+restructured the pmap_enter() code so that for user pmaps it can block while zalloc'ing 
+a pv structure and restart, removing a panic from the code (in the case of the kernel 
+pmap we cannot block and still panic, so, we keep a separate hot pool for use only on 
+kernel pmaps).  The pool has not been removed since there is a large performance gain 
+keeping freed pv's around for reuse and not suffering the overhead of zalloc for every new pv we need.
+
+As pmap_enter() created new mappings it linked the new pve's for them off the fixed 
+pv array for that ppn (off the next pointer).  These pve's are accessed for several 
+operations, one of them being address space teardown.  In that case, we basically do this
+
+       for (every page/pte in the space) {
+               calc pve_ptr from the ppn in the pte
+               for (every pv in the list for the ppn) {
+                       if (this pv is for this pmap/vaddr) {
+                               do housekeeping
+                               unlink/free the pv
+                       }
+               }
+       }
+
+The problem arose when we were running, say 8000 (or even 2000) apache or other processes 
+and one or all terminate. The list hanging off each pv array entry could have thousands of 
+entries.  We were continuously linearly searching each of these lists as we stepped through 
+the address space we were tearing down.  Because of the locks we hold, likely taking a cache 
+miss for each node,  and interrupt disabling for MP issues the system became completely 
+unresponsive for many seconds while we did this.
+
+Realizing that pve's are accessed in two distinct ways (linearly running the list by ppn 
+for operations like pmap_page_protect and finding and modifying/removing a single pve as 
+part of pmap_enter processing) has led to modifying the pve structures and databases.
+
+There are now two types of pve structures.  A "rooted" structure which is basically the 
+original structure accessed in an array by ppn, and a "hashed" structure accessed on a 
+hash list via a hash of [pmap, vaddr].  These have been designed with the two goals of 
+minimizing wired memory and making the lookup of a ppn faster.  Since a vast majority of 
+pages in the system are not aliased and hence represented by a single pv entry I've kept 
+the rooted entry size as small as possible because there is one of these dedicated for 
+every physical page of memory.  The hashed pve's are larger due to the addition of the hash 
+link and the ppn entry needed for matching while running the hash list to find the entry we 
+are looking for.  This way, only systems that have lots of aliasing (like 2000+ httpd procs) 
+will pay the extra memory price. Both structures have the same first three fields allowing 
+some simplification in the code.
+
+They have these shapes
+
+typedef struct pv_rooted_entry {
+        queue_head_t qlink;
+        vm_map_offset_t va;
+        pmap_t          pmap;
+} *pv_rooted_entry_t;
+
+
+typedef struct pv_hashed_entry {
+  queue_head_t qlink;
+  vm_map_offset_t va;
+  pmap_t        pmap;
+  ppnum_t ppn;
+  struct pv_hashed_entry *nexth;
+} *pv_hashed_entry_t;
+
+The main flow difference is that the code is now aware of the rooted entry and the hashed 
+entries.  Code that runs the pv list still starts with the rooted entry and then continues 
+down the qlink onto the hashed entries.  Code that is looking up a specific pv entry first 
+checks the rooted entry and then hashes and runs the hash list for the match. The hash list 
+lengths are much smaller than the original pv lists that contained all aliases for the specific ppn.
+
+*/
+
+typedef struct pv_rooted_entry {     /* first three entries must match pv_hashed_entry_t */
+        queue_head_t qlink;
+       vm_map_offset_t va;             /* virtual address for mapping */
+       pmap_t          pmap;           /* pmap where mapping lies */
+} *pv_rooted_entry_t;
+
+#define PV_ROOTED_ENTRY_NULL   ((pv_rooted_entry_t) 0)
+
+pv_rooted_entry_t      pv_head_table;          /* array of entries, one per page */
+
+typedef struct pv_hashed_entry {     /* first three entries must match pv_rooted_entry_t */
+  queue_head_t qlink;
+  vm_map_offset_t va;
+  pmap_t        pmap;
+  ppnum_t ppn;
+  struct pv_hashed_entry *nexth;
+} *pv_hashed_entry_t;
+
+#define PV_HASHED_ENTRY_NULL ((pv_hashed_entry_t)0)
+
+#define NPVHASH 4095   /* MUST BE 2^N - 1 */
+pv_hashed_entry_t     *pv_hash_table;  /* hash lists */
+
+uint32_t npvhash = 0;
 
-pv_entry_t     pv_head_table;          /* array of entries, one per page */
+/* #define PV_DEBUG 1   uncomment to enable some PV debugging code */
+#ifdef PV_DEBUG
+#define CHK_NPVHASH() if(0 == npvhash) panic("npvhash uninitialized");
+#else
+#define CHK_NPVHASH()
+#endif
 
 /*
  *     pv_list entries are kept on a list that can only be accessed
  *     with the pmap system locked (at SPLVM, not in the cpus_active set).
- *     The list is refilled from the pv_list_zone if it becomes empty.
+ *     The list is refilled from the pv_hashed_list_zone if it becomes empty.
  */
-pv_entry_t     pv_free_list;           /* free list at SPLVM */
-decl_simple_lock_data(,pv_free_list_lock)
+pv_rooted_entry_t      pv_free_list = PV_ROOTED_ENTRY_NULL;            /* free list at SPLVM */
+pv_hashed_entry_t      pv_hashed_free_list = PV_HASHED_ENTRY_NULL;
+pv_hashed_entry_t      pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL;
+decl_simple_lock_data(,pv_hashed_free_list_lock)
+decl_simple_lock_data(,pv_hashed_kern_free_list_lock)
+decl_simple_lock_data(,pv_hash_table_lock)
+
 int pv_free_count = 0;
-#define PV_LOW_WATER_MARK 5000
-#define PV_ALLOC_CHUNK 2000
+int pv_hashed_free_count = 0;
+int pv_kern_free_count = 0;
+int pv_hashed_kern_free_count = 0;
+#define PV_HASHED_LOW_WATER_MARK 5000
+#define PV_HASHED_KERN_LOW_WATER_MARK 100
+#define PV_HASHED_ALLOC_CHUNK 2000
+#define PV_HASHED_KERN_ALLOC_CHUNK 50
 thread_call_t  mapping_adjust_call;
 static thread_call_data_t  mapping_adjust_call_data;
-int mappingrecurse = 0;
-
-#define        PV_ALLOC(pv_e) { \
-       simple_lock(&pv_free_list_lock); \
-       if ((pv_e = pv_free_list) != 0) { \
-           pv_free_list = pv_e->next; \
-            pv_free_count--; \
-            if (pv_free_count < PV_LOW_WATER_MARK) \
-              if (hw_compare_and_store(0,1,&mappingrecurse)) \
+uint32_t mappingrecurse = 0;
+
+#define        PV_HASHED_ALLOC(pvh_e) { \
+       simple_lock(&pv_hashed_free_list_lock); \
+       if ((pvh_e = pv_hashed_free_list) != 0) { \
+         pv_hashed_free_list = (pv_hashed_entry_t)pvh_e->qlink.next;   \
+            pv_hashed_free_count--; \
+            if (pv_hashed_free_count < PV_HASHED_LOW_WATER_MARK) \
+              if (hw_compare_and_store(0,1,(u_int *)&mappingrecurse)) \
+                thread_call_enter(mapping_adjust_call); \
+       } \
+       simple_unlock(&pv_hashed_free_list_lock); \
+}
+
+#define        PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt) {   \
+       simple_lock(&pv_hashed_free_list_lock); \
+       pvh_et->qlink.next = (queue_entry_t)pv_hashed_free_list;        \
+       pv_hashed_free_list = pvh_eh; \
+        pv_hashed_free_count += pv_cnt; \
+       simple_unlock(&pv_hashed_free_list_lock); \
+}
+
+#define        PV_HASHED_KERN_ALLOC(pvh_e) { \
+       simple_lock(&pv_hashed_kern_free_list_lock); \
+       if ((pvh_e = pv_hashed_kern_free_list) != 0) { \
+         pv_hashed_kern_free_list = (pv_hashed_entry_t)pvh_e->qlink.next;      \
+            pv_hashed_kern_free_count--; \
+            if (pv_hashed_kern_free_count < PV_HASHED_KERN_LOW_WATER_MARK) \
+              if (hw_compare_and_store(0,1,(u_int *)&mappingrecurse)) \
                 thread_call_enter(mapping_adjust_call); \
        } \
-       simple_unlock(&pv_free_list_lock); \
+       simple_unlock(&pv_hashed_kern_free_list_lock); \
 }
 
-#define        PV_FREE(pv_e) { \
-       simple_lock(&pv_free_list_lock); \
-       pv_e->next = pv_free_list; \
-       pv_free_list = pv_e; \
-        pv_free_count++; \
-       simple_unlock(&pv_free_list_lock); \
+#define        PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt) {       \
+       simple_lock(&pv_hashed_kern_free_list_lock); \
+       pvh_et->qlink.next = (queue_entry_t)pv_hashed_kern_free_list;   \
+       pv_hashed_kern_free_list = pvh_eh; \
+        pv_hashed_kern_free_count += pv_cnt; \
+       simple_unlock(&pv_hashed_kern_free_list_lock); \
 }
 
-zone_t         pv_list_zone;           /* zone of pv_entry structures */
+zone_t         pv_hashed_list_zone;    /* zone of pv_hashed_entry structures */
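
A rough sketch of how a caller might obtain a hashed pv entry with the macros above, falling back to the zone when the free list is empty, mirroring the pmap_enter() behaviour described earlier in the PV HASHING comment for user pmaps. The _example helper is hypothetical, not the shipping code.

static pv_hashed_entry_t
pv_hashed_alloc_example(boolean_t is_kernel_pmap)
{
	pv_hashed_entry_t	pvh_e = PV_HASHED_ENTRY_NULL;

	if (is_kernel_pmap) {
		/* kernel pmap path: cannot block, draws from the reserved pool
		 * (the real code panics if this also comes up empty) */
		PV_HASHED_KERN_ALLOC(pvh_e);
	} else {
		PV_HASHED_ALLOC(pvh_e);
		if (pvh_e == PV_HASHED_ENTRY_NULL)
			/* user pmaps may block in the zone allocator */
			pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
	}
	return (pvh_e);
}

Entries freed later are handed back with PV_HASHED_FREE_LIST(pvh_eh, pvh_et, count) (or the KERN variant), where head and tail may be the same entry when a single pv is freed.
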
 
-#ifdef PAE
 static zone_t pdpt_zone;
-#endif
-
 
 /*
  *     Each entry in the pv_head_table is locked by a bit in the
@@ -255,60 +453,63 @@ static zone_t pdpt_zone;
 char   *pv_lock_table;         /* pointer to array of bits */
 #define pv_lock_table_size(n)  (((n)+BYTE_SIZE-1)/BYTE_SIZE)
 
+char    *pv_hash_lock_table;
+#define pv_hash_lock_table_size(n)  (((n)+BYTE_SIZE-1)/BYTE_SIZE)
+
 /*
  *     First and last physical addresses that we maintain any information
  *     for.  Initialized to zero so that pmap operations done before
  *     pmap_init won't touch any non-existent structures.
  */
-pmap_paddr_t   vm_first_phys = (pmap_paddr_t) 0;
-pmap_paddr_t   vm_last_phys  = (pmap_paddr_t) 0;
 boolean_t      pmap_initialized = FALSE;/* Has pmap_init completed? */
 
-pmap_paddr_t    kernel_vm_end = (pmap_paddr_t)0;
-
-#define GROW_KERNEL_FUNCTION_IMPLEMENTED 1
-#if GROW_KERNEL_FUNCTION_IMPLEMENTED  /* not needed until growing kernel pmap */
 static struct vm_object kptobj_object_store;
 static vm_object_t kptobj;
-#endif
-
 
 /*
- *     Index into pv_head table, its lock bits, and the modify/reference
- *     bits starting at vm_first_phys.
+ *     Index into pv_head table, its lock bits, and the modify/reference and managed bits
  */
 
-#define pa_index(pa)   (i386_btop(pa - vm_first_phys))
+#define pa_index(pa)   (i386_btop(pa))
+#define ppn_to_pai(ppn)        ((int)ppn)
 
 #define pai_to_pvh(pai)                (&pv_head_table[pai])
 #define lock_pvh_pai(pai)      bit_lock(pai, (void *)pv_lock_table)
 #define unlock_pvh_pai(pai)    bit_unlock(pai, (void *)pv_lock_table)
 
+#define pvhashidx(pmap, va) (((uint32_t)pmap ^ ((uint32_t)((uint64_t)va >> PAGE_SHIFT) & 0xFFFFFFFF)) & npvhash)
+#define pvhash(idx)         (&pv_hash_table[idx])
+
+#define lock_hash_hash(hash)           bit_lock(hash, (void *)pv_hash_lock_table)
+#define unlock_hash_hash(hash) bit_unlock(hash, (void *)pv_hash_lock_table)
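
To make the two access patterns described in the PV HASHING comment concrete, here is a minimal sketch. The _example helpers are hypothetical, and a real caller would hold the page's pv lock so entries cannot be freed while they are examined.

/* 1. Visit every mapping of a physical page: start at the rooted entry,
 *    then follow the circular qlink onto any hashed entries. */
static void
pv_walk_example(int pai)
{
	pv_rooted_entry_t	pv_h = pai_to_pvh(pai);
	pv_hashed_entry_t	pvh_e;

	if (pv_h->pmap == PMAP_NULL)
		return;				/* page has no mappings */

	/* the rooted entry itself is the first mapping: pv_h->pmap, pv_h->va */
	pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink);
	while ((pv_rooted_entry_t) pvh_e != pv_h) {
		/* pvh_e->pmap / pvh_e->va identify one more alias of this page */
		pvh_e = (pv_hashed_entry_t) queue_next(&pvh_e->qlink);
	}
}

/* 2. Find the single pv entry for (pmap, va): check the rooted entry first,
 *    then hash and run the short nexth chain. */
static pv_hashed_entry_t
pv_find_example(int pai, pmap_t pmap, vm_map_offset_t va, ppnum_t ppn)
{
	pv_rooted_entry_t	pv_h = pai_to_pvh(pai);
	pv_hashed_entry_t	pvh_e;
	int			pvhash_idx;

	if (pv_h->pmap == pmap && pv_h->va == va)
		return ((pv_hashed_entry_t) pv_h);	/* same 3-field prefix */

	CHK_NPVHASH();
	pvhash_idx = pvhashidx(pmap, va);
	lock_hash_hash(pvhash_idx);		/* protects the nexth links */
	for (pvh_e = *pvhash(pvhash_idx); pvh_e != PV_HASHED_ENTRY_NULL;
	     pvh_e = pvh_e->nexth) {
		if (pvh_e->pmap == pmap && pvh_e->va == va && pvh_e->ppn == ppn)
			break;
	}
	unlock_hash_hash(pvhash_idx);
	return (pvh_e);
}
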
+
 /*
 *     Array of physical page attributes for managed pages.
  *     One byte per physical page.
  */
 char   *pmap_phys_attributes;
+unsigned int   last_managed_page = 0;
 
 /*
  *     Physical page attributes.  Copy bits from PTE definition.
  */
 #define        PHYS_MODIFIED   INTEL_PTE_MOD   /* page modified */
 #define        PHYS_REFERENCED INTEL_PTE_REF   /* page referenced */
-#define PHYS_NCACHE    INTEL_PTE_NCACHE
+#define PHYS_MANAGED   INTEL_PTE_VALID /* page is managed */
 
 /*
  *     Amount of virtual memory mapped by one
  *     page-directory entry.
  */
 #define        PDE_MAPPED_SIZE         (pdetova(1))
+uint64_t pde_mapped_size;
 
 /*
  *     Locking and TLB invalidation
  */
 
 /*
- *     Locking Protocols:
+ *     Locking Protocols: (changed 2/2007 JK)
  *
  *     There are two structures in the pmap module that need locking:
  *     the pmaps themselves, and the per-page pv_lists (which are locked
@@ -318,95 +519,61 @@ char      *pmap_phys_attributes;
  *     pmap_remove_all and pmap_copy_on_write operate on a physical page
  *     basis and want to do the locking in the reverse order, i.e. lock
  *     a pv_list and then go through all the pmaps referenced by that list.
- *     To protect against deadlock between these two cases, the pmap_lock
- *     is used.  There are three different locking protocols as a result:
- *
- *  1.  pmap operations only (pmap_extract, pmap_access, ...)  Lock only
- *             the pmap.
- *
- *  2.  pmap-based operations (pmap_enter, pmap_remove, ...)  Get a read
- *             lock on the pmap_lock (shared read), then lock the pmap
- *             and finally the pv_lists as needed [i.e. pmap lock before
- *             pv_list lock.]
- *
- *  3.  pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
- *             Get a write lock on the pmap_lock (exclusive write); this
- *             also guaranteees exclusive access to the pv_lists.  Lock the
- *             pmaps as needed.
  *
- *     At no time may any routine hold more than one pmap lock or more than
- *     one pv_list lock.  Because interrupt level routines can allocate
- *     mbufs and cause pmap_enter's, the pmap_lock and the lock on the
- *     kernel_pmap can only be held at splhigh.
+ *      The system wide pmap lock has been removed. Now, paths take a lock
+ *      on the pmap before changing its 'shape' and the reverse order lockers
+ *      (coming in by phys ppn) take a lock on the corresponding pv and then
+ *      retest to be sure nothing changed during the window before they locked
+ *      and can then run up/down the pv lists holding the list lock. This also
+ *      lets the pmap layer run (nearly completely) interrupt enabled, unlike
+ *      previously.
  */
 
 /*
- *     We raise the interrupt level to splvm, to block interprocessor
- *     interrupts during pmap operations.  We must take the CPU out of
- *     the cpus_active set while interrupts are blocked.
+ * pmap locking
  */
-#define SPLVM(spl)     { \
-       spl = splhigh(); \
-       mp_disable_preemption(); \
-       i_bit_clear(cpu_number(), &cpus_active); \
-       mp_enable_preemption(); \
+
+#define PMAP_LOCK(pmap) {      \
+       simple_lock(&(pmap)->lock);     \
 }
 
-#define SPLX(spl)      { \
-       mp_disable_preemption(); \
-       i_bit_set(cpu_number(), &cpus_active); \
-       mp_enable_preemption(); \
-       splx(spl); \
+#define PMAP_UNLOCK(pmap) {            \
+       simple_unlock(&(pmap)->lock);           \
 }
 
 /*
- *     Lock on pmap system
+ * PV locking
  */
-lock_t pmap_system_lock;
-
-#define PMAP_READ_LOCK(pmap, spl) {    \
-       SPLVM(spl);                     \
-       lock_read(&pmap_system_lock);   \
-       simple_lock(&(pmap)->lock);     \
-}
-
-#define PMAP_WRITE_LOCK(spl) {         \
-       SPLVM(spl);                     \
-       lock_write(&pmap_system_lock);  \
-}
 
-#define PMAP_READ_UNLOCK(pmap, spl) {          \
-       simple_unlock(&(pmap)->lock);           \
-       lock_read_done(&pmap_system_lock);      \
-       SPLX(spl);                              \
+#define LOCK_PVH(index)                {       \
+    mp_disable_preemption();           \
+    lock_pvh_pai(index);               \
 }
 
-#define PMAP_WRITE_UNLOCK(spl) {               \
-       lock_write_done(&pmap_system_lock);     \
-       SPLX(spl);                              \
+#define UNLOCK_PVH(index)  {      \
+    unlock_pvh_pai(index);        \
+    mp_enable_preemption();       \
 }
 
-#define PMAP_WRITE_TO_READ_LOCK(pmap) {                \
-       simple_lock(&(pmap)->lock);             \
-       lock_write_to_read(&pmap_system_lock);  \
-}
+/*
+ * PV hash locking
+ */
 
-#define LOCK_PVH(index)                lock_pvh_pai(index)
+#define LOCK_PV_HASH(hash)         lock_hash_hash(hash)
 
-#define UNLOCK_PVH(index)      unlock_pvh_pai(index)
+#define UNLOCK_PV_HASH(hash)       unlock_hash_hash(hash)
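
A hedged sketch of the reverse-order path described in the locking-protocol comment above: a physical-page-based operation locks the pv side first, then revalidates before touching any pmap. The _example name is hypothetical, not the shipping code.

static void
pv_side_operation_example(ppnum_t pn)
{
	int			pai = ppn_to_pai(pn);
	pv_rooted_entry_t	pv_h;

	LOCK_PVH(pai);			/* also disables preemption */
	pv_h = pai_to_pvh(pai);
	if (pv_h->pmap != PMAP_NULL) {
		/*
		 * Retest here: the mapping recorded in pv_h may have changed
		 * in the window before the lock was taken, so re-read the pte
		 * for (pv_h->pmap, pv_h->va) before relying on it, then walk
		 * the qlink list and operate on each mapping of the page.
		 */
	}
	UNLOCK_PVH(pai);
}
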
 
 #if    USLOCK_DEBUG
 extern int     max_lock_loops;
-extern int     disableSerialOuput;
 #define LOOP_VAR                                                       \
        unsigned int    loop_count;                                     \
-       loop_count = disableSerialOuput ? max_lock_loops                \
+       loop_count = disable_serial_output ? max_lock_loops             \
                                        : max_lock_loops*100
 #define LOOP_CHECK(msg, pmap)                                          \
        if (--loop_count == 0) {                                        \
                mp_disable_preemption();                                \
-               kprintf("%s: cpu %d pmap %x, cpus_active 0x%x\n",       \
-                         msg, cpu_number(), pmap, cpus_active);        \
+               kprintf("%s: cpu %d pmap %x\n",                         \
+                         msg, cpu_number(), pmap);                     \
                Debugger("deadlock detection");                         \
                mp_enable_preemption();                                 \
                loop_count = max_lock_loops;                            \
@@ -416,76 +583,15 @@ extern int        disableSerialOuput;
 #define LOOP_CHECK(msg, pmap)
 #endif /* USLOCK_DEBUG */
 
-#define PMAP_UPDATE_TLBS(pmap, s, e)                                   \
-{                                                                      \
-       cpu_set cpu_mask;                                               \
-       cpu_set users;                                                  \
-                                                                       \
-       mp_disable_preemption();                                        \
-       cpu_mask = 1 << cpu_number();                                   \
-                                                                       \
-       /* Since the pmap is locked, other updates are locked */        \
-       /* out, and any pmap_activate has finished. */                  \
-                                                                       \
-       /* find other cpus using the pmap */                            \
-       users = (pmap)->cpus_using & ~cpu_mask;                         \
-       if (users) {                                                    \
-            LOOP_VAR;                                                  \
-           /* signal them, and wait for them to finish */              \
-           /* using the pmap */                                        \
-           signal_cpus(users, (pmap), (s), (e));                       \
-           while (((pmap)->cpus_using & cpus_active & ~cpu_mask)) {    \
-               LOOP_CHECK("PMAP_UPDATE_TLBS", pmap);                   \
-               cpu_pause();                                            \
-           }                                                           \
-       }                                                               \
-       /* invalidate our own TLB if pmap is in use */                  \
-                                                                       \
-       if ((pmap)->cpus_using & cpu_mask) {                            \
-           INVALIDATE_TLB((pmap), (s), (e));                           \
-       }                                                               \
-                                                                       \
-       mp_enable_preemption();                                         \
-}
-
-#define MAX_TBIS_SIZE  32              /* > this -> TBIA */ /* XXX */
-
-#define INVALIDATE_TLB(m, s, e) {      \
-       flush_tlb();                    \
-}
-
-/*
- *     Structures to keep track of pending TLB invalidations
- */
-cpu_set                        cpus_active;
-cpu_set                        cpus_idle;
 
-#define UPDATE_LIST_SIZE       4
+static void pmap_flush_tlbs(pmap_t pmap);
 
-struct pmap_update_item {
-       pmap_t          pmap;           /* pmap to invalidate */
-       vm_offset_t     start;          /* start address to invalidate */
-       vm_offset_t     end;            /* end address to invalidate */
-};
+#define PMAP_UPDATE_TLBS(pmap, s, e)                                   \
+       pmap_flush_tlbs(pmap)
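
The forward-order (pmap-side) idiom pairs these macros: change the PTEs of interest while the pmap is locked, then flush the TLBs of every cpu using the pmap. A hedged sketch, not the shipping code; the _example name and the read-only example are illustrative.

static void
pmap_make_readonly_example(pmap_t pmap, vm_map_offset_t vaddr)
{
	pt_entry_t	*pte;

	PMAP_LOCK(pmap);	/* the pmap simple lock also keeps preemption
				 * disabled, which pmap_pte() expects for
				 * user pmaps */
	pte = pmap_pte(pmap, vaddr);
	if (pte != PT_ENTRY_NULL)
		pmap_store_pte(pte, *pte & ~INTEL_PTE_WRITE);
	PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
	PMAP_UNLOCK(pmap);
}
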
 
-typedef        struct pmap_update_item *pmap_update_item_t;
 
-/*
- *     List of pmap updates.  If the list overflows,
- *     the last entry is changed to invalidate all.
- */
-struct pmap_update_list {
-       decl_simple_lock_data(,lock)
-       int                     count;
-       struct pmap_update_item item[UPDATE_LIST_SIZE];
-} ;
-typedef        struct pmap_update_list *pmap_update_list_t;
+#define MAX_TBIS_SIZE  32              /* > this -> TBIA */ /* XXX */
 
-extern void signal_cpus(
-                       cpu_set         use_list,
-                       pmap_t          pmap,
-                       vm_offset_t     start,
-                       vm_offset_t     end);
 
 pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE];
 
@@ -493,20 +599,21 @@ pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE];
  *     Other useful macros.
  */
 #define current_pmap()         (vm_map_pmap(current_thread()->map))
-#define pmap_in_use(pmap, cpu) (((pmap)->cpus_using & (1 << (cpu))) != 0)
 
 struct pmap    kernel_pmap_store;
 pmap_t         kernel_pmap;
 
-#ifdef PMAP_QUEUE
-decl_simple_lock_data(,free_pmap_lock)
-#endif
+pd_entry_t    high_shared_pde;
+pd_entry_t    commpage64_pde;
 
 struct zone    *pmap_zone;             /* zone of pmap structures */
 
 int            pmap_debug = 0;         /* flag for debugging prints */
 
-unsigned int   inuse_ptepages_count = 0;       /* debugging */
+unsigned int   inuse_ptepages_count = 0;
+
+addr64_t       kernel64_cr3;
+boolean_t      no_shared_cr3 = FALSE;  /* -no_shared_cr3 boot arg */
 
 /*
  *     Pmap cache.  Cache is threaded through ref_count field of pmap.
@@ -518,97 +625,240 @@ pmap_t           pmap_cache_list;
 int            pmap_cache_count;
 decl_simple_lock_data(,pmap_cache_lock)
 
-extern vm_offset_t     hole_start, hole_end;
-
 extern char end;
 
 static int nkpt;
+extern uint32_t lowGlo;
 
 pt_entry_t     *DMAP1, *DMAP2;
 caddr_t         DADDR1;
 caddr_t         DADDR2;
 
-#if  DEBUG_ALIAS
-#define PMAP_ALIAS_MAX 32
-struct pmap_alias {
-        vm_offset_t rpc;
-        pmap_t pmap;
-        vm_offset_t va;
-        int cookie;
-#define PMAP_ALIAS_COOKIE 0xdeadbeef
-} pmap_aliasbuf[PMAP_ALIAS_MAX];
-int pmap_alias_index = 0;
-extern vm_offset_t get_rpc();
+static inline
+void pmap_pvh_unlink(pv_hashed_entry_t pv);
+
+/*
+ * unlinks the pv_hashed_entry_t pvh from the singly linked hash chain.
+ * properly deals with the anchor.
+ * must be called with the hash locked, does not unlock it
+ */
+
+static inline
+void pmap_pvh_unlink(pv_hashed_entry_t pvh)
+{
+  pv_hashed_entry_t curh;
+  pv_hashed_entry_t *pprevh;
+  int pvhash_idx;
+
+  CHK_NPVHASH();
+  pvhash_idx = pvhashidx(pvh->pmap, pvh->va);
+
+  pprevh = pvhash(pvhash_idx);
 
-#endif  /* DEBUG_ALIAS */
+#if PV_DEBUG
+  if (NULL == *pprevh) panic("pvh_unlink null anchor"); /* JK DEBUG */
+#endif
+  curh = *pprevh;
 
-#define        pmap_pde(m, v)  (&((m)->dirbase[(vm_offset_t)(v) >> PDESHIFT]))
-#define pdir_pde(d, v) (d[(vm_offset_t)(v) >> PDESHIFT])
+  while (PV_HASHED_ENTRY_NULL != curh) {
+    if (pvh == curh)
+      break;
+    pprevh = &curh->nexth;
+    curh = curh->nexth;
+  }
+  if (PV_HASHED_ENTRY_NULL == curh) panic("pmap_pvh_unlink no pvh");
+  *pprevh = pvh->nexth;
+  return;
+}
 
-static __inline int
-pmap_is_current(pmap_t pmap)
+/*
+ * for legacy, returns the address of the pde entry.
+ * for 64 bit, causes the pdpt page containing the pde entry to be mapped,
+ * then returns the mapped address of the pde entry in that page
+ */
+pd_entry_t *
+pmap_pde(pmap_t m, vm_map_offset_t v)
 {
-  return (pmap == kernel_pmap ||
-         (pmap->dirbase[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME));
+  pd_entry_t *pde;
+       if (!cpu_64bit || (m == kernel_pmap)) {
+         pde = (&((m)->dirbase[(vm_offset_t)(v) >> PDESHIFT]));
+       } else {
+         assert(m);
+         assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+         pde = pmap64_pde(m, v);
+       }
+       return pde;
 }
 
 
 /*
- * return address of mapped pte for vaddr va in pmap pmap.
+ * the single pml4 page per pmap is allocated at pmap create time and exists
+ * for the duration of the pmap. we allocate this page in kernel vm (to save us one
+ * level of page table dynamic mapping).
+ * this returns the address of the requested pml4 entry in the top level page.
  */
-pt_entry_t     *
-pmap_pte(pmap_t pmap, vm_offset_t va)
-{
-  pd_entry_t     *pde;
-  pd_entry_t     newpf;
-
-  pde = pmap_pde(pmap, va);
-  if (*pde != 0) {
-    if (pmap_is_current(pmap))
-      return( vtopte(va));
-    newpf = *pde & PG_FRAME;
-    if (((*CM4) & PG_FRAME) != newpf) {
-      *CM4 = newpf | INTEL_PTE_RW | INTEL_PTE_VALID;
-      invlpg((u_int)CA4);
-    }
-    return (pt_entry_t *)CA4 + (i386_btop(va) & (NPTEPG-1));
+static inline
+pml4_entry_t *
+pmap64_pml4(pmap_t pmap, vm_map_offset_t vaddr)
+{
+  return ((pml4_entry_t *)pmap->pm_hold + ((vm_offset_t)((vaddr>>PML4SHIFT)&(NPML4PG-1))));
+}
+
+/*
+ * maps in the pml4 page, if any, containing the pdpt entry requested
+ * and returns the address of the pdpt entry in that mapped page
+ */
+pdpt_entry_t *
+pmap64_pdpt(pmap_t pmap, vm_map_offset_t vaddr)
+{
+  pml4_entry_t newpf;
+  pml4_entry_t *pml4;
+  int i;
+
+  assert(pmap);
+  assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+  if ((vaddr > 0x00007FFFFFFFFFFFULL) && (vaddr < 0xFFFF800000000000ULL)) {
+    return(0);
   }
-  return(0);
+
+  pml4 = pmap64_pml4(pmap, vaddr);
+
+       if (pml4 && ((*pml4 & INTEL_PTE_VALID))) {
+
+               newpf = *pml4 & PG_FRAME;
+
+
+               for (i=PMAP_PDPT_FIRST_WINDOW; i < PMAP_PDPT_FIRST_WINDOW+PMAP_PDPT_NWINDOWS; i++) {
+                 if (((*(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP)) & PG_FRAME) == newpf) {
+                 return((pdpt_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR) + 
+                        ((vm_offset_t)((vaddr>>PDPTSHIFT)&(NPDPTPG-1))));
+                 }
+               }
+
+                 current_cpu_datap()->cpu_pmap->pdpt_window_index++;
+                 if (current_cpu_datap()->cpu_pmap->pdpt_window_index > (PMAP_PDPT_FIRST_WINDOW+PMAP_PDPT_NWINDOWS-1))
+                   current_cpu_datap()->cpu_pmap->pdpt_window_index = PMAP_PDPT_FIRST_WINDOW;
+                 pmap_store_pte(
+                                (current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pdpt_window_index].prv_CMAP),
+                                newpf | INTEL_PTE_RW | INTEL_PTE_VALID);
+                 invlpg((u_int)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pdpt_window_index].prv_CADDR));
+                 return ((pdpt_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pdpt_window_index].prv_CADDR) +
+                         ((vm_offset_t)((vaddr>>PDPTSHIFT)&(NPDPTPG-1))));
+       }
+
+       return (NULL);
 }
-       
-#define DEBUG_PTE_PAGE 0
 
-#if    DEBUG_PTE_PAGE
-void
-ptep_check(
-       ptep_t  ptep)
+/*
+ * maps in the pdpt page, if any, containing the pde entry requested
+ * and returns the address of the pde entry in that mapped page
+ */
+pd_entry_t *
+pmap64_pde(pmap_t pmap, vm_map_offset_t vaddr)
 {
-       register pt_entry_t     *pte, *epte;
-       int                     ctu, ctw;
+  pdpt_entry_t newpf;
+  pdpt_entry_t *pdpt;
+  int i;
 
-       /* check the use and wired counts */
-       if (ptep == PTE_PAGE_NULL)
-               return;
-       pte = pmap_pte(ptep->pmap, ptep->va);
-       epte = pte + INTEL_PGBYTES/sizeof(pt_entry_t);
-       ctu = 0;
-       ctw = 0;
-       while (pte < epte) {
-               if (pte->pfn != 0) {
-                       ctu++;
-                       if (pte->wired)
-                               ctw++;
+  assert(pmap);
+  assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+  if ((vaddr > 0x00007FFFFFFFFFFFULL) && (vaddr < 0xFFFF800000000000ULL)) {
+    return(0);
+  }
+
+  /*  if (vaddr & (1ULL << 63)) panic("neg addr");*/
+  pdpt = pmap64_pdpt(pmap, vaddr);
+
+         if (pdpt && ((*pdpt & INTEL_PTE_VALID))) {
+
+               newpf = *pdpt & PG_FRAME;
+
+               for (i=PMAP_PDE_FIRST_WINDOW; i < PMAP_PDE_FIRST_WINDOW+PMAP_PDE_NWINDOWS; i++) {
+                 if (((*(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP)) & PG_FRAME) == newpf) {
+                 return((pd_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR) + 
+                        ((vm_offset_t)((vaddr>>PDSHIFT)&(NPDPG-1))));
+                 }
                }
-               pte++;
+
+                 current_cpu_datap()->cpu_pmap->pde_window_index++;
+                 if (current_cpu_datap()->cpu_pmap->pde_window_index > (PMAP_PDE_FIRST_WINDOW+PMAP_PDE_NWINDOWS-1))
+                   current_cpu_datap()->cpu_pmap->pde_window_index = PMAP_PDE_FIRST_WINDOW;
+                 pmap_store_pte(
+                                (current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pde_window_index].prv_CMAP),
+                                newpf | INTEL_PTE_RW | INTEL_PTE_VALID);
+                 invlpg((u_int)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pde_window_index].prv_CADDR));
+                 return ((pd_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pde_window_index].prv_CADDR) +
+                         ((vm_offset_t)((vaddr>>PDSHIFT)&(NPDPG-1))));
        }
 
-       if (ctu != ptep->use_count || ctw != ptep->wired_count) {
-               printf("use %d wired %d - actual use %d wired %d\n",
-                       ptep->use_count, ptep->wired_count, ctu, ctw);
-               panic("pte count");
+       return (NULL);
+}
+
+/*
+ * Because the page tables (top 3 levels) are mapped into per cpu windows,
+ * callers must either disable interrupts or disable preemption before calling
+ * one of the pte mapping routines (e.g. pmap_pte()) as the returned vaddr
+ * is in one of those mapped windows and that cannot be allowed to change until
+ * the caller is done using the returned pte pointer. When done, the caller
+ * restores interrupts or preemption to its previous state, after which the
+ * vaddr for the returned pte can no longer be used.
+ */
+
+
+/*
+ * return address of mapped pte for vaddr va in pmap pmap.
+ * must be called with pre-emption or interrupts disabled
+ * if targeted pmap is not the kernel pmap
+ * since we may be passing back a virtual address that is
+ * associated with this cpu... pre-emption or interrupts
+ * must remain disabled until the caller is done using
+ * the pointer that was passed back.
+ *
+ * maps the pde page, if any, containing the pte in and returns
+ * the address of the pte in that mapped page
+ */
+pt_entry_t     *
+pmap_pte(pmap_t pmap, vm_map_offset_t vaddr)
+{
+        pd_entry_t     *pde;
+       pd_entry_t     newpf;
+       int i;
+
+       assert(pmap);
+       pde = pmap_pde(pmap,vaddr);
+
+       if (pde && ((*pde & INTEL_PTE_VALID))) {
+           if (pmap == kernel_pmap)
+               return (vtopte(vaddr)); /* compat kernel still has pte's mapped */
+#if TESTING
+           if (ml_get_interrupts_enabled() && get_preemption_level() == 0)
+               panic("pmap_pte: unsafe call");
+#endif
+               assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
+               newpf = *pde & PG_FRAME;
+
+               for (i=PMAP_PTE_FIRST_WINDOW; i < PMAP_PTE_FIRST_WINDOW+PMAP_PTE_NWINDOWS; i++) {
+                 if (((*(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP)) & PG_FRAME) == newpf) {
+                 return((pt_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR) + 
+                        ((vm_offset_t)i386_btop(vaddr) & (NPTEPG-1)));
+                 }
+               }
+
+                 current_cpu_datap()->cpu_pmap->pte_window_index++;
+                 if (current_cpu_datap()->cpu_pmap->pte_window_index > (PMAP_PTE_FIRST_WINDOW+PMAP_PTE_NWINDOWS-1))
+                   current_cpu_datap()->cpu_pmap->pte_window_index = PMAP_PTE_FIRST_WINDOW;
+                 pmap_store_pte(
+                                (current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pte_window_index].prv_CMAP),
+                                newpf | INTEL_PTE_RW | INTEL_PTE_VALID);
+                 invlpg((u_int)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pte_window_index].prv_CADDR));
+                 return ((pt_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pte_window_index].prv_CADDR) +
+                         ((vm_offset_t)i386_btop(vaddr) & (NPTEPG-1)));
        }
+
+       return(NULL);
 }
-#endif /* DEBUG_PTE_PAGE */
+       
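
A minimal usage sketch of the discipline described above for a non-kernel pmap: keep preemption disabled for as long as the returned pointer, which may reference a per-cpu mapping window, is dereferenced. The _example name and parameters are hypothetical.

static pt_entry_t
pmap_read_pte_example(pmap_t user_pmap, vm_map_offset_t vaddr)
{
	pt_entry_t	*ptep;
	pt_entry_t	pte_val = 0;

	mp_disable_preemption();
	ptep = pmap_pte(user_pmap, vaddr);	/* may return a per-cpu window address */
	if (ptep != PT_ENTRY_NULL)
		pte_val = *ptep;
	mp_enable_preemption();			/* the window may now be reused */
	return (pte_val);
}
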
 
 /*
  *     Map memory at initialization.  The physical addresses being
@@ -619,17 +869,18 @@ ptep_check(
  */
 vm_offset_t
 pmap_map(
-       register vm_offset_t    virt,
-       register vm_offset_t    start_addr,
-       register vm_offset_t    end_addr,
-       register vm_prot_t      prot)
+       vm_offset_t     virt,
+       vm_map_offset_t start_addr,
+       vm_map_offset_t end_addr,
+       vm_prot_t       prot,
+       unsigned int    flags)
 {
-       register int            ps;
+       int             ps;
 
        ps = PAGE_SIZE;
        while (start_addr < end_addr) {
-               pmap_enter(kernel_pmap,
-                       virt, (ppnum_t) i386_btop(start_addr), prot, 0, FALSE);
+               pmap_enter(kernel_pmap, (vm_map_offset_t)virt,
+                          (ppnum_t) i386_btop(start_addr), prot, flags, FALSE);
                virt += ps;
                start_addr += ps;
        }
@@ -640,35 +891,43 @@ pmap_map(
  *     Back-door routine for mapping kernel VM at initialization.  
  *     Useful for mapping memory outside the range
  *      Sets no-cache, A, D.
- *     [vm_first_phys, vm_last_phys) (i.e., devices).
  *     Otherwise like pmap_map.
  */
 vm_offset_t
 pmap_map_bd(
-       register vm_offset_t    virt,
-       register vm_offset_t    start_addr,
-       register vm_offset_t    end_addr,
-       vm_prot_t               prot)
+       vm_offset_t     virt,
+       vm_map_offset_t start_addr,
+       vm_map_offset_t end_addr,
+       vm_prot_t       prot,
+       unsigned int    flags)
 {
-       register pt_entry_t     template;
-       register pt_entry_t     *pte;
+       pt_entry_t      template;
+       pt_entry_t      *pte;
+       spl_t           spl;
 
        template = pa_to_pte(start_addr)
-               | INTEL_PTE_NCACHE
                | INTEL_PTE_REF
                | INTEL_PTE_MOD
                | INTEL_PTE_WIRED
                | INTEL_PTE_VALID;
+
+       if(flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)) {
+           template |= INTEL_PTE_NCACHE;
+           if(!(flags & (VM_MEM_GUARDED | VM_WIMG_USE_DEFAULT)))
+                   template |= INTEL_PTE_PTA;
+       }
+
        if (prot & VM_PROT_WRITE)
            template |= INTEL_PTE_WRITE;
 
-       /* XXX move pmap_pte out of loop, once one pte mapped, all are */
        while (start_addr < end_addr) {
-               pte = pmap_pte(kernel_pmap, virt);
+               spl = splhigh();
+               pte = pmap_pte(kernel_pmap, (vm_map_offset_t)virt);
                if (pte == PT_ENTRY_NULL) {
                        panic("pmap_map_bd: Invalid kernel address\n");
                }
-               WRITE_PTE_FAST(pte, template)
+               pmap_store_pte(pte, template);
+               splx(spl);
                pte_increment_pa(template);
                virt += PAGE_SIZE;
                start_addr += PAGE_SIZE;
@@ -685,6 +944,167 @@ extern  vm_offset_t     etext;
 extern  void            *sectHIBB;
 extern  int             sectSizeHIB;
 
+void
+pmap_cpu_init(void)
+{
+       /*
+        * Here early in the life of a processor (from cpu_mode_init()).
+        * If we're not in 64-bit mode, enable the global TLB feature.
+        * Note: regardless of mode we continue to set the global attribute
+        * bit in ptes for all (32-bit) global pages such as the commpage.
+        */
+       if (!cpu_64bit) {
+               set_cr4(get_cr4() | CR4_PGE);
+       }
+
+       /*
+        * Initialize the per-cpu, TLB-related fields.
+        */
+       current_cpu_datap()->cpu_active_cr3 = kernel_pmap->pm_cr3;
+       current_cpu_datap()->cpu_tlb_invalid = FALSE;
+}
+
+vm_offset_t
+pmap_high_shared_remap(enum high_fixed_addresses e, vm_offset_t va, int sz)
+{
+  vm_offset_t ve = pmap_index_to_virt(e);
+  pt_entry_t *ptep;
+  pmap_paddr_t pa;
+  int i;
+  spl_t s;
+
+  assert(0 == (va & PAGE_MASK));  /* expecting page aligned */
+  s = splhigh();
+  ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)ve);
+
+  for (i=0; i< sz; i++) {
+    pa = (pmap_paddr_t) kvtophys(va);
+    pmap_store_pte(ptep, (pa & PG_FRAME)
+                               | INTEL_PTE_VALID
+                               | INTEL_PTE_GLOBAL
+                               | INTEL_PTE_RW
+                               | INTEL_PTE_REF
+                               | INTEL_PTE_MOD);
+    va+= PAGE_SIZE;
+    ptep++;
+  }
+  splx(s);
+  return ve;
+}
+
+vm_offset_t
+pmap_cpu_high_shared_remap(int cpu, enum high_cpu_types e, vm_offset_t va, int sz)
+{ 
+  enum high_fixed_addresses    a = e + HIGH_CPU_END * cpu;
+  return pmap_high_shared_remap(HIGH_FIXED_CPUS_BEGIN + a, va, sz);
+}
+
+void pmap_init_high_shared(void);
+
+extern vm_offset_t gdtptr, idtptr;
+
+extern uint32_t low_intstack;
+
+extern struct fake_descriptor ldt_desc_pattern;
+extern struct fake_descriptor tss_desc_pattern;
+
+extern char hi_remap_text, hi_remap_etext;
+extern char t_zero_div;
+
+pt_entry_t *pte_unique_base;
+
+void
+pmap_init_high_shared(void)
+{
+
+       vm_offset_t haddr;
+        struct __gdt_desc_struct gdt_desc = {0,0,0};
+       struct __idt_desc_struct idt_desc = {0,0,0};
+       spl_t s;
+#if MACH_KDB
+       struct i386_tss *ttss;
+#endif
+
+       kprintf("HIGH_MEM_BASE 0x%x fixed per-cpu begin 0x%x\n", 
+               HIGH_MEM_BASE,pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN));
+       s = splhigh();
+       pte_unique_base = pmap_pte(kernel_pmap, (vm_map_offset_t)pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN));
+       splx(s);
+
+       if (i386_btop(&hi_remap_etext - &hi_remap_text + 1) >
+                               HIGH_FIXED_TRAMPS_END - HIGH_FIXED_TRAMPS + 1)
+               panic("tramps too large");
+       haddr = pmap_high_shared_remap(HIGH_FIXED_TRAMPS,
+                                       (vm_offset_t) &hi_remap_text, 3);
+       kprintf("tramp: 0x%x, ",haddr);
+       printf("hi mem tramps at 0x%x\n",haddr);
+       /* map gdt up high and update ptr for reload */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_GDT,
+                                       (vm_offset_t) master_gdt, 1);
+       __asm__ __volatile__("sgdt %0": "=m" (gdt_desc): :"memory");
+       gdt_desc.address = haddr;
+       kprintf("GDT: 0x%x, ",haddr);
+       /* map ldt up high */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_LDT_BEGIN,
+                                       (vm_offset_t) master_ldt,
+                                       HIGH_FIXED_LDT_END - HIGH_FIXED_LDT_BEGIN + 1);
+       kprintf("LDT: 0x%x, ",haddr);
+       /* put new ldt addr into gdt */
+       master_gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
+       master_gdt[sel_idx(KERNEL_LDT)].offset = (vm_offset_t) haddr;
+       fix_desc(&master_gdt[sel_idx(KERNEL_LDT)], 1);
+       master_gdt[sel_idx(USER_LDT)] = ldt_desc_pattern;
+       master_gdt[sel_idx(USER_LDT)].offset = (vm_offset_t) haddr;
+       fix_desc(&master_gdt[sel_idx(USER_LDT)], 1);
+
+       /* map idt up high */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_IDT,
+                                       (vm_offset_t) master_idt, 1);
+       __asm__ __volatile__("sidt %0" : "=m" (idt_desc));
+       idt_desc.address = haddr;
+       kprintf("IDT: 0x%x, ", haddr);
+       /* remap ktss up high and put new high addr into gdt */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_KTSS,
+                                       (vm_offset_t) &master_ktss, 1);
+       master_gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
+       master_gdt[sel_idx(KERNEL_TSS)].offset = (vm_offset_t) haddr;
+       fix_desc(&master_gdt[sel_idx(KERNEL_TSS)], 1);
+       kprintf("KTSS: 0x%x, ",haddr);
+#if MACH_KDB
+       /* remap dbtss up high and put new high addr into gdt */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_DBTSS,
+                                       (vm_offset_t) &master_dbtss, 1);
+       master_gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
+       master_gdt[sel_idx(DEBUG_TSS)].offset = (vm_offset_t) haddr;
+       fix_desc(&master_gdt[sel_idx(DEBUG_TSS)], 1);
+       ttss = (struct i386_tss *)haddr;
+       kprintf("DBTSS: 0x%x, ",haddr);
+#endif /* MACH_KDB */
+
+       /* remap dftss up high and put new high addr into gdt */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_DFTSS,
+                                       (vm_offset_t) &master_dftss, 1);
+       master_gdt[sel_idx(DF_TSS)] = tss_desc_pattern;
+       master_gdt[sel_idx(DF_TSS)].offset = (vm_offset_t) haddr;
+       fix_desc(&master_gdt[sel_idx(DF_TSS)], 1);
+       kprintf("DFTSS: 0x%x\n",haddr);
+
+       /* remap mctss up high and put new high addr into gdt */
+       haddr = pmap_high_shared_remap(HIGH_FIXED_DFTSS,
+                                       (vm_offset_t) &master_mctss, 1);
+       master_gdt[sel_idx(MC_TSS)] = tss_desc_pattern;
+       master_gdt[sel_idx(MC_TSS)].offset = (vm_offset_t) haddr;
+       fix_desc(&master_gdt[sel_idx(MC_TSS)], 1);
+       kprintf("MCTSS: 0x%x\n",haddr);
+
+       __asm__ __volatile__("lgdt %0": "=m" (gdt_desc));
+       __asm__ __volatile__("lidt %0": "=m" (idt_desc));
+       kprintf("gdt/idt reloaded, ");
+       set_tr(KERNEL_TSS);
+       kprintf("tr reset to KERNEL_TSS\n");
+}
+
+
 /*
  *     Bootstrap the system enough to run with virtual memory.
  *     Map the kernel's code and data, and allocate the system page table.
@@ -706,50 +1126,63 @@ extern  int             sectSizeHIB;
 
 void
 pmap_bootstrap(
-       __unused vm_offset_t    load_start)
+       __unused vm_offset_t    load_start,
+       boolean_t               IA32e)
 {
        vm_offset_t     va;
        pt_entry_t      *pte;
        int i;
        int wpkernel, boot_arg;
+       pdpt_entry_t *pdpt;
+       spl_t s;
 
        vm_last_addr = VM_MAX_KERNEL_ADDRESS;   /* Set the highest address
                                                 * known to VM */
-
        /*
         *      The kernel's pmap is statically allocated so we don't
         *      have to use pmap_create, which is unlikely to work
         *      correctly at this part of the boot sequence.
         */
 
+
        kernel_pmap = &kernel_pmap_store;
-#ifdef PMAP_QUEUE
-       kernel_pmap->pmap_link.next = (queue_t)kernel_pmap;             /* Set up anchor forward */
-       kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap;             /* Set up anchor reverse */
-#endif
        kernel_pmap->ref_count = 1;
+       kernel_pmap->nx_enabled = FALSE;
+       kernel_pmap->pm_task_map = TASK_MAP_32BIT;
        kernel_pmap->pm_obj = (vm_object_t) NULL;
        kernel_pmap->dirbase = (pd_entry_t *)((unsigned int)IdlePTD | KERNBASE);
-       kernel_pmap->pdirbase = (pd_entry_t *)IdlePTD;
-#ifdef PAE
-       kernel_pmap->pm_pdpt = (pd_entry_t *)((unsigned int)IdlePDPT | KERNBASE );
-       kernel_pmap->pm_ppdpt = (vm_offset_t)IdlePDPT;
-#endif
+       kernel_pmap->pdirbase = (pmap_paddr_t)((int)IdlePTD);
+       pdpt = (pd_entry_t *)((unsigned int)IdlePDPT | KERNBASE );
+       kernel_pmap->pm_pdpt = pdpt;
+       kernel_pmap->pm_cr3 = (pmap_paddr_t)((int)IdlePDPT);
 
        va = (vm_offset_t)kernel_pmap->dirbase;
        /* setup self referential mapping(s) */
-       for (i = 0; i< NPGPTD; i++ ) {
+       for (i = 0; i< NPGPTD; i++, pdpt++) {
          pmap_paddr_t pa;
          pa = (pmap_paddr_t) kvtophys(va + i386_ptob(i));
-         * (pd_entry_t *) (kernel_pmap->dirbase + PTDPTDI + i) = 
+         pmap_store_pte(
+           (pd_entry_t *) (kernel_pmap->dirbase + PTDPTDI + i),
            (pa & PG_FRAME) | INTEL_PTE_VALID | INTEL_PTE_RW | INTEL_PTE_REF |
-           INTEL_PTE_MOD | INTEL_PTE_WIRED ;
-#ifdef PAE
-         kernel_pmap->pm_pdpt[i] = pa | INTEL_PTE_VALID;
-#endif
+             INTEL_PTE_MOD | INTEL_PTE_WIRED) ;
+         pmap_store_pte(pdpt, pa | INTEL_PTE_VALID);
        }
 
+       cpu_64bit = IA32e;
+       
+       lo_kernel_cr3 = kernel_pmap->pm_cr3;
+       current_cpu_datap()->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3;
+
+       /* save the value we stuff into created pmaps to share the gdts etc */
+       high_shared_pde = *pmap_pde(kernel_pmap, HIGH_MEM_BASE);
+       /* make sure G bit is on for high shared pde entry */
+       high_shared_pde |= INTEL_PTE_GLOBAL;
+       s = splhigh();
+       pmap_store_pte(pmap_pde(kernel_pmap, HIGH_MEM_BASE), high_shared_pde);
+       splx(s);
+
        nkpt = NKPT;
+       inuse_ptepages_count += NKPT;
 
        virtual_avail = (vm_offset_t)VADDR(KPTDI,0) + (vm_offset_t)first_avail;
        virtual_end = (vm_offset_t)(VM_MAX_KERNEL_ADDRESS);
@@ -759,77 +1192,150 @@ pmap_bootstrap(
         * mapping of pages.
         */
 #define        SYSMAP(c, p, v, n)      \
-       v = (c)va; va += ((n)*INTEL_PGBYTES); p = pte; pte += (n);
+       v = (c)va; va += ((n)*INTEL_PGBYTES); p = pte; pte += (n)
 
        va = virtual_avail;
-       pte = (pt_entry_t *) pmap_pte(kernel_pmap, va);
+       pte = vtopte(va);
 
-       /*
-        * CMAP1/CMAP2 are used for zeroing and copying pages.
-         * CMAP3 is used for ml_phys_read/write.
-        */
-       SYSMAP(caddr_t, CM1, CA1, 1)
-       * (pt_entry_t *) CM1 = 0;
-       SYSMAP(caddr_t, CM2, CA2, 1)
-       * (pt_entry_t *) CM2 = 0;
-       SYSMAP(caddr_t, CM3, CA3, 1)
-       * (pt_entry_t *) CM3 = 0;
-
-       /* used by pmap_pte */
-       SYSMAP(caddr_t, CM4, CA4, 1)
-         * (pt_entry_t *) CM4 = 0;
+        for (i=0; i<PMAP_NWINDOWS; i++) {
+            SYSMAP(caddr_t,
+                  (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP),
+                   (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR),
+                  1);
+            *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0;
+        }
 
        /* DMAP user for debugger */
        SYSMAP(caddr_t, DMAP1, DADDR1, 1);
        SYSMAP(caddr_t, DMAP2, DADDR2, 1);  /* XXX temporary - can remove */
 
-
-       lock_init(&pmap_system_lock,
-                 FALSE,                /* NOT a sleep lock */
-                 0, 0);
-
        virtual_avail = va;
 
+       if (PE_parse_boot_arg("npvhash", &npvhash)) {
+         if (0 != ((npvhash+1) & npvhash)) {
+           kprintf("invalid hash %d, must be ((2^N)-1), using default %d\n",npvhash,NPVHASH);
+           npvhash = NPVHASH;
+         }
+       } else {
+         npvhash = NPVHASH;
+       }
+       printf("npvhash=%d\n",npvhash);
+
        wpkernel = 1;
-       if (PE_parse_boot_arg("debug", &boot_arg)) {
-         if (boot_arg & DB_PRT) wpkernel = 0;
-         if (boot_arg & DB_NMI) wpkernel = 0;
+       if (PE_parse_boot_arg("wpkernel", &boot_arg)) {
+               if (boot_arg == 0)
+                       wpkernel = 0;
        }
 
-       /* remap kernel text readonly if not debugging or kprintfing */
+       s = splhigh();
+
+       /* Remap kernel text readonly unless the "wpkernel" boot-arg is present
+        * and set to 0.
+        */
        if (wpkernel)
        {
                vm_offset_t     myva;
                pt_entry_t     *ptep;
 
-               for (myva = i386_round_page(VM_MIN_KERNEL_ADDRESS + MP_BOOT + MP_BOOTSTACK); myva < etext; myva += PAGE_SIZE) {
+               for (myva = i386_round_page(MP_BOOT + MP_BOOTSTACK); myva < etext; myva += PAGE_SIZE) {
                         if (myva >= (vm_offset_t)sectHIBB && myva < ((vm_offset_t)sectHIBB + sectSizeHIB))
                                 continue;
-                       ptep = pmap_pte(kernel_pmap, myva);
+                       ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
                        if (ptep)
-                               *ptep &= ~INTEL_PTE_RW;
+                               pmap_store_pte(ptep, *ptep & ~INTEL_PTE_RW);
                }
-               flush_tlb();
        }
 
+       /* no matter what,  kernel page zero is not accessible */
+       pte = pmap_pte(kernel_pmap, 0);
+       pmap_store_pte(pte, INTEL_PTE_INVALID);
+
+       /* map lowmem global page into fixed addr 0x2000 */
+       if (0 == (pte = pmap_pte(kernel_pmap,0x2000))) panic("lowmem pte");
+       assert(0 == ((vm_offset_t) &lowGlo & PAGE_MASK)); /* make sure it is defined on a page boundary */
+       pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)|INTEL_PTE_VALID|INTEL_PTE_REF|INTEL_PTE_MOD|INTEL_PTE_WIRED|INTEL_PTE_RW);
+       splx(s);
+       flush_tlb();
+
        simple_lock_init(&kernel_pmap->lock, 0);
-       simple_lock_init(&pv_free_list_lock, 0);
+       simple_lock_init(&pv_hashed_free_list_lock, 0);
+       simple_lock_init(&pv_hashed_kern_free_list_lock, 0);
+       simple_lock_init(&pv_hash_table_lock,0);
+
+       pmap_init_high_shared();
+
+       pde_mapped_size = PDE_MAPPED_SIZE;
+
+       if (cpu_64bit) {
+         pdpt_entry_t *ppdpt   = (pdpt_entry_t *)IdlePDPT;
+         pdpt_entry_t *ppdpt64 = (pdpt_entry_t *)IdlePDPT64;
+         pdpt_entry_t *ppml4   = (pdpt_entry_t *)IdlePML4;
+         int istate = ml_set_interrupts_enabled(FALSE);
+
+         /*
+          * Clone a new 64-bit 3rd-level page table directory, IdlePML4,
+          * with page bits set for the correct IA-32e operation and so that
+          * the legacy-mode IdlePDPT is retained for slave processor start-up.
+          * This is necessary due to the incompatible use of page bits between
+          * 64-bit and legacy modes.
+          */
+         kernel_pmap->pm_cr3 = (pmap_paddr_t)((int)IdlePML4); /* setup in start.s for us */
+         kernel_pmap->pm_pml4 = IdlePML4;
+         kernel_pmap->pm_pdpt = (pd_entry_t *)
+                                       ((unsigned int)IdlePDPT64 | KERNBASE );
+#define PAGE_BITS (INTEL_PTE_VALID|INTEL_PTE_RW|INTEL_PTE_USER|INTEL_PTE_REF)
+         pmap_store_pte(kernel_pmap->pm_pml4,
+                        (uint32_t)IdlePDPT64 | PAGE_BITS);
+         pmap_store_pte((ppdpt64+0), *(ppdpt+0) | PAGE_BITS);
+         pmap_store_pte((ppdpt64+1), *(ppdpt+1) | PAGE_BITS);
+         pmap_store_pte((ppdpt64+2), *(ppdpt+2) | PAGE_BITS);
+         pmap_store_pte((ppdpt64+3), *(ppdpt+3) | PAGE_BITS);
+
+         /*
+          * The kernel is also mapped in the uber-space starting at
+          * 0xFFFFFF80:00000000. This is the highest entry in the 4th-level.
+          */
+         pmap_store_pte((ppml4+KERNEL_UBER_PML4_INDEX), *(ppml4+0));
+
+         kernel64_cr3 = (addr64_t) kernel_pmap->pm_cr3;
+
+         /* Re-initialize descriptors and prepare to switch modes */
+         cpu_desc_init64(&cpu_data_master, TRUE);
+         current_cpu_datap()->cpu_is64bit = TRUE;
+         current_cpu_datap()->cpu_active_cr3 = kernel64_cr3;
+
+         pde_mapped_size = 512*4096 ; 
+
+         ml_set_interrupts_enabled(istate);
+       }
+
+       /* Set 64-bit mode if required. */
+       cpu_mode_init(&cpu_data_master);
 
-       /* invalidate user virtual addresses */
-       memset((char *)kernel_pmap->dirbase,
-              0,
-              (KPTDI) * sizeof(pd_entry_t));
+       kernel_pmap->pm_hold = (vm_offset_t)kernel_pmap->pm_pml4;
 
        kprintf("Kernel virtual space from 0x%x to 0x%x.\n",
                        VADDR(KPTDI,0), virtual_end);
-#ifdef PAE
-       kprintf("Available physical space from 0x%llx to 0x%llx\n",
-                       avail_start, avail_end);
        printf("PAE enabled\n");
-#else
-       kprintf("Available physical space from 0x%x to 0x%x\n",
+       if (cpu_64bit) {
+               printf("64 bit mode enabled\n");
+               kprintf("64 bit mode enabled\n");
+       }
+
+       kprintf("Available physical space from 0x%llx to 0x%llx\n",
                        avail_start, avail_end);
-#endif
+
+       /*
+        * By default for 64-bit users loaded at 4GB, share kernel mapping.
+        * But this may be overridden by the -no_shared_cr3 boot-arg.
+        */
+       if (PE_parse_boot_arg("-no_shared_cr3", &no_shared_cr3)) {
+               kprintf("Shared kernel address space disabled\n");
+       }       
+
+#ifdef PMAP_TRACES
+       if (PE_parse_boot_arg("-pmap_trace", &pmap_trace)) {
+               kprintf("Kernel traces for pmap operations enabled\n");
+       }       
+#endif /* PMAP_TRACES */
 }
 
 void
@@ -852,7 +1358,7 @@ pmap_init(void)
        register long           npages;
        vm_offset_t             addr;
        register vm_size_t      s;
-       vm_offset_t vaddr;
+       vm_map_offset_t         vaddr;
        ppnum_t ppn;
 
        /*
@@ -860,11 +1366,16 @@ pmap_init(void)
         *      the modify bit array, and the pte_page table.
         */
 
-       /* zero bias all these arrays now instead of off avail_start
-          so we cover all memory */
+       /*
+        * zero bias all these arrays now instead of off avail_start
+        * so we cover all memory
+        */
+
        npages = i386_btop(avail_end);
-       s = (vm_size_t) (sizeof(struct pv_entry) * npages
-                               + pv_lock_table_size(npages)
+       s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages
+                        + (sizeof (struct pv_hashed_entry_t *) * (npvhash+1))
+                        + pv_lock_table_size(npages)
+                        + pv_hash_lock_table_size((npvhash+1))
                                + npages);
 
        s = round_page(s);
@@ -873,16 +1384,47 @@ pmap_init(void)
 
        memset((char *)addr, 0, s);
 
+#if PV_DEBUG
+       if (0 == npvhash) panic("npvhash not initialized");
+#endif
+
        /*
         *      Allocate the structures first to preserve word-alignment.
         */
-       pv_head_table = (pv_entry_t) addr;
+       pv_head_table = (pv_rooted_entry_t) addr;
        addr = (vm_offset_t) (pv_head_table + npages);
 
+       pv_hash_table = (pv_hashed_entry_t *)addr;
+       addr = (vm_offset_t) (pv_hash_table + (npvhash + 1));
+
        pv_lock_table = (char *) addr;
        addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
 
+       pv_hash_lock_table = (char *) addr;
+       addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhash+1)));
+
        pmap_phys_attributes = (char *) addr;
+       {
+               unsigned int i;
+               unsigned int pn;
+               ppnum_t  last_pn;
+               pmap_memory_region_t *pmptr = pmap_memory_regions;
+
+               last_pn = i386_btop(avail_end);
+
+               for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
+                       if (pmptr->type == kEfiConventionalMemory) {
+                               for (pn = pmptr->base; pn <= pmptr->end; pn++) {
+                                       if (pn < last_pn) {
+                                               pmap_phys_attributes[pn] |= PHYS_MANAGED;
+
+                                               if (pn > last_managed_page)
+                                                       last_managed_page = pn;
+                                       }
+                               }
+                       }
+               }
+       }
 
        /*
         *      Create the zone of physical maps,
@@ -890,57 +1432,40 @@ pmap_init(void)
         */
        s = (vm_size_t) sizeof(struct pmap);
        pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
-       s = (vm_size_t) sizeof(struct pv_entry);
-       pv_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */
-#ifdef PAE
-       //      s = (vm_size_t) (sizeof(pdpt_entry_t) * NPGPTD);
+       s = (vm_size_t) sizeof(struct pv_hashed_entry);
+       pv_hashed_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */
        s = 63;
        pdpt_zone = zinit(s, 400*s, 4096, "pdpt"); /* XXX */
-#endif
 
-       /*
-        *      Only now, when all of the data structures are allocated,
-        *      can we set vm_first_phys and vm_last_phys.  If we set them
-        *      too soon, the kmem_alloc_wired above will try to use these
-        *      data structures and blow up.
-        */
-
-       /* zero bias this now so we cover all memory */
-       vm_first_phys = 0;
-       vm_last_phys = avail_end;
-
-#if GROW_KERNEL_FUNCTION_IMPLEMENTED
        kptobj = &kptobj_object_store;
-       _vm_object_allocate((vm_object_size_t)NKPDE, kptobj);
+       _vm_object_allocate((vm_object_size_t)(NPGPTD*NPTDPG), kptobj);
        kernel_pmap->pm_obj = kptobj;
-#endif
 
        /* create pv entries for kernel pages mapped by low level
           startup code.  these have to exist so we can pmap_remove()
           e.g. kext pages from the middle of our addr space */
 
-       vaddr = (vm_offset_t)VM_MIN_KERNEL_ADDRESS;
+       vaddr = (vm_map_offset_t)0;
        for (ppn = 0; ppn < i386_btop(avail_start) ; ppn++ ) {
-         pv_entry_t    pv_e;
+         pv_rooted_entry_t     pv_e;
 
          pv_e = pai_to_pvh(ppn);
          pv_e->va = vaddr;
          vaddr += PAGE_SIZE;
          pv_e->pmap = kernel_pmap;
-         pv_e->next = PV_ENTRY_NULL;
+         queue_init(&pv_e->qlink);
        }
 
        pmap_initialized = TRUE;
 
        /*
-        *      Initializie pmap cache.
+        *      Initialize pmap cache.
         */
        pmap_cache_list = PMAP_NULL;
        pmap_cache_count = 0;
        simple_lock_init(&pmap_cache_lock, 0);
-#ifdef PMAP_QUEUE
-       simple_lock_init(&free_pmap_lock, 0);
-#endif
+
+       max_preemption_latency_tsc = tmrCvt((uint64_t)MAX_PREEMPTION_LATENCY_NS, tscFCvtn2t);
 
 }
 
@@ -952,42 +1477,82 @@ x86_lowmem_free(void)
           the actual pages that are released are determined by which
           pages the memory sizing code puts into the region table */
 
-       ml_static_mfree((vm_offset_t) i386_ptob(pmap_memory_regions[0].base)|VM_MIN_KERNEL_ADDRESS,
+       ml_static_mfree((vm_offset_t) i386_ptob(pmap_memory_regions[0].base),
                        (vm_size_t) i386_ptob(pmap_memory_regions[0].end - pmap_memory_regions[0].base));
 }
 
 
-#define valid_page(x) (pmap_initialized && pmap_valid_page(x))
+#define managed_page(x) ( (unsigned int)x <= last_managed_page && (pmap_phys_attributes[x] & PHYS_MANAGED) )
 
+/*
+ * this function is only used for debugging from the vm layer
+ */
 boolean_t
 pmap_verify_free(
                 ppnum_t pn)
 {
-        pmap_paddr_t   phys;
-       pv_entry_t      pv_h;
+       pv_rooted_entry_t       pv_h;
        int             pai;
-       spl_t           spl;
        boolean_t       result;
 
        assert(pn != vm_page_fictitious_addr);
-       phys = (pmap_paddr_t)i386_ptob(pn);
+
        if (!pmap_initialized)
                return(TRUE);
 
-       if (!pmap_valid_page(pn))
-               return(FALSE);
+       if (pn == vm_page_guard_addr)
+               return TRUE;
 
-       PMAP_WRITE_LOCK(spl);
+       pai = ppn_to_pai(pn);
+       if (!managed_page(pai))
+               return(FALSE);
+       pv_h = pai_to_pvh(pn);
+       result = (pv_h->pmap == PMAP_NULL);
+       return(result);
+}
 
-       pai = pa_index(phys);
-       pv_h = pai_to_pvh(pai);
+boolean_t
+pmap_is_empty(
+       pmap_t          pmap,
+       vm_map_offset_t vstart,
+       vm_map_offset_t vend)
+{
+       vm_map_offset_t offset;
+       ppnum_t         phys_page;
 
-       result = (pv_h->pmap == PMAP_NULL);
-       PMAP_WRITE_UNLOCK(spl);
+       if (pmap == PMAP_NULL) {
+               return TRUE;
+       }
+       for (offset = vstart;
+            offset < vend;
+            offset += PAGE_SIZE_64) {
+               phys_page = pmap_find_phys(pmap, offset);
+               if (phys_page) {
+                       if (pmap != kernel_pmap &&
+                           pmap->pm_task_map == TASK_MAP_32BIT &&
+                           offset >= HIGH_MEM_BASE) {
+                               /*
+                                * The "high_shared_pde" is used to share
+                                * the entire top-most 2MB of address space
+                                * between the kernel and all 32-bit tasks.
+                                * So none of this can be removed from 32-bit
+                                * tasks.
+                                * Let's pretend there's nothing up
+                                * there...
+                                */
+                               return TRUE;
+                       }
+                       kprintf("pmap_is_empty(%p,0x%llx,0x%llx): "
+                               "page %d at 0x%llx\n",
+                               pmap, vstart, vend, phys_page, offset);
+                       return FALSE;
+               }
+       }
 
-       return(result);
+       return TRUE;
 }
 
+
 /*
  *     Create and return a physical map.
  *
@@ -1002,15 +1567,23 @@ pmap_verify_free(
  */
 pmap_t
 pmap_create(
-       vm_size_t       size)
+           vm_map_size_t       sz,
+           boolean_t           is_64bit)
 {
-  register pmap_t                      p;
-#ifdef PMAP_QUEUE
-  register pmap_t pro;
-  spl_t s;
-#endif
-       register int i;
-       register vm_offset_t va;
+       pmap_t                  p;
+       int             i;
+       vm_offset_t     va;
+       vm_size_t       size;
+       pdpt_entry_t    *pdpt;
+       pml4_entry_t    *pml4p;
+       pd_entry_t      *pdp;
+       int template;
+       spl_t s;
+
+       PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START,
+                  (int) (sz>>32), (int) sz, (int) is_64bit, 0, 0);
+
+       size = (vm_size_t) sz;
 
        /*
         *      A software use-only map doesn't even need a map.
@@ -1022,62 +1595,212 @@ pmap_create(
 
        p = (pmap_t) zalloc(pmap_zone);
        if (PMAP_NULL == p)
-         panic("pmap_create zalloc");
-       if (KERN_SUCCESS != kmem_alloc_wired(kernel_map, (vm_offset_t *)(&p->dirbase), NBPTD))
-         panic("pmap_create kmem_alloc_wired");
-#ifdef PAE
-       p->pm_hold = (vm_offset_t)zalloc(pdpt_zone);
-       if ((vm_offset_t)NULL == p->pm_hold) {
-         panic("pdpt zalloc");
-       }
-       p->pm_pdpt = (pdpt_entry_t *) (( p->pm_hold + 31) & ~31);
-       p->pm_ppdpt = kvtophys((vm_offset_t)p->pm_pdpt);  /* XXX */
-#endif
-       if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPGPTD*NPDEPG))))
-         panic("pmap_create vm_object_allocate");
-       memcpy(p->dirbase, 
-              (void *)((unsigned int)IdlePTD | KERNBASE),
-              NBPTD);
-       va = (vm_offset_t)p->dirbase;
-       p->pdirbase = (pd_entry_t *)(kvtophys(va));
-       simple_lock_init(&p->lock, 0);
-
-       /* setup self referential mapping(s) */
-       for (i = 0; i< NPGPTD; i++ ) {
-         pmap_paddr_t pa;
-         pa = (pmap_paddr_t) kvtophys(va + i386_ptob(i));
-         * (pd_entry_t *) (p->dirbase + PTDPTDI + i) = 
-           (pa & PG_FRAME) | INTEL_PTE_VALID | INTEL_PTE_RW | INTEL_PTE_REF |
-           INTEL_PTE_MOD | INTEL_PTE_WIRED ;
-#ifdef PAE
-         p->pm_pdpt[i] = pa | INTEL_PTE_VALID;
-#endif
-       }
+               panic("pmap_create zalloc");
 
-       p->cpus_using = 0;
+       /* init counts now since we'll be bumping some */
+       simple_lock_init(&p->lock, 0);
        p->stats.resident_count = 0;
+       p->stats.resident_max = 0;
        p->stats.wired_count = 0;
        p->ref_count = 1;
+       p->nx_enabled = 1;
+       p->pm_shared = FALSE;
+
+       assert(!is_64bit || cpu_64bit);
+       p->pm_task_map = is_64bit ? TASK_MAP_64BIT : TASK_MAP_32BIT;
+
+       if (!cpu_64bit) {
+               /* legacy 32 bit setup */
+               /* in the legacy case the pdpt layer is hardwired to 4 entries and each
+                * entry covers 1GB of addr space */
+               if (KERN_SUCCESS != kmem_alloc_wired(kernel_map, (vm_offset_t *)(&p->dirbase), NBPTD))
+                       panic("pmap_create kmem_alloc_wired");
+               p->pm_hold = (vm_offset_t)zalloc(pdpt_zone);
+               if ((vm_offset_t)NULL == p->pm_hold) {
+                       panic("pdpt zalloc");
+               }
+               pdpt = (pdpt_entry_t *) (( p->pm_hold + 31) & ~31);
+               p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)pdpt);
+               if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPGPTD*NPTDPG))))
+                       panic("pmap_create vm_object_allocate");
 
-#ifdef PMAP_QUEUE
-       /* insert new pmap at head of queue hanging off kernel_pmap */
-       SPLVM(s);
-       simple_lock(&free_pmap_lock);
-       p->pmap_link.next = (queue_t)kernel_pmap->pmap_link.next;
-       kernel_pmap->pmap_link.next = (queue_t)p;
+               memset((char *)p->dirbase, 0, NBPTD);
 
-       pro = (pmap_t) p->pmap_link.next;
-       p->pmap_link.prev = (queue_t)pro->pmap_link.prev;
-       pro->pmap_link.prev = (queue_t)p;
+               va = (vm_offset_t)p->dirbase;
+               p->pdirbase = kvtophys(va);
 
-       
-       simple_unlock(&free_pmap_lock);
-       SPLX(s);
-#endif
+               template = cpu_64bit ? INTEL_PTE_VALID|INTEL_PTE_RW|INTEL_PTE_USER|INTEL_PTE_REF : INTEL_PTE_VALID;
+               for (i = 0; i< NPGPTD; i++, pdpt++ ) {
+                       pmap_paddr_t pa;
+                       pa = (pmap_paddr_t) kvtophys(va + i386_ptob(i));
+                       pmap_store_pte(pdpt, pa | template);
+               }
+
+               /* map the high shared pde */
+               s = splhigh();
+               pmap_store_pte(pmap_pde(p, HIGH_MEM_BASE), high_shared_pde);
+               splx(s);
+
+       } else {
+               /* 64 bit setup  */
+
+               /* alloc the pml4 page in kernel vm */
+               if (KERN_SUCCESS != kmem_alloc_wired(kernel_map, (vm_offset_t *)(&p->pm_hold), PAGE_SIZE))
+                       panic("pmap_create kmem_alloc_wired pml4");
+
+               memset((char *)p->pm_hold, 0, PAGE_SIZE);
+               p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_hold);
+
+               vm_page_lock_queues();
+               inuse_ptepages_count++;
+               vm_page_unlock_queues();
+
+               /* allocate the vm_objs to hold the pdpt, pde and pte pages */
+
+               if (NULL == (p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS))))
+                       panic("pmap_create pdpt obj");
+
+               if (NULL == (p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS))))
+                       panic("pmap_create pdpt obj");
+
+               if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS))))
+                       panic("pmap_create pte obj");
+
+               /* uber space points to uber mapped kernel */
+               s = splhigh();
+               pml4p = pmap64_pml4(p, 0ULL);
+               pmap_store_pte((pml4p+KERNEL_UBER_PML4_INDEX),*kernel_pmap->pm_pml4);
+
+
+               if (!is_64bit) {
+                       while ((pdp = pmap64_pde(p, (uint64_t)HIGH_MEM_BASE)) == PD_ENTRY_NULL) {
+                               splx(s);
+                               pmap_expand_pdpt(p, (uint64_t)HIGH_MEM_BASE); /* need room for another pde entry */
+                               s = splhigh();
+                       }
+                       pmap_store_pte(pdp, high_shared_pde);
+               }
+               splx(s);
+       }
+
+       PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_END,
+                  (int) p, is_64bit, 0, 0, 0);
 
        return(p);
 }
 
+/*
+ * The following routines implement the shared address optimization for 64-bit
+ * users with a 4GB page zero.
+ *
+ * pmap_set_4GB_pagezero()
+ *     is called in the exec and fork paths to mirror the kernel's
+ *     mapping in the bottom 4G of the user's pmap. The task mapping changes
+ *     from TASK_MAP_64BIT to TASK_MAP_64BIT_SHARED. This routine returns
+ *     without doing anything if the -no_shared_cr3 boot-arg is set.
+ *
+ * pmap_clear_4GB_pagezero()
+ *     is called in the exec/exit paths to undo this mirror. The task mapping
+ *     reverts to TASK_MAP_64BIT. In addition, we switch to the kernel's
+ *     CR3 by calling pmap_load_kernel_cr3(). 
+ *
+ * pmap_load_kernel_cr3()
+ *     loads cr3 with the kernel's page table. In addition to being called
+ *     by pmap_clear_4GB_pagezero(), it is used both prior to teardown and
+ *     when we go idle in the context of a shared map.
+ *
+ * Further notes on per-cpu data used:
+ *
+ *     cpu_kernel_cr3  is the cr3 for the kernel's pmap.
+ *                     This is loaded in a trampoline on entering the kernel
+ *                     from a 32-bit user (or non-shared-cr3 64-bit user).
+ *     cpu_task_cr3    is the cr3 for the current thread.
+ *                     This is loaded in a trampoline as we exit the kernel.
+ *     cpu_active_cr3  reflects the cr3 currently loaded.
+ *                     However, the low order bit is set when the
+ *                     processor is idle or interrupts are disabled
+ *                     while the system pmap lock is held. It is used by
+ *                     tlb shoot-down.
+ *     cpu_task_map    indicates whether the task cr3 belongs to
+ *                     a 32-bit, a 64-bit or a 64-bit shared map.
+ *                     The latter allows the avoidance of the cr3 load
+ *                     on kernel entry and exit.
+ *     cpu_tlb_invalid set TRUE when a tlb flush is requested.
+ *                     If the cr3 is "inactive" (the cpu is idle or the
+ *                     system-wide pmap lock is held) this is not serviced by
+ *                     an IPI but at the time when the cr3 becomes "active".
+ */ 
+
+void
+pmap_set_4GB_pagezero(pmap_t p)
+{
+       pdpt_entry_t    *user_pdptp;
+       pdpt_entry_t    *kern_pdptp;
+
+       assert(p->pm_task_map != TASK_MAP_32BIT);
+
+       /* Kernel-shared cr3 may be disabled by boot arg. */
+       if (no_shared_cr3)
+               return;
+
+       /*
+        * Set the bottom 4 3rd-level pte's to be the kernel's.
+        */
+       PMAP_LOCK(p);
+       while ((user_pdptp = pmap64_pdpt(p, 0x0)) == PDPT_ENTRY_NULL) {
+               PMAP_UNLOCK(p);
+               pmap_expand_pml4(p, 0x0);
+               PMAP_LOCK(p);
+       }
+       kern_pdptp = kernel_pmap->pm_pdpt;
+       pmap_store_pte(user_pdptp+0, *(kern_pdptp+0));
+       pmap_store_pte(user_pdptp+1, *(kern_pdptp+1));
+       pmap_store_pte(user_pdptp+2, *(kern_pdptp+2));
+       pmap_store_pte(user_pdptp+3, *(kern_pdptp+3));
+       p->pm_task_map = TASK_MAP_64BIT_SHARED;
+       PMAP_UNLOCK(p);
+}
+
+void
+pmap_clear_4GB_pagezero(pmap_t p)
+{
+       pdpt_entry_t    *user_pdptp;
+
+       if (p->pm_task_map != TASK_MAP_64BIT_SHARED)
+               return;
+
+       PMAP_LOCK(p);
+
+       p->pm_task_map = TASK_MAP_64BIT;
+
+       pmap_load_kernel_cr3();
+
+       user_pdptp = pmap64_pdpt(p, 0x0);
+       pmap_store_pte(user_pdptp+0, 0);
+       pmap_store_pte(user_pdptp+1, 0);
+       pmap_store_pte(user_pdptp+2, 0);
+       pmap_store_pte(user_pdptp+3, 0);
+
+       PMAP_UNLOCK(p);
+}
+
+void
+pmap_load_kernel_cr3(void)
+{
+       uint64_t        kernel_cr3;
+
+       assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
+       /*
+        * Reload cr3 with the true kernel cr3.
+        */
+       kernel_cr3 = current_cpu_datap()->cpu_kernel_cr3;
+       set64_cr3(kernel_cr3);
+       current_cpu_datap()->cpu_active_cr3 = kernel_cr3;
+       current_cpu_datap()->cpu_tlb_invalid = FALSE;
+       __asm__ volatile("mfence");
+}
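A rough sketch of the exec/exit call pattern described in the comment block above; the wrapper functions and the has_4gb_pagezero flag are illustrative assumptions, not code from this file:

    /* Hypothetical exec path: mirror the kernel's low 4GB into a 64-bit task's pmap. */
    static void
    example_exec_path(pmap_t p, boolean_t has_4gb_pagezero)
    {
            if (has_4gb_pagezero)
                    pmap_set_4GB_pagezero(p);       /* no-op if -no_shared_cr3 was given */
    }

    /* Hypothetical exec/exit teardown path: undo the mirror, fall back to the kernel cr3. */
    static void
    example_exit_path(pmap_t p)
    {
            pmap_clear_4GB_pagezero(p);             /* no-op unless TASK_MAP_64BIT_SHARED */
    }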
+
 /*
  *     Retire the given physical map from service.
  *     Should only be called if the map contains
@@ -1088,108 +1811,77 @@ void
 pmap_destroy(
        register pmap_t p)
 {
-       register pt_entry_t     *pdep;
        register int            c;
-       spl_t                   s;
-       register vm_page_t      m;
-#ifdef PMAP_QUEUE
-       register pmap_t        pre,pro;
-#endif
 
        if (p == PMAP_NULL)
                return;
 
-       SPLVM(s);
-       simple_lock(&p->lock);
-       c = --p->ref_count;
-       if (c == 0) {
-               register int    my_cpu;
+       PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START,
+                  (int) p, 0, 0, 0, 0);
+
+       PMAP_LOCK(p);
 
-               mp_disable_preemption();
-               my_cpu = cpu_number();
+       c = --p->ref_count;
 
+       if (c == 0) {
                /* 
                 * If some cpu is not using the physical pmap pointer that it
                 * is supposed to be (see set_dirbase), we might be using the
                 * pmap that is being destroyed! Make sure we are
                 * physically on the right pmap:
                 */
-               /* force pmap/cr3 update */
                PMAP_UPDATE_TLBS(p,
-                                VM_MIN_ADDRESS,
-                                VM_MAX_KERNEL_ADDRESS);
-
-               if (PMAP_REAL(my_cpu) == p) {
-                       PMAP_CPU_CLR(p, my_cpu);
-                       PMAP_REAL(my_cpu) = kernel_pmap;
-#ifdef PAE
-                       set_cr3((unsigned int)kernel_pmap->pm_ppdpt);
-#else
-                       set_cr3((unsigned int)kernel_pmap->pdirbase);
-#endif
-               }
-               mp_enable_preemption();
+                                0x0ULL,
+                                0xFFFFFFFFFFFFF000ULL);
        }
-       simple_unlock(&p->lock);
-       SPLX(s);
+
+       PMAP_UNLOCK(p);
 
        if (c != 0) {
-           return;     /* still in use */
+               PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
+                          (int) p, 1, 0, 0, 0);
+               return; /* still in use */
        }
 
-#ifdef PMAP_QUEUE
-       /* remove from pmap queue */
-       SPLVM(s);
-       simple_lock(&free_pmap_lock);
-
-       pre = (pmap_t)p->pmap_link.prev;
-       pre->pmap_link.next = (queue_t)p->pmap_link.next;
-       pro = (pmap_t)p->pmap_link.next;
-       pro->pmap_link.prev = (queue_t)p->pmap_link.prev;
-
-       simple_unlock(&free_pmap_lock);
-       SPLX(s);
-#endif
-
        /*
         *      Free the memory maps, then the
         *      pmap structure.
         */
+       if (!cpu_64bit) {
+               vm_page_lock_queues();
+               inuse_ptepages_count -= p->pm_obj->resident_page_count;
+               vm_page_unlock_queues();
 
-       pdep = (pt_entry_t *)p->dirbase;
+               kmem_free(kernel_map, (vm_offset_t)p->dirbase, NBPTD);
+               zfree(pdpt_zone, (void *)p->pm_hold);
+
+               vm_object_deallocate(p->pm_obj);
+       } else {
+               /* 64 bit */
+               int inuse_ptepages = 0;
+
+               /* free 64 bit mode structs */
+               inuse_ptepages++;
+               kmem_free(kernel_map, (vm_offset_t)p->pm_hold, PAGE_SIZE);
+
+               inuse_ptepages += p->pm_obj_pml4->resident_page_count;
+               vm_object_deallocate(p->pm_obj_pml4);
+
+               inuse_ptepages += p->pm_obj_pdpt->resident_page_count;
+               vm_object_deallocate(p->pm_obj_pdpt);
+
+               inuse_ptepages += p->pm_obj->resident_page_count;
+               vm_object_deallocate(p->pm_obj);
 
-       while (pdep < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)]) {
-         int ind;
-           if (*pdep & INTEL_PTE_VALID) {
-             ind = pdep - (pt_entry_t *)&p->dirbase[0];
-               vm_object_lock(p->pm_obj);
-               m = vm_page_lookup(p->pm_obj, (vm_object_offset_t)ind);
-               if (m == VM_PAGE_NULL) {
-                   panic("pmap_destroy: pte page not in object");
-               }
                vm_page_lock_queues();
-               vm_page_free(m);
-               inuse_ptepages_count--;
-               vm_object_unlock(p->pm_obj);
+               inuse_ptepages_count -= inuse_ptepages;
                vm_page_unlock_queues();
-
-               /*
-                *      Clear pdes, this might be headed for the cache.
-                */
-               *pdep++ = 0;
-           }
-           else {
-             *pdep++ = 0;
-           }
-       
        }
-
-       vm_object_deallocate(p->pm_obj);
-       kmem_free(kernel_map, (vm_offset_t)p->dirbase, NBPTD);
-#ifdef PAE
-       zfree(pdpt_zone, (void *)p->pm_hold);
-#endif
        zfree(pmap_zone, p);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
+                  0, 0, 0, 0, 0);
+
 }
 
 /*
@@ -1200,14 +1892,11 @@ void
 pmap_reference(
        register pmap_t p)
 {
-       spl_t   s;
 
        if (p != PMAP_NULL) {
-               SPLVM(s);
-               simple_lock(&p->lock);
+               PMAP_LOCK(p);
                p->ref_count++;
-               simple_unlock(&p->lock);
-               SPLX(s);
+               PMAP_UNLOCK(p);
        }
 }
 
@@ -1223,113 +1912,218 @@ pmap_reference(
  *     Assumes that the pte-page exists.
  */
 
-/* static */
 void
 pmap_remove_range(
        pmap_t                  pmap,
-       vm_offset_t             va,
+       vm_map_offset_t         start_vaddr,
        pt_entry_t              *spte,
        pt_entry_t              *epte)
 {
        register pt_entry_t     *cpte;
-       int                     num_removed, num_unwired;
+       pv_hashed_entry_t       pvh_et = PV_HASHED_ENTRY_NULL;
+       pv_hashed_entry_t       pvh_eh = PV_HASHED_ENTRY_NULL;
+       pv_hashed_entry_t       pvh_e;
+       int                     pvh_cnt = 0;
+       int                     num_removed, num_unwired, num_found;
        int                     pai;
        pmap_paddr_t            pa;
+       vm_map_offset_t         vaddr;
+       int                     pvhash_idx;
+       uint32_t                pv_cnt;
 
-#if    DEBUG_PTE_PAGE
-       if (pmap != kernel_pmap)
-               ptep_check(get_pte_page(spte));
-#endif /* DEBUG_PTE_PAGE */
        num_removed = 0;
        num_unwired = 0;
+       num_found   = 0;
+
+       if (pmap != kernel_pmap &&
+           pmap->pm_task_map == TASK_MAP_32BIT &&
+           start_vaddr >= HIGH_MEM_BASE) {
+               /*
+                * The range is in the "high_shared_pde" which is shared
+                * between the kernel and all 32-bit tasks.  It holds
+                * the 32-bit commpage but also the trampolines, GDT, etc...
+                * so we can't let user tasks remove anything from it.
+                */
+               return;
+       }
 
-       for (cpte = spte; cpte < epte;
-            cpte++, va += PAGE_SIZE) {
+       /* invalidate the PTEs first to "freeze" them */
+       for (cpte = spte, vaddr = start_vaddr;
+            cpte < epte;
+            cpte++, vaddr += PAGE_SIZE_64) {
 
            pa = pte_to_pa(*cpte);
            if (pa == 0)
                continue;
+           num_found++;
 
-           num_removed++;
            if (iswired(*cpte))
                num_unwired++;
 
-           if (!valid_page(i386_btop(pa))) {
+           pai = pa_index(pa);
 
+           if (!managed_page(pai)) {
                /*
                 *      Outside range of managed physical memory.
                 *      Just remove the mappings.
                 */
-               register pt_entry_t     *lpte = cpte;
-
-               *lpte = 0;
+               pmap_store_pte(cpte, 0);
                continue;
            }
 
+           /* invalidate the PTE */ 
+           pmap_update_pte(cpte, *cpte, (*cpte & ~INTEL_PTE_VALID));
+       }
+
+       if (num_found == 0) {
+               /* nothing was changed: we're done */
+               goto update_counts;
+       }
+
+       /* propagate the invalidates to other CPUs */
+
+       PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr);
+
+       for (cpte = spte, vaddr = start_vaddr;
+            cpte < epte;
+            cpte++, vaddr += PAGE_SIZE_64) {
+
+           pa = pte_to_pa(*cpte);
+           if (pa == 0)
+               continue;
+
            pai = pa_index(pa);
+
            LOCK_PVH(pai);
 
+           pa = pte_to_pa(*cpte);
+           if (pa == 0) {
+             UNLOCK_PVH(pai);
+             continue;
+           }
+             
+           num_removed++;
+
            /*
-            *  Get the modify and reference bits.
+            *  Get the modify and reference bits, then
+            *  nuke the entry in the page table
             */
-           {
-               register pt_entry_t     *lpte;
-
-               lpte = cpte;
-                   pmap_phys_attributes[pai] |=
-                       *lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
-                   *lpte = 0;
-
-           }
+           /* remember reference and change */
+           pmap_phys_attributes[pai] |=
+                   (char)(*cpte & (PHYS_MODIFIED | PHYS_REFERENCED));
+           /* completely invalidate the PTE */
+           pmap_store_pte(cpte, 0);
 
            /*
             *  Remove the mapping from the pvlist for
             *  this physical page.
             */
            {
-               register pv_entry_t     pv_h, prev, cur;
+             pv_rooted_entry_t pv_h;
+             pv_hashed_entry_t *pprevh;
+             ppnum_t ppn = (ppnum_t)pai;
 
                pv_h = pai_to_pvh(pai);
-               if (pv_h->pmap == PMAP_NULL) {
-                   panic("pmap_remove: null pv_list!");
-               }
-               if (pv_h->va == va && pv_h->pmap == pmap) {
+               pvh_e = PV_HASHED_ENTRY_NULL;
+               if (pv_h->pmap == PMAP_NULL)
+                   panic("pmap_remove_range: null pv_list!");
+
+               if (pv_h->va == vaddr && pv_h->pmap == pmap) { /* rooted or not */
                    /*
-                    * Header is the pv_entry.  Copy the next one
-                    * to header and free the next one (we cannot
-                    * free the header)
+                    * Header is the pv_rooted_entry.  We can't free that.
+                    * If there is a queued entry after this one, remove it
+                    * from the ppn queue and the hash chain, copy it to the
+                    * rooted entry, and then free the queued entry instead.
                     */
-                   cur = pv_h->next;
-                   if (cur != PV_ENTRY_NULL) {
-                       *pv_h = *cur;
-                       PV_FREE(cur);
+
+                 pvh_e = (pv_hashed_entry_t)queue_next(&pv_h->qlink);
+                 if (pv_h != (pv_rooted_entry_t)pvh_e) {  /* any queued after rooted? */
+                   CHK_NPVHASH();
+                   pvhash_idx = pvhashidx(pvh_e->pmap,pvh_e->va);
+                   LOCK_PV_HASH(pvhash_idx);
+                   remque(&pvh_e->qlink);
+                   {
+                     pprevh = pvhash(pvhash_idx);
+                     if (PV_HASHED_ENTRY_NULL == *pprevh) {
+                       panic("pmap_remove_range empty hash removing rooted pv");
+                     }
                    }
-                   else {
-                       pv_h->pmap = PMAP_NULL;
+                   pmap_pvh_unlink(pvh_e);
+                   UNLOCK_PV_HASH(pvhash_idx);
+                   pv_h->pmap = pvh_e->pmap;
+                   pv_h->va = pvh_e->va;   /* dispose of pvh_e */
+                 } else {  /* none queued after rooted */
+                   pv_h->pmap = PMAP_NULL;
+                   pvh_e = PV_HASHED_ENTRY_NULL;
+                 }   /* any queued after rooted */
+
+               } else { /* rooted or not */
+                 /* not removing the rooted pv. find it on the hash chain,
+                  * remove it from the ppn queue and the hash chain, and free it */
+                 CHK_NPVHASH();
+                 pvhash_idx = pvhashidx(pmap,vaddr);
+                 LOCK_PV_HASH(pvhash_idx);
+                 pprevh = pvhash(pvhash_idx);
+                 if (PV_HASHED_ENTRY_NULL == *pprevh) {
+                   panic("pmap_remove_range empty hash removing hashed pv");
                    }
-               }
-               else {
-                   cur = pv_h;
-                   do {
-                       prev = cur;
-                       if ((cur = prev->next) == PV_ENTRY_NULL) {
-                         panic("pmap-remove: mapping not in pv_list!");
-                       }
-                   } while (cur->va != va || cur->pmap != pmap);
-                   prev->next = cur->next;
-                   PV_FREE(cur);
-               }
+                 pvh_e = *pprevh;
+                 pmap_pv_hashlist_walks++;
+                 pv_cnt = 0;
+                 while (PV_HASHED_ENTRY_NULL != pvh_e) {
+                       pv_cnt++;
+                       if (pvh_e->pmap == pmap && pvh_e->va == vaddr && pvh_e->ppn == ppn) break;
+                       pprevh = &pvh_e->nexth;
+                       pvh_e = pvh_e->nexth;
+                 }
+                 pmap_pv_hashlist_cnts += pv_cnt;
+                 if (pmap_pv_hashlist_max < pv_cnt) pmap_pv_hashlist_max = pv_cnt;
+                 if (PV_HASHED_ENTRY_NULL == pvh_e) panic("pmap_remove_range pv not on hash");
+                 *pprevh = pvh_e->nexth;
+                 remque(&pvh_e->qlink);
+                 UNLOCK_PV_HASH(pvhash_idx);
+
+               } /* rooted or not */
+
                UNLOCK_PVH(pai);
-           }
+
+               if (pvh_e != PV_HASHED_ENTRY_NULL) {
+                 pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+                 pvh_eh = pvh_e;
+
+                 if (pvh_et == PV_HASHED_ENTRY_NULL) {
+                   pvh_et = pvh_e;
+                 }
+
+                 pvh_cnt++;
+               }
+
+           } /* removing mappings for this phy page */
+       } /* for loop */
+       
+       if (pvh_eh != PV_HASHED_ENTRY_NULL) {
+           PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);
        }
 
+update_counts:
        /*
         *      Update the counts
         */
+#if TESTING
+       if (pmap->stats.resident_count < num_removed)
+               panic("pmap_remove_range: resident_count");
+#endif
        assert(pmap->stats.resident_count >= num_removed);
-       pmap->stats.resident_count -= num_removed;
+       OSAddAtomic(-num_removed, (SInt32 *) &pmap->stats.resident_count);
+
+#if TESTING
+       if (pmap->stats.wired_count < num_unwired)
+               panic("pmap_remove_range: wired_count");
+#endif
        assert(pmap->stats.wired_count >= num_unwired);
-       pmap->stats.wired_count -= num_unwired;
+       OSAddAtomic(-num_unwired, (SInt32 *) &pmap->stats.wired_count);
+
+       return;
 }
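pmap_remove_range() above is structured as two passes: the first pass clears the valid bit on every PTE in the range to "freeze" it, a single PMAP_UPDATE_TLBS() shootdown then covers the whole range, and only the second pass harvests the ref/mod bits and unlinks the pv entries. A toy, self-contained model of that shape, with all names and types invented for illustration:

    /* Toy two-pass remove: freeze, one range-wide invalidate, then clean up. */
    #define TOY_VALID       0x1u

    static void
    toy_remove_range(unsigned int *pte, int n)
    {
            int i;

            /* pass 1: clear the valid bit so no new TLB fills can happen */
            for (i = 0; i < n; i++)
                    pte[i] &= ~TOY_VALID;

            /* one invalidate for the whole range, instead of one per page */
            /* toy_tlb_shootdown(pte, n);  -- placeholder */

            /* pass 2: per-entry bookkeeping, done while the entries are frozen */
            for (i = 0; i < n; i++)
                    pte[i] = 0;
    }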
 
 /*
@@ -1361,44 +2155,91 @@ pmap_remove(
        addr64_t        s64,
        addr64_t        e64)
 {
-       spl_t                   spl;
-       register pt_entry_t     *pde;
-       register pt_entry_t     *spte, *epte;
-       vm_offset_t             l;
-       vm_offset_t    s, e;
-       vm_offset_t    orig_s;
+       pt_entry_t      *pde;
+       pt_entry_t      *spte, *epte;
+       addr64_t        l64;
+       addr64_t        orig_s64;
+       uint64_t        deadline;
 
-       if (map == PMAP_NULL)
+       pmap_intr_assert();
+
+       if (map == PMAP_NULL || s64 == e64)
                return;
+       PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_START,
+                  (int) map,
+                  (int) (s64>>32), (int) s64,
+                  (int) (e64>>32), (int) e64);
 
-       PMAP_READ_LOCK(map, spl);
+       PMAP_LOCK(map);
+
+#if 0
+       /*
+        * Check that address range in the kernel does not overlap the stacks.
+        * We initialize local static min/max variables once to avoid making
+        * 2 function calls for every remove. Note also that these functions
+        * both return 0 before kernel stacks have been initialized, and hence
+        * the panic is not triggered in this case.
+        */
+       if (map == kernel_pmap) {
+               static vm_offset_t      kernel_stack_min = 0;
+               static vm_offset_t      kernel_stack_max = 0;
 
-       if (value_64bit(s64) || value_64bit(e64)) {
-         panic("pmap_remove addr overflow");
+               if (kernel_stack_min == 0) {
+                       kernel_stack_min = min_valid_stack_address();
+                       kernel_stack_max = max_valid_stack_address();
+               }
+               if  ((kernel_stack_min <= s64 && s64 <  kernel_stack_max) ||
+                    (kernel_stack_min <  e64 && e64 <= kernel_stack_max))
+                       panic("pmap_remove() attempted in kernel stack");
        }
+#else
+
+       /*
+        * The values of kernel_stack_min and kernel_stack_max are no longer
+        * relevant now that we allocate kernel stacks anywhere in the kernel map,
+        * so the old code above no longer applies.  If we wanted to check that
+        * we weren't removing a mapping of a page in a kernel stack we'd have to
+        * mark the PTE with an unused bit and check that here.
+        */
+
+#endif
+
+       deadline = rdtsc64() + max_preemption_latency_tsc;
+
+       orig_s64 = s64;
+
+       while (s64 < e64) {
 
-       orig_s = s = (vm_offset_t)low32(s64);
-       e = (vm_offset_t)low32(e64);
+           l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size-1);
+           if (l64 > e64)
+               l64 = e64;
+           pde = pmap_pde(map, s64);
 
-       pde = pmap_pde(map, s);
+           if (pde && (*pde & INTEL_PTE_VALID)) {
+               spte = (pt_entry_t *)pmap_pte(map, (s64 & ~(pde_mapped_size-1)));
+               spte = &spte[ptenum(s64)];
+               epte = &spte[intel_btop(l64-s64)];
 
-       while (s < e) {
-           l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
-           if (l > e)
-               l = e;
-           if (*pde & INTEL_PTE_VALID) {
-             spte = (pt_entry_t *)pmap_pte(map, (s & ~(PDE_MAPPED_SIZE-1)));
-               spte = &spte[ptenum(s)];
-               epte = &spte[intel_btop(l-s)];
-               pmap_remove_range(map, s, spte, epte);
+               pmap_remove_range(map, s64, spte, epte);
            }
-           s = l;
+           s64 = l64;
            pde++;
+
+           if (s64 < e64 && rdtsc64() >= deadline) {
+             PMAP_UNLOCK(map)
+             PMAP_LOCK(map)
+
+             deadline = rdtsc64() + max_preemption_latency_tsc;
+           }
+
        }
 
-       PMAP_UPDATE_TLBS(map, orig_s, e);
+       PMAP_UNLOCK(map);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_END,
+                  (int) map, 0, 0, 0, 0);
 
-       PMAP_READ_UNLOCK(map, spl);
 }
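pmap_remove() above also bounds how long the pmap lock is held: it converts MAX_PREEMPTION_LATENCY_NS into a TSC deadline and, whenever that deadline passes between chunks, drops and immediately retakes the lock so a waiter can get in. A generic sketch of that pattern; the lock, clock and work routines are placeholders, not kernel APIs:

    #include <stdint.h>

    extern uint64_t read_timestamp(void);
    extern void     my_lock(void), my_unlock(void);
    extern void     do_one_chunk(int i);

    void
    process_with_bounded_lock_hold(uint64_t budget_ticks, int nchunks)
    {
            uint64_t        deadline;
            int             i;

            my_lock();
            deadline = read_timestamp() + budget_ticks;

            for (i = 0; i < nchunks; i++) {
                    do_one_chunk(i);

                    if (i + 1 < nchunks && read_timestamp() >= deadline) {
                            /* give waiters (and preemption) a chance, then continue */
                            my_unlock();
                            my_lock();
                            deadline = read_timestamp() + budget_ticks;
                    }
            }
            my_unlock();
    }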
 
 /*
@@ -1413,24 +2254,36 @@ pmap_page_protect(
         ppnum_t         pn,
        vm_prot_t       prot)
 {
-       pv_entry_t              pv_h, prev;
-       register pv_entry_t     pv_e;
-       register pt_entry_t     *pte;
+       pv_hashed_entry_t               pvh_eh = PV_HASHED_ENTRY_NULL;
+       pv_hashed_entry_t               pvh_et = PV_HASHED_ENTRY_NULL;
+       pv_hashed_entry_t       nexth;
+       int                     pvh_cnt = 0;
+       pv_rooted_entry_t               pv_h;
+       pv_rooted_entry_t               pv_e;
+       pv_hashed_entry_t       pvh_e;
+       pt_entry_t              *pte;
        int                     pai;
        register pmap_t         pmap;
-       spl_t                   spl;
        boolean_t               remove;
-       pmap_paddr_t             phys;
+       int                     pvhash_idx;
 
+       pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
-       phys = (pmap_paddr_t)i386_ptob(pn);
-       if (!valid_page(pn)) {
+       if (pn == vm_page_guard_addr)
+               return;
+
+       pai = ppn_to_pai(pn);
+
+       if (!managed_page(pai)) {
            /*
             *  Not a managed page.
             */
            return;
        }
 
+       PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_START,
+                  (int) pn, (int) prot, 0, 0, 0);
+
        /*
         * Determine the new protection.
         */
@@ -1446,45 +2299,32 @@ pmap_page_protect(
                break;
        }
 
-       /*
-        *      Lock the pmap system first, since we will be changing
-        *      several pmaps.
-        */
-
-       PMAP_WRITE_LOCK(spl);
-
-       pai = pa_index(phys);
        pv_h = pai_to_pvh(pai);
 
+       LOCK_PVH(pai);
+
        /*
         * Walk down PV list, changing or removing all mappings.
-        * We do not have to lock the pv_list because we have
-        * the entire pmap system locked.
         */
        if (pv_h->pmap != PMAP_NULL) {
 
-           prev = pv_e = pv_h;
+           pv_e = pv_h;
+           pvh_e = (pv_hashed_entry_t)pv_e; /* cheat */
+
            do {
-             register vm_offset_t va;
+               register vm_map_offset_t vaddr;
                pmap = pv_e->pmap;
-               /*
-                * Lock the pmap to block pmap_extract and similar routines.
-                */
-               simple_lock(&pmap->lock);
-
-               {
-
-                   va = pv_e->va;
-                   pte = pmap_pte(pmap, va);
-
-                   /*
-                    * Consistency checks.
-                    */
-                   /* assert(*pte & INTEL_PTE_VALID); XXX */
-                   /* assert(pte_to_phys(*pte) == phys); */
 
+               vaddr = pv_e->va;
+               pte = pmap_pte(pmap, vaddr);
+               
+               if (0 == pte) {
+                   kprintf("pmap_page_protect pmap %p pn 0x%x vaddr 0x%llx\n",pmap, pn, vaddr);
+                   panic("pmap_page_protect");
                }
 
+               nexth = (pv_hashed_entry_t)queue_next(&pvh_e->qlink);  /* if there is one */
+
                /*
                 * Remove the mapping if new protection is NONE
                 * or if write-protecting a kernel mapping.
@@ -1493,19 +2333,25 @@ pmap_page_protect(
                    /*
                     * Remove the mapping, collecting any modify bits.
                     */
-                   {
-                           pmap_phys_attributes[pai] |=
-                               *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-                           *pte++ = 0;
-                           PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
-                   }
+                   pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
 
+                   PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
+
+                   pmap_phys_attributes[pai] |= *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
+
+                   pmap_store_pte(pte, 0);
+
+#if TESTING
+                   if (pmap->stats.resident_count < 1)
+                       panic("pmap_page_protect: resident_count");
+#endif
                    assert(pmap->stats.resident_count >= 1);
-                   pmap->stats.resident_count--;
+                   OSAddAtomic(-1, (SInt32 *) &pmap->stats.resident_count);
 
                    /*
-                    * Remove the pv_entry.
+                    * Deal with the pv_rooted_entry.
                     */
+
                    if (pv_e == pv_h) {
                        /*
                         * Fix up head later.
@@ -1516,43 +2362,68 @@ pmap_page_protect(
                        /*
                         * Delete this entry.
                         */
-                       prev->next = pv_e->next;
-                       PV_FREE(pv_e);
+                     CHK_NPVHASH();
+                     pvhash_idx = pvhashidx(pvh_e->pmap,pvh_e->va);
+                     LOCK_PV_HASH(pvhash_idx);
+                     remque(&pvh_e->qlink);
+                     pmap_pvh_unlink(pvh_e);
+                     UNLOCK_PV_HASH(pvhash_idx);
+
+                     pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+                       pvh_eh = pvh_e;
+
+                       if (pvh_et == PV_HASHED_ENTRY_NULL)
+                           pvh_et = pvh_e;
+                       pvh_cnt++;
                    }
-               }
-               else {
+               } else {
                    /*
                     * Write-protect.
                     */
-
-                       *pte &= ~INTEL_PTE_WRITE;
-                       pte++;
-                       PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
-                   /*
-                    * Advance prev.
-                    */
-                   prev = pv_e;
+                   pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WRITE));
+                   PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
                }
 
-               simple_unlock(&pmap->lock);
-
-           } while ((pv_e = prev->next) != PV_ENTRY_NULL);
+               pvh_e = nexth;
+           } while ((pv_e = (pv_rooted_entry_t)nexth) != pv_h);
 
            /*
             * If pv_head mapping was removed, fix it up.
             */
+
            if (pv_h->pmap == PMAP_NULL) {
-               pv_e = pv_h->next;
-               if (pv_e != PV_ENTRY_NULL) {
-                   *pv_h = *pv_e;
-                   PV_FREE(pv_e);
+             pvh_e = (pv_hashed_entry_t)queue_next(&pv_h->qlink);
+
+             if (pvh_e != (pv_hashed_entry_t)pv_h) {
+               CHK_NPVHASH();
+               pvhash_idx = pvhashidx(pvh_e->pmap,pvh_e->va);
+               LOCK_PV_HASH(pvhash_idx);
+               remque(&pvh_e->qlink);
+               pmap_pvh_unlink(pvh_e);
+               UNLOCK_PV_HASH(pvhash_idx);
+                 pv_h->pmap = pvh_e->pmap;
+                 pv_h->va = pvh_e->va;
+                 pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+                   pvh_eh = pvh_e;
+
+                   if (pvh_et == PV_HASHED_ENTRY_NULL)
+                       pvh_et = pvh_e;
+                   pvh_cnt++;
                }
            }
        }
+       if (pvh_eh != PV_HASHED_ENTRY_NULL) {
+           PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);
+       }
+
+       UNLOCK_PVH(pai);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_END,
+                  0, 0, 0, 0, 0);
 
-       PMAP_WRITE_UNLOCK(spl);
 }
 
+
 /*
  *     Routine:
  *             pmap_disconnect
@@ -1565,7 +2436,7 @@ pmap_page_protect(
 unsigned int pmap_disconnect(
        ppnum_t pa)
 {
-       pmap_page_protect(pa, 0);                               /* disconnect the page */
+       pmap_page_protect(pa, 0);                       /* disconnect the page */
        return (pmap_get_refmod(pa));                   /* return ref/chg status */
 }
 
@@ -1577,64 +2448,100 @@ unsigned int pmap_disconnect(
 void
 pmap_protect(
        pmap_t          map,
-       vm_offset_t     s,
-       vm_offset_t     e,
+       vm_map_offset_t sva,
+       vm_map_offset_t eva,
        vm_prot_t       prot)
 {
        register pt_entry_t     *pde;
        register pt_entry_t     *spte, *epte;
-       vm_offset_t             l;
-       spl_t           spl;
-       vm_offset_t    orig_s = s;
+       vm_map_offset_t         lva;
+       vm_map_offset_t         orig_sva;
+       boolean_t       set_NX;
+       int             num_found = 0;
 
+       pmap_intr_assert();
 
        if (map == PMAP_NULL)
                return;
 
-       /*
-        * Determine the new protection.
-        */
-       switch (prot) {
-           case VM_PROT_READ:
-           case VM_PROT_READ|VM_PROT_EXECUTE:
-               break;
-           case VM_PROT_READ|VM_PROT_WRITE:
-           case VM_PROT_ALL:
-               return; /* nothing to do */
-           default:
-               pmap_remove(map, (addr64_t)s, (addr64_t)e);
+       if (prot == VM_PROT_NONE) {
+               pmap_remove(map, sva, eva);
                return;
        }
 
-       SPLVM(spl);
-       simple_lock(&map->lock);
+       PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START,
+                  (int) map,
+                  (int) (sva>>32), (int) sva,
+                  (int) (eva>>32), (int) eva);
+
+       if ( (prot & VM_PROT_EXECUTE) || !nx_enabled || !map->nx_enabled )
+               set_NX = FALSE;
+       else
+               set_NX = TRUE;
+
+       PMAP_LOCK(map);
 
-       pde = pmap_pde(map, s);
-       while (s < e) {
-           l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
-           if (l > e)
-               l = e;
-           if (*pde & INTEL_PTE_VALID) {
-             spte = (pt_entry_t *)pmap_pte(map, (s & ~(PDE_MAPPED_SIZE-1)));
-               spte = &spte[ptenum(s)];
-               epte = &spte[intel_btop(l-s)];
+       orig_sva = sva;
+       while (sva < eva) {
+           lva = (sva + pde_mapped_size) & ~(pde_mapped_size-1);
+           if (lva > eva)
+               lva = eva;
+           pde = pmap_pde(map, sva);
+           if (pde && (*pde & INTEL_PTE_VALID)) {
+               spte = (pt_entry_t *)pmap_pte(map, (sva & ~(pde_mapped_size-1)));
+               spte = &spte[ptenum(sva)];
+               epte = &spte[intel_btop(lva-sva)];
 
                while (spte < epte) {
-                   if (*spte & INTEL_PTE_VALID)
-                       *spte &= ~INTEL_PTE_WRITE;
+
+                   if (*spte & INTEL_PTE_VALID) {
+                     
+                       if (prot & VM_PROT_WRITE)
+                           pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_WRITE));
+                       else
+                           pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_WRITE));
+
+                       if (set_NX == TRUE)
+                           pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_NX));
+                       else
+                           pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_NX));
+
+                       num_found++;
+                   }
                    spte++;
                }
            }
-           s = l;
-           pde++;
+           sva = lva;
        }
+       if (num_found)
+           PMAP_UPDATE_TLBS(map, orig_sva, eva);
 
-       PMAP_UPDATE_TLBS(map, orig_s, e);
+       PMAP_UNLOCK(map);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END,
+                  0, 0, 0, 0, 0);
 
-       simple_unlock(&map->lock);
-       SPLX(spl);
 }
 
+/* Map a (possibly) autogenned block */
+void
+pmap_map_block(
+       pmap_t          pmap, 
+       addr64_t        va,
+       ppnum_t         pa,
+       uint32_t        size,
+       vm_prot_t       prot,
+       int             attr,
+       __unused unsigned int   flags)
+{
+    uint32_t page;
+
+    for (page = 0; page < size; page++) {
+       pmap_enter(pmap, va, pa, prot, attr, TRUE);
+       va += PAGE_SIZE;
+       pa++;
+    }
+}
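pmap_map_block() is simply a loop of wired, page-sized pmap_enter() calls. A hedged usage sketch; the 16-page count and the choice of protections are made up for illustration:

    /* Illustrative only: map 16 wired, writable pages at va -> physical page pa. */
    static void
    example_map_block(pmap_t pmap, addr64_t va, ppnum_t pa)
    {
            pmap_map_block(pmap, va, pa,
                           16,                              /* size, in pages */
                           VM_PROT_READ | VM_PROT_WRITE,    /* prot */
                           0,                               /* attr: default cacheability */
                           0);                              /* flags (unused) */
    }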
 
 
 /*
@@ -1652,32 +2559,47 @@ pmap_protect(
 void
 pmap_enter(
        register pmap_t         pmap,
-       vm_offset_t             v,
+       vm_map_offset_t         vaddr,
        ppnum_t                 pn,
        vm_prot_t               prot,
        unsigned int            flags,
        boolean_t               wired)
 {
        register pt_entry_t     *pte;
-       register pv_entry_t     pv_h;
+       register pv_rooted_entry_t      pv_h;
        register int            pai;
-       pv_entry_t              pv_e;
+       pv_hashed_entry_t               pvh_e;
+       pv_hashed_entry_t               pvh_new;
+       pv_hashed_entry_t       *hashp;
        pt_entry_t              template;
-       spl_t                   spl;
        pmap_paddr_t            old_pa;
        pmap_paddr_t             pa = (pmap_paddr_t)i386_ptob(pn);
-
-       XPR(0x80000000, "%x/%x: pmap_enter %x/%x/%x\n",
-           current_thread(),
-           current_thread(), 
-           pmap, v, pn);
-
+       boolean_t               need_tlbflush = FALSE;
+       boolean_t               set_NX;
+       char                    oattr;
+       int                     pvhash_idx;
+       uint32_t                pv_cnt;
+       boolean_t               old_pa_locked;
+
+       pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
        if (pmap_debug)
-               printf("pmap(%x, %x)\n", v, pn);
+               printf("pmap(%qx, %x)\n", vaddr, pn);
        if (pmap == PMAP_NULL)
                return;
+       if (pn == vm_page_guard_addr)
+               return;
 
+       PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_START,
+                  (int) pmap,
+                  (int) (vaddr>>32), (int) vaddr,
+                  (int) pn, prot);
+
+       if ( (prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled )
+               set_NX = FALSE;
+       else
+               set_NX = TRUE;
+       
        /*
         *      Must allocate a new pvlist entry while we're unlocked;
         *      zalloc may cause pageout (which will lock the pmap system).
@@ -1685,9 +2607,12 @@ pmap_enter(
         *      and allocate one.  Then we will retry, throwing away
         *      the allocated entry later (if we no longer need it).
         */
-       pv_e = PV_ENTRY_NULL;
 
-       PMAP_READ_LOCK(pmap, spl);
+       pvh_new = PV_HASHED_ENTRY_NULL;
+Retry:
+       pvh_e = PV_HASHED_ENTRY_NULL;
+
+       PMAP_LOCK(pmap);
 
        /*
         *      Expand pmap to include this pte.  Assume that
@@ -1695,29 +2620,49 @@ pmap_enter(
         *      pages to map one VM page.
         */
 
-       while ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL) {
+       while ((pte = pmap_pte(pmap, vaddr)) == PT_ENTRY_NULL) {
                /*
                 *      Must unlock to expand the pmap.
                 */
-               PMAP_READ_UNLOCK(pmap, spl);
+               PMAP_UNLOCK(pmap);
+               pmap_expand(pmap, vaddr); /* going to grow pde level page(s) */
+               PMAP_LOCK(pmap);
+       }
+
+       old_pa = pte_to_pa(*pte);
+       pai = pa_index(old_pa);
+       old_pa_locked = FALSE;
 
-               pmap_expand(pmap, v);
+       /*
+        * If we have a previous managed page, lock its pv entry now.  After
+        * taking the lock, check whether some other path cleared the mapping
+        * first and, if so, drop the lock again.
+        */
 
-               PMAP_READ_LOCK(pmap, spl);
+       if ((0 != old_pa) && managed_page(pai)) {
+         LOCK_PVH(pai);
+         old_pa_locked = TRUE;
+         old_pa = pte_to_pa(*pte);
+         if (0 == old_pa) {
+           UNLOCK_PVH(pai);  /* some other path beat us to it */
+           old_pa_locked = FALSE;
+         }
        }
+
+
        /*
-        *      Special case if the physical page is already mapped
+        *      Special case if the incoming physical page is already mapped
         *      at this address.
         */
-       old_pa = pte_to_pa(*pte);
        if (old_pa == pa) {
+
            /*
             *  May be changing its wired attribute or protection
             */
-       
+
            template = pa_to_pte(pa) | INTEL_PTE_VALID;
 
-           if(flags & VM_MEM_NOT_CACHEABLE) {
+           if(VM_MEM_NOT_CACHEABLE == (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT))) {
                if(!(flags & VM_MEM_GUARDED))
                        template |= INTEL_PTE_PTA;
                template |= INTEL_PTE_NCACHE;
@@ -1727,23 +2672,29 @@ pmap_enter(
                template |= INTEL_PTE_USER;
            if (prot & VM_PROT_WRITE)
                template |= INTEL_PTE_WRITE;
+
+           if (set_NX == TRUE)
+               template |= INTEL_PTE_NX;
+
            if (wired) {
                template |= INTEL_PTE_WIRED;
                if (!iswired(*pte))
-                   pmap->stats.wired_count++;
+                   OSAddAtomic(+1, (SInt32 *) &pmap->stats.wired_count);
            }
            else {
                if (iswired(*pte)) {
                    assert(pmap->stats.wired_count >= 1);
-                   pmap->stats.wired_count--;
+                   OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count);
                }
            }
 
-               if (*pte & INTEL_PTE_MOD)
-                   template |= INTEL_PTE_MOD;
-               WRITE_PTE(pte, template)
-                 pte++;
-
+           /* store modified PTE and preserve RC bits */ 
+           pmap_update_pte(pte, *pte, template | (*pte & (INTEL_PTE_REF | INTEL_PTE_MOD)));
+           if (old_pa_locked) {
+             UNLOCK_PVH(pai);
+             old_pa_locked = FALSE;
+           }
+           need_tlbflush = TRUE;
            goto Done;
        }
 
@@ -1754,11 +2705,6 @@ pmap_enter(
         *         2) Add pvlist entry for new mapping
         *         3) Enter new mapping.
         *
-        *      SHARING_FAULTS complicates this slightly in that it cannot
-        *      replace the mapping, but must remove it (because adding the
-        *      pvlist entry for the new mapping may remove others), and
-        *      hence always enters the new mapping at step 3)
-        *
         *      If the old physical page is not managed step 1) is skipped
         *      (except for updating the TLBs), and the mapping is
         *      overwritten at step 3).  If the new physical page is not
@@ -1767,260 +2713,217 @@ pmap_enter(
 
        if (old_pa != (pmap_paddr_t) 0) {
 
-
-#if    DEBUG_PTE_PAGE
-           if (pmap != kernel_pmap)
-               ptep_check(get_pte_page(pte));
-#endif /* DEBUG_PTE_PAGE */
-
            /*
             *  Don't do anything to pages outside valid memory here.
             *  Instead convince the code that enters a new mapping
             *  to overwrite the old one.
             */
 
-           if (valid_page(i386_btop(old_pa))) {
+           /* invalidate the PTE */ 
+           pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
+           /* propagate invalidate everywhere */
+           PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
+           /* remember reference and change */
+           oattr = (char)(*pte & (PHYS_MODIFIED | PHYS_REFERENCED));
+           /* completely invalidate the PTE */
+           pmap_store_pte(pte, 0);
 
-               pai = pa_index(old_pa);
-               LOCK_PVH(pai);
+           if (managed_page(pai)) {
 
+#if TESTING
+               if (pmap->stats.resident_count < 1)
+                   panic("pmap_enter: resident_count");
+#endif
                assert(pmap->stats.resident_count >= 1);
-               pmap->stats.resident_count--;
+               OSAddAtomic(-1, (SInt32 *) &pmap->stats.resident_count);
+
                if (iswired(*pte)) {
+
+#if TESTING
+                   if (pmap->stats.wired_count < 1)
+                       panic("pmap_enter: wired_count");
+#endif
                    assert(pmap->stats.wired_count >= 1);
-                   pmap->stats.wired_count--;
+                   OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count);
                }
 
-                   pmap_phys_attributes[pai] |=
-                       *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-                   WRITE_PTE(pte, 0)
-
+               pmap_phys_attributes[pai] |= oattr;
                /*
                 *      Remove the mapping from the pvlist for
                 *      this physical page.
+                *      We'll end up with either a rooted pv or a
+                *      hashed pv
                 */
                {
-                   register pv_entry_t prev, cur;
 
                    pv_h = pai_to_pvh(pai);
+
                    if (pv_h->pmap == PMAP_NULL) {
                        panic("pmap_enter: null pv_list!");
                    }
-                   if (pv_h->va == v && pv_h->pmap == pmap) {
+
+                   if (pv_h->va == vaddr && pv_h->pmap == pmap) {
                        /*
-                        * Header is the pv_entry.  Copy the next one
-                        * to header and free the next one (we cannot
+                        * Header is the pv_rooted_entry.  
+                        * If there is a next one, copy it to the
+                        * header and free the next one (we cannot
                         * free the header)
                         */
-                       cur = pv_h->next;
-                       if (cur != PV_ENTRY_NULL) {
-                           *pv_h = *cur;
-                           pv_e = cur;
-                       }
-                       else {
-                           pv_h->pmap = PMAP_NULL;
+                     pvh_e = (pv_hashed_entry_t)queue_next(&pv_h->qlink);
+                     if (pvh_e != (pv_hashed_entry_t)pv_h) {
+                       pvhash_idx = pvhashidx(pvh_e->pmap, pvh_e->va);
+                       LOCK_PV_HASH(pvhash_idx);
+                         remque(&pvh_e->qlink);
+                         pmap_pvh_unlink(pvh_e);
+                         UNLOCK_PV_HASH(pvhash_idx);
+                         pv_h->pmap = pvh_e->pmap;
+                         pv_h->va = pvh_e->va;
                        }
+                     else {
+                       pv_h->pmap = PMAP_NULL;
+                       pvh_e = PV_HASHED_ENTRY_NULL;
+                     }
                    }
                    else {
-                       cur = pv_h;
-                       do {
-                           prev = cur;
-                           if ((cur = prev->next) == PV_ENTRY_NULL) {
-                               panic("pmap_enter: mapping not in pv_list!");
-                           }
-                       } while (cur->va != v || cur->pmap != pmap);
-                       prev->next = cur->next;
-                       pv_e = cur;
+                     pv_hashed_entry_t *pprevh;
+                     ppnum_t old_ppn;
+                     /* wasn't the rooted pv - hash, find it, and unlink it */
+                     old_ppn = (ppnum_t)pa_index(old_pa);
+                     CHK_NPVHASH();
+                     pvhash_idx = pvhashidx(pmap,vaddr);
+                     LOCK_PV_HASH(pvhash_idx);
+                     pprevh = pvhash(pvhash_idx);
+#if PV_DEBUG
+                     if (NULL == pprevh) panic("pmap enter 1");
+#endif
+                     pvh_e = *pprevh;
+                     pmap_pv_hashlist_walks++;
+                     pv_cnt = 0;
+                     while (PV_HASHED_ENTRY_NULL != pvh_e) {
+                       pv_cnt++;
+                       if (pvh_e->pmap == pmap && pvh_e->va == vaddr && pvh_e->ppn == old_ppn) break;
+                       pprevh = &pvh_e->nexth;
+                       pvh_e = pvh_e->nexth;
+                     }
+                     pmap_pv_hashlist_cnts += pv_cnt;
+                     if (pmap_pv_hashlist_max < pv_cnt) pmap_pv_hashlist_max = pv_cnt;
+                     if (PV_HASHED_ENTRY_NULL == pvh_e) panic("pmap_enter: pv not in hash list");
+                     if (NULL == pprevh) panic("pmap enter 2");
+                     *pprevh = pvh_e->nexth;
+                     remque(&pvh_e->qlink);
+                     UNLOCK_PV_HASH(pvhash_idx);
                    }
                }
-               UNLOCK_PVH(pai);
            }
            else {
 
                /*
-                *      old_pa is not managed.  Pretend it's zero so code
-                *      at Step 3) will enter new mapping (overwriting old
-                *      one).  Do removal part of accounting.
+                *      old_pa is not managed.
+                *      Do removal part of accounting.
                 */
-               old_pa = (pmap_paddr_t) 0;
-               assert(pmap->stats.resident_count >= 1);
-               pmap->stats.resident_count--;
+
                if (iswired(*pte)) {
                    assert(pmap->stats.wired_count >= 1);
-                   pmap->stats.wired_count--;
+                   OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count);
                }
            }
-        
        }
 
-       if (valid_page(i386_btop(pa))) {
+       /*
+        * If we had a previously managed page locked, unlock it now.
+        */
+
+       if (old_pa_locked) {
+         UNLOCK_PVH(pai);
+         old_pa_locked = FALSE;
+       }
+
+       pai = pa_index(pa);     /* now working with new incoming phys page */
+       if (managed_page(pai)) {
 
            /*
             *  Step 2) Enter the mapping in the PV list for this
             *  physical page.
             */
+           pv_h = pai_to_pvh(pai);
 
-           pai = pa_index(pa);
-
-
-#if SHARING_FAULTS
-RetryPvList:
-           /*
-            * We can return here from the sharing fault code below
-            * in case we removed the only entry on the pv list and thus
-            * must enter the new one in the list header.
-            */
-#endif /* SHARING_FAULTS */
            LOCK_PVH(pai);
-           pv_h = pai_to_pvh(pai);
 
            if (pv_h->pmap == PMAP_NULL) {
                /*
-                *      No mappings yet
+                *      No mappings yet, use the rooted pv
                 */
-               pv_h->va = v;
+               pv_h->va = vaddr;
                pv_h->pmap = pmap;
-               pv_h->next = PV_ENTRY_NULL;
+               queue_init(&pv_h->qlink);
            }
            else {
-#if    DEBUG
-               {
-                   /*
-                    * check that this mapping is not already there
-                    * or there is no alias for this mapping in the same map
-                    */
-                   pv_entry_t  e = pv_h;
-                   while (e != PV_ENTRY_NULL) {
-                       if (e->pmap == pmap && e->va == v)
-                            panic("pmap_enter: already in pv_list");
-                       e = e->next;
-                   }
-               }
-#endif /* DEBUG */
-#if SHARING_FAULTS
-                {
-                    /*
-                     * do sharing faults.
-                     * if we find an entry on this pv list in the same address
-                    * space, remove it.  we know there will not be more
-                    * than one. 
-                    */
-                   pv_entry_t  e = pv_h;
-                    pt_entry_t      *opte;
-
-                   while (e != PV_ENTRY_NULL) {
-                       if (e->pmap == pmap) {
-                            /*
-                            *  Remove it, drop pv list lock first.
-                            */
-                            UNLOCK_PVH(pai);
-
-                            opte = pmap_pte(pmap, e->va);
-                            assert(opte != PT_ENTRY_NULL);
-                            /*
-                            *  Invalidate the translation buffer,
-                            *  then remove the mapping.
-                            */
-                             pmap_remove_range(pmap, e->va, opte,
-                                                      opte + 1);
-                            PMAP_UPDATE_TLBS(pmap, e->va, e->va + PAGE_SIZE);
-
-                            /*
-                             * We could have remove the head entry,
-                             * so there could be no more entries
-                             * and so we have to use the pv head entry.
-                             * so, go back to the top and try the entry
-                             * again.
-                             */
-                            goto RetryPvList;
-                       }
-                        e = e->next;
-                   }
-
-                   /*
-                     * check that this mapping is not already there
-                     */
-                   e = pv_h;
-                   while (e != PV_ENTRY_NULL) {
-                       if (e->pmap == pmap)
-                            panic("pmap_enter: alias in pv_list");
-                       e = e->next;
-                   }
-               }
-#endif /* SHARING_FAULTS */
-#if DEBUG_ALIAS
-                {
-                    /*
-                     * check for aliases within the same address space.
-                     */
-                   pv_entry_t  e = pv_h;
-                    vm_offset_t     rpc = get_rpc();
-
-                   while (e != PV_ENTRY_NULL) {
-                       if (e->pmap == pmap) {
-                            /*
-                             * log this entry in the alias ring buffer
-                            * if it's not there already.
-                             */
-                            struct pmap_alias *pma;
-                            int ii, logit;
-
-                            logit = TRUE;
-                            for (ii = 0; ii < pmap_alias_index; ii++) {
-                                if (pmap_aliasbuf[ii].rpc == rpc) {
-                                    /* found it in the log already */
-                                    logit = FALSE;
-                                    break;
-                               }
-                           }
-                            if (logit) {
-                                pma = &pmap_aliasbuf[pmap_alias_index];
-                                pma->pmap = pmap;
-                                pma->va = v;
-                                pma->rpc = rpc;
-                                pma->cookie = PMAP_ALIAS_COOKIE;
-                                if (++pmap_alias_index >= PMAP_ALIAS_MAX)
-                                    panic("pmap_enter: exhausted alias log");
-                           }
-                       }
-                        e = e->next;
-                   }
-               }
-#endif /* DEBUG_ALIAS */
                /*
-                *      Add new pv_entry after header.
+                *      Add new pv_hashed_entry after header.
                 */
-               if (pv_e == PV_ENTRY_NULL) {
-                   PV_ALLOC(pv_e);
-                   if (pv_e == PV_ENTRY_NULL) {
-                     panic("pmap no pv_e's");
+               if ((PV_HASHED_ENTRY_NULL == pvh_e) && pvh_new) {
+                 pvh_e = pvh_new;
+                 pvh_new = PV_HASHED_ENTRY_NULL;  /* show we used it */
+               } else if (PV_HASHED_ENTRY_NULL == pvh_e) {
+                 PV_HASHED_ALLOC(pvh_e);
+                 if (PV_HASHED_ENTRY_NULL == pvh_e) {
+                   /* The pv free list is empty.
+                    * If we are on the kernel pmap, use one of the special private
+                    * kernel pv_e's; otherwise we need to unlock everything, zalloc
+                    * a pv_e, and restart, bringing the new pv_e in with us.
+                    */
+                   if (kernel_pmap == pmap) {
+                     PV_HASHED_KERN_ALLOC(pvh_e);
+                   } else {
+                     UNLOCK_PVH(pai);
+                     PMAP_UNLOCK(pmap);
+                     pvh_new = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
+                     goto Retry;
                    }
+                 }
                }
-               pv_e->va = v;
-               pv_e->pmap = pmap;
-               pv_e->next = pv_h->next;
-               pv_h->next = pv_e;
+
+               if (PV_HASHED_ENTRY_NULL == pvh_e) panic("pvh_e exhaustion");
+               pvh_e->va = vaddr;
+               pvh_e->pmap = pmap;
+               pvh_e->ppn = pn;
+               CHK_NPVHASH();
+               pvhash_idx = pvhashidx(pmap,vaddr);
+               LOCK_PV_HASH(pvhash_idx);
+               insque(&pvh_e->qlink, &pv_h->qlink);
+               hashp = pvhash(pvhash_idx);
+#if PV_DEBUG
+               if (NULL == hashp) panic("pmap_enter 4");
+#endif
+               pvh_e->nexth = *hashp;
+               *hashp = pvh_e;
+               UNLOCK_PV_HASH(pvhash_idx);
+
                /*
                 *      Remember that we used the pvlist entry.
                 */
-               pv_e = PV_ENTRY_NULL;
+               pvh_e = PV_HASHED_ENTRY_NULL;
            }
-           UNLOCK_PVH(pai);
-       }
-
-       /*
-        * Step 3) Enter and count the mapping.
-        */
 
-       pmap->stats.resident_count++;
+           /*
+            * only count the mapping
+            * for 'managed memory'
+            */
+           OSAddAtomic(+1, (SInt32 *) &pmap->stats.resident_count);
+           if (pmap->stats.resident_count > pmap->stats.resident_max) {
+                   pmap->stats.resident_max = pmap->stats.resident_count;
+           }
+       }
 
        /*
+        * Step 3) Enter the mapping.
+        *
         *      Build a template to speed up entering -
         *      only the pfn changes.
         */
        template = pa_to_pte(pa) | INTEL_PTE_VALID;
 
-       if(flags & VM_MEM_NOT_CACHEABLE) {
+       if (flags & VM_MEM_NOT_CACHEABLE) {
                if(!(flags & VM_MEM_GUARDED))
                        template |= INTEL_PTE_PTA;
                template |= INTEL_PTE_NCACHE;
@@ -2030,21 +2933,38 @@ RetryPvList:
                template |= INTEL_PTE_USER;
        if (prot & VM_PROT_WRITE)
                template |= INTEL_PTE_WRITE;
+
+       if (set_NX == TRUE)
+               template |= INTEL_PTE_NX;
+
        if (wired) {
                template |= INTEL_PTE_WIRED;
-               pmap->stats.wired_count++;
+               OSAddAtomic(+1, (SInt32 *) &pmap->stats.wired_count);
        }
+       pmap_store_pte(pte, template);
+
+       /* If this was a managed page, we delayed unlocking the pv until here
+        * to prevent pmap_page_protect et al. from finding it until the pte
+        * has been stored. */
 
-        WRITE_PTE(pte, template)
+       if (managed_page(pai)) {
+         UNLOCK_PVH(pai);
+       }
 
 Done:
-        PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
+       if (need_tlbflush == TRUE)
+               PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
+
+       if (pvh_e != PV_HASHED_ENTRY_NULL) {
+               PV_HASHED_FREE_LIST(pvh_e, pvh_e, 1);
+       }
 
-       if (pv_e != PV_ENTRY_NULL) {
-           PV_FREE(pv_e);
+       if (pvh_new != PV_HASHED_ENTRY_NULL) {
+         PV_HASHED_KERN_FREE_LIST(pvh_new, pvh_new, 1);
        }
 
-       PMAP_READ_UNLOCK(pmap, spl);
+       PMAP_UNLOCK(pmap);
+       PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_END, 0, 0, 0, 0, 0);
 }
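
The new pmap_enter() keeps a single rooted pv entry per managed physical page and chains any additional (pmap, va) mappings through a hash table keyed by pvhashidx(pmap, va); the removal path above hashes the pair, walks the bucket through nexth, and unlinks the matching entry while counting the chain length. A simplified user-space sketch of that chained-hash removal, using stand-in types and a hypothetical hash function rather than the kernel's pv_hashed_entry machinery.

/* Illustrative sketch only -- simplified stand-ins, not kernel structures. */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_NPVHASH 64

struct sketch_pv {
        void             *pmap;         /* owning map (opaque here) */
        uint64_t          va;           /* virtual address of the mapping */
        uint32_t          ppn;          /* physical page number */
        struct sketch_pv *nexth;        /* hash-bucket chain */
};

static struct sketch_pv *sketch_bucket[SKETCH_NPVHASH];

static unsigned sketch_pvhashidx(void *pmap, uint64_t va)
{
        /* cheap mix of the pmap pointer and the page-aligned va */
        return (unsigned)(((uintptr_t)pmap ^ (va >> 12)) % SKETCH_NPVHASH);
}

/* Find and unlink the entry for (pmap, va, ppn); returns it, or NULL. */
static struct sketch_pv *
sketch_pv_remove(void *pmap, uint64_t va, uint32_t ppn)
{
        struct sketch_pv **pprevh = &sketch_bucket[sketch_pvhashidx(pmap, va)];
        struct sketch_pv  *e = *pprevh;

        while (e != NULL) {
                if (e->pmap == pmap && e->va == va && e->ppn == ppn) {
                        *pprevh = e->nexth;     /* unlink from the chain */
                        return e;
                }
                pprevh = &e->nexth;
                e = e->nexth;
        }
        return NULL;                            /* not on this bucket */
}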
 
 /*
@@ -2057,62 +2977,55 @@ Done:
 void
 pmap_change_wiring(
        register pmap_t map,
-       vm_offset_t     v,
+       vm_map_offset_t vaddr,
        boolean_t       wired)
 {
        register pt_entry_t     *pte;
-       spl_t                   spl;
 
-#if 1
        /*
         *      We must grab the pmap system lock because we may
         *      change a pte_page queue.
         */
-       PMAP_READ_LOCK(map, spl);
+       PMAP_LOCK(map);
 
-       if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
+       if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL)
                panic("pmap_change_wiring: pte missing");
 
        if (wired && !iswired(*pte)) {
            /*
             *  wiring down mapping
             */
-           map->stats.wired_count++;
-           *pte++ |= INTEL_PTE_WIRED;
+           OSAddAtomic(+1, (SInt32 *) &map->stats.wired_count);
+           pmap_update_pte(pte, *pte, (*pte | INTEL_PTE_WIRED));
        }
        else if (!wired && iswired(*pte)) {
            /*
             *  unwiring mapping
             */
            assert(map->stats.wired_count >= 1);
-           map->stats.wired_count--;
-           *pte++ &= ~INTEL_PTE_WIRED;
+           OSAddAtomic(-1, (SInt32 *) &map->stats.wired_count);
+           pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WIRED));
        }
 
-       PMAP_READ_UNLOCK(map, spl);
-
-#else
-       return;
-#endif
-
+       PMAP_UNLOCK(map);
 }
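
pmap_change_wiring() now routes every PTE change through pmap_update_pte(pte, old, new) and keeps wired_count with OSAddAtomic() instead of plain increments. Assuming pmap_update_pte() behaves like an atomic compare-and-exchange of the old PTE image for the new one (its definition is not part of this hunk), the set/clear-a-bit pattern looks like the sketch below; the names and the bit position are illustrative only.

/* Illustrative sketch only -- hypothetical names, not the kernel helper. */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PTE_WIRED (1ULL << 9)    /* hypothetical software bit */

/* Retry until the PTE is updated without losing a concurrent change. */
static void sketch_update_pte_bit(volatile uint64_t *pte, bool set)
{
        uint64_t old, new;

        do {
                old = *pte;
                new = set ? (old | SKETCH_PTE_WIRED)
                          : (old & ~SKETCH_PTE_WIRED);
        } while (!__sync_bool_compare_and_swap(pte, old, new));
}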
 
 ppnum_t
 pmap_find_phys(pmap_t pmap, addr64_t va)
 {
        pt_entry_t     *ptp;
-       vm_offset_t     a32;
        ppnum_t         ppn;
 
-       if (value_64bit(va))
-               panic("pmap_find_phys 64 bit value");
-       a32 = (vm_offset_t) low32(va);
-       ptp = pmap_pte(pmap, a32);
+       mp_disable_preemption();
+
+       ptp = pmap_pte(pmap, va);
        if (PT_ENTRY_NULL == ptp) {
                ppn = 0;
        } else {
                ppn = (ppnum_t) i386_btop(pte_to_pa(*ptp));
        }
+       mp_enable_preemption();
+
        return ppn;
 }
 
@@ -2129,54 +3042,41 @@ pmap_find_phys(pmap_t pmap, addr64_t va)
 vm_offset_t
 pmap_extract(
        register pmap_t pmap,
-       vm_offset_t     va)
+       vm_map_offset_t vaddr)
 {
-  ppnum_t ppn;
-  vm_offset_t vaddr;
+        ppnum_t ppn;
+       vm_offset_t paddr;
 
-  vaddr = (vm_offset_t)0;
-  ppn = pmap_find_phys(pmap, (addr64_t)va);
-  if (ppn) {
-    vaddr = ((vm_offset_t)i386_ptob(ppn)) | (va & INTEL_OFFMASK);
-  }
-  return (vaddr);
-}
+       paddr = (vm_offset_t)0;
+       ppn = pmap_find_phys(pmap, vaddr);
 
+       if (ppn) {
+               paddr = ((vm_offset_t)i386_ptob(ppn)) | (vaddr & INTEL_OFFMASK);
+       }
+       return (paddr);
+}
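
pmap_extract() rebuilds a physical address as i386_ptob(ppn) | (vaddr & INTEL_OFFMASK): the frame base derived from the page number, plus the page-internal offset taken from the virtual address. A tiny sketch of that composition with a hypothetical 4 KB page size; for example, vaddr 0x7f12345abc gives offset 0xabc, and ppn 0x2000 gives frame base 0x2000000, so the result is 0x2000abc.

/* Illustrative sketch only -- generic arithmetic, not the kernel routine. */
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_OFFMASK    ((1ULL << SKETCH_PAGE_SHIFT) - 1)

static uint64_t sketch_extract(uint64_t ppn, uint64_t vaddr)
{
        if (ppn == 0)
                return 0;                       /* not mapped */
        /* frame base OR page-internal offset */
        return (ppn << SKETCH_PAGE_SHIFT) | (vaddr & SKETCH_OFFMASK);
}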
 
-/*
- *     Routine:        pmap_expand
- *
- *     Expands a pmap to be able to map the specified virtual address.
- *
- *     Allocates new virtual memory for the P0 or P1 portion of the
- *     pmap, then re-maps the physical pages that were in the old
- *     pmap to be in the new pmap.
- *
- *     Must be called with the pmap system and the pmap unlocked,
- *     since these must be unlocked to use vm_allocate or vm_deallocate.
- *     Thus it must be called in a loop that checks whether the map
- *     has been expanded enough.
- *     (We won't loop forever, since page tables aren't shrunk.)
- */
 void
-pmap_expand(
-       register pmap_t         map,
-       register vm_offset_t    v)
+pmap_expand_pml4(
+                pmap_t map,
+                vm_map_offset_t vaddr)
 {
-       pt_entry_t              *pdp;
        register vm_page_t      m;
        register pmap_paddr_t   pa;
-       register int            i;
+       uint64_t                i;
        spl_t                   spl;
        ppnum_t                 pn;
+       pml4_entry_t            *pml4p;
 
-       if (map == kernel_pmap) {
-         pmap_growkernel(v);
-         return;
-       }
+       if (kernel_pmap == map) panic("expand kernel pml4");
+
+       spl = splhigh();
+       pml4p = pmap64_pml4(map, vaddr);
+       splx(spl);
+       if (PML4_ENTRY_NULL == pml4p) panic("pmap_expand_pml4 no pml4p");
 
        /*
-        *      Allocate a VM page for the level 2 page table entries.
+        *      Allocate a VM page for the pml4 page
         */
        while ((m = vm_page_grab()) == VM_PAGE_NULL)
                VM_PAGE_WAIT();
@@ -2187,138 +3087,337 @@ pmap_expand(
         */
        pn = m->phys_page;
        pa = i386_ptob(pn);
-       i = pdenum(map, v);
-       vm_object_lock(map->pm_obj);
-       vm_page_insert(m, map->pm_obj, (vm_object_offset_t)i);
-       vm_page_lock_queues();
-       vm_page_wire(m);
-       inuse_ptepages_count++;
-       vm_object_unlock(map->pm_obj);
-       vm_page_unlock_queues();
+       i = pml4idx(map, vaddr);
 
        /*
         *      Zero the page.
         */
        pmap_zero_page(pn);
 
-       PMAP_READ_LOCK(map, spl);
+       vm_page_lock_queues();
+       vm_page_wire(m);
+       inuse_ptepages_count++;
+       vm_page_unlock_queues();
+
+       /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
+       vm_object_lock(map->pm_obj_pml4);
+
+       PMAP_LOCK(map);
        /*
         *      See if someone else expanded us first
         */
-       if (pmap_pte(map, v) != PT_ENTRY_NULL) {
-               PMAP_READ_UNLOCK(map, spl);
-               vm_object_lock(map->pm_obj);
+       if (pmap64_pdpt(map, vaddr) != PDPT_ENTRY_NULL) {
+               PMAP_UNLOCK(map);
+               vm_object_unlock(map->pm_obj_pml4);
+
                vm_page_lock_queues();
                vm_page_free(m);
                inuse_ptepages_count--;
                vm_page_unlock_queues();
-               vm_object_unlock(map->pm_obj);
+
                return;
        }
 
+#if 0 /* DEBUG */
+       if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i)) {
+              panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
+                    map, map->pm_obj_pml4, vaddr, i);
+       }
+#endif
+       vm_page_insert(m, map->pm_obj_pml4, (vm_object_offset_t)i);
+       vm_object_unlock(map->pm_obj_pml4);
+
        /*
         *      Set the page directory entry for this page table.
-        *      If we have allocated more than one hardware page,
-        *      set several page directory entries.
         */
+       pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */
+
+       pmap_store_pte(pml4p, pa_to_pte(pa)
+                               | INTEL_PTE_VALID
+                               | INTEL_PTE_USER
+                               | INTEL_PTE_WRITE);
 
-       pdp = &map->dirbase[pdenum(map, v)];
-           *pdp = pa_to_pte(pa)
-               | INTEL_PTE_VALID
-               | INTEL_PTE_USER
-               | INTEL_PTE_WRITE;
+       PMAP_UNLOCK(map);
 
-       PMAP_READ_UNLOCK(map, spl);
        return;
-}
 
-/*
- *     Copy the range specified by src_addr/len
- *     from the source map to the range dst_addr/len
- *     in the destination map.
- *
- *     This routine is only advisory and need not do anything.
- */
-#if    0
-void
-pmap_copy(
-       pmap_t          dst_pmap,
-       pmap_t          src_pmap,
-       vm_offset_t     dst_addr,
-       vm_size_t       len,
-       vm_offset_t     src_addr)
-{
-#ifdef lint
-       dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
-#endif /* lint */
 }
-#endif/*       0 */
 
-/*
- * pmap_sync_page_data_phys(ppnum_t pa)
- * 
- * Invalidates all of the instruction cache on a physical page and
- * pushes any dirty data from the data cache for the same physical page
- * Not required in i386.
- */
 void
-pmap_sync_page_data_phys(__unused ppnum_t pa)
+pmap_expand_pdpt(
+                pmap_t map,
+                vm_map_offset_t vaddr)
 {
-       return;
-}
+       register vm_page_t      m;
+       register pmap_paddr_t   pa;
+       uint64_t                i;
+       spl_t                   spl;
+       ppnum_t                 pn;
+       pdpt_entry_t            *pdptp;
 
-/*
- * pmap_sync_page_attributes_phys(ppnum_t pa)
- * 
- * Write back and invalidate all cachelines on a physical page.
- */
-void
-pmap_sync_page_attributes_phys(ppnum_t pa)
-{
-       cache_flush_page_phys(pa);
-}
+       if (kernel_pmap == map) panic("expand kernel pdpt");
 
-int    collect_ref;
-int    collect_unref;
+       spl = splhigh();
+       while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) {
+               splx(spl);
+               pmap_expand_pml4(map, vaddr); /* need room for another pdpt entry */
+               spl = splhigh();
+       }
+       splx(spl);
 
-/*
- *     Routine:        pmap_collect
- *     Function:
- *             Garbage collects the physical map system for
- *             pages which are no longer used.
- *             Success need not be guaranteed -- that is, there
- *             may well be pages which are not referenced, but
- *             others may be collected.
- *     Usage:
- *             Called by the pageout daemon when pages are scarce.
- */
-void
-pmap_collect(
-       pmap_t          p)
-{
-       register pt_entry_t     *pdp, *ptp;
-       pt_entry_t              *eptp;
-       int                     wired;
-       spl_t                   spl;
+       /*
+        *      Allocate a VM page for the pdpt page
+        */
+       while ((m = vm_page_grab()) == VM_PAGE_NULL)
+               VM_PAGE_WAIT();
 
-       if (p == PMAP_NULL)
-               return;
+       /*
+        *      put the page into the pmap's obj list so it
+        *      can be found later.
+        */
+       pn = m->phys_page;
+       pa = i386_ptob(pn);
+       i = pdptidx(map, vaddr);
+
+       /*
+        *      Zero the page.
+        */
+       pmap_zero_page(pn);
+
+       vm_page_lock_queues();
+       vm_page_wire(m);
+       inuse_ptepages_count++;
+       vm_page_unlock_queues();
+
+       /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
+       vm_object_lock(map->pm_obj_pdpt);
+
+       PMAP_LOCK(map);
+       /*
+        *      See if someone else expanded us first
+        */
+       if (pmap64_pde(map, vaddr) != PD_ENTRY_NULL) {
+               PMAP_UNLOCK(map);
+               vm_object_unlock(map->pm_obj_pdpt);
+
+               vm_page_lock_queues();
+               vm_page_free(m);
+               inuse_ptepages_count--;
+               vm_page_unlock_queues();
 
-       if (p == kernel_pmap)
                return;
+       }
+
+#if 0 /* DEBUG */
+       if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i)) {
+              panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
+                    map, map->pm_obj_pdpt, vaddr, i);
+       }
+#endif
+       vm_page_insert(m, map->pm_obj_pdpt, (vm_object_offset_t)i);
+       vm_object_unlock(map->pm_obj_pdpt);
 
        /*
-        *      Garbage collect map.
+        *      Set the page directory entry for this page table.
         */
-       PMAP_READ_LOCK(p, spl);
+       pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */
 
-       for (pdp = (pt_entry_t *)p->dirbase;
+       pmap_store_pte(pdptp, pa_to_pte(pa)
+                               | INTEL_PTE_VALID
+                               | INTEL_PTE_USER
+                               | INTEL_PTE_WRITE);
+
+       PMAP_UNLOCK(map);
+
+       return;
+
+}
+
+
+
+/*
+ *     Routine:        pmap_expand
+ *
+ *     Expands a pmap to be able to map the specified virtual address.
+ *
+ *     Allocates new virtual memory for the P0 or P1 portion of the
+ *     pmap, then re-maps the physical pages that were in the old
+ *     pmap to be in the new pmap.
+ *
+ *     Must be called with the pmap system and the pmap unlocked,
+ *     since these must be unlocked to use vm_allocate or vm_deallocate.
+ *     Thus it must be called in a loop that checks whether the map
+ *     has been expanded enough.
+ *     (We won't loop forever, since page tables aren't shrunk.)
+ */
+void
+pmap_expand(
+       pmap_t          map,
+       vm_map_offset_t vaddr)
+{
+       pt_entry_t              *pdp;
+       register vm_page_t      m;
+       register pmap_paddr_t   pa;
+       uint64_t                 i;
+       spl_t                   spl;
+       ppnum_t                 pn;
+
+       /*
+        * If this is not the kernel map (while we are still in compatibility
+        * kernel mode) and the cpu is 64-bit, propagate the expansion to the
+        * upper levels first.
+        */
+
+       if (cpu_64bit && (map != kernel_pmap)) {
+               spl = splhigh();
+               while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) {
+                       splx(spl);
+                       pmap_expand_pdpt(map, vaddr); /* need room for another pde entry */
+                       spl = splhigh();
+               }
+               splx(spl);
+       }
+
+       /*
+        *      Allocate a VM page for the pde entries.
+        */
+       while ((m = vm_page_grab()) == VM_PAGE_NULL)
+               VM_PAGE_WAIT();
+
+       /*
+        *      put the page into the pmap's obj list so it
+        *      can be found later.
+        */
+       pn = m->phys_page;
+       pa = i386_ptob(pn);
+       i = pdeidx(map, vaddr);
+
+       /*
+        *      Zero the page.
+        */
+       pmap_zero_page(pn);
+
+       vm_page_lock_queues();
+       vm_page_wire(m);
+       inuse_ptepages_count++;
+       vm_page_unlock_queues();
+
+       /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
+       vm_object_lock(map->pm_obj);
+
+       PMAP_LOCK(map);
+       /*
+        *      See if someone else expanded us first
+        */
+
+       if (pmap_pte(map, vaddr) != PT_ENTRY_NULL) {
+               PMAP_UNLOCK(map);
+               vm_object_unlock(map->pm_obj);
+
+               vm_page_lock_queues();
+               vm_page_free(m);
+               inuse_ptepages_count--;
+               vm_page_unlock_queues();
+
+               return;
+       }
+
+#if 0 /* DEBUG */
+       if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i)) {
+              panic("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n",
+                    map, map->pm_obj, vaddr, i);
+       }
+#endif
+       vm_page_insert(m, map->pm_obj, (vm_object_offset_t)i);
+       vm_object_unlock(map->pm_obj);
+
+       /*
+        * refetch while locked 
+        */
+
+       pdp = pmap_pde(map, vaddr);
+
+       /*
+        *      Set the page directory entry for this page table.
+        */
+       pmap_store_pte(pdp, pa_to_pte(pa)
+                               | INTEL_PTE_VALID
+                               | INTEL_PTE_USER
+                               | INTEL_PTE_WRITE);
+
+       PMAP_UNLOCK(map);
+
+       return;
+}
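
pmap_expand(), pmap_expand_pdpt() and pmap_expand_pml4() each add one missing level of the page-table hierarchy: grab and zero a page, wire it into the pmap's object, and re-check under the lock whether another thread expanded the map first. The sketch below only shows the standard x86-64 long-mode index split (9 bits per level above a 12-bit page offset) that pml4idx/pdptidx/pdeidx-style lookups rely on; it is generic paging arithmetic, not code from this diff.

/* Illustrative sketch only -- standard long-mode VA decomposition. */
#include <stdint.h>
#include <stdio.h>

struct sketch_va_split {
        unsigned pml4;          /* bits 47..39 */
        unsigned pdpt;          /* bits 38..30 */
        unsigned pde;           /* bits 29..21 */
        unsigned pte;           /* bits 20..12 */
        unsigned offset;        /* bits 11..0  */
};

static struct sketch_va_split sketch_split(uint64_t va)
{
        struct sketch_va_split s;

        s.offset = (unsigned)(va & 0xfff);
        s.pte    = (unsigned)((va >> 12) & 0x1ff);
        s.pde    = (unsigned)((va >> 21) & 0x1ff);
        s.pdpt   = (unsigned)((va >> 30) & 0x1ff);
        s.pml4   = (unsigned)((va >> 39) & 0x1ff);
        return s;
}

int main(void)
{
        struct sketch_va_split s = sketch_split(0x00007f8012345678ULL);

        printf("pml4 %u pdpt %u pde %u pte %u offset 0x%x\n",
               s.pml4, s.pdpt, s.pde, s.pte, s.offset);
        return 0;
}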
+
+
+/*
+ * pmap_sync_page_data_phys(ppnum_t pa)
+ * 
+ * Invalidates all of the instruction cache on a physical page and
+ * pushes any dirty data from the data cache for the same physical page
+ * Not required in i386.
+ */
+void
+pmap_sync_page_data_phys(__unused ppnum_t pa)
+{
+       return;
+}
+
+/*
+ * pmap_sync_page_attributes_phys(ppnum_t pa)
+ * 
+ * Write back and invalidate all cachelines on a physical page.
+ */
+void
+pmap_sync_page_attributes_phys(ppnum_t pa)
+{
+       cache_flush_page_phys(pa);
+}
+
+
+
+#ifdef CURRENTLY_UNUSED_AND_UNTESTED
+
+int    collect_ref;
+int    collect_unref;
+
+/*
+ *     Routine:        pmap_collect
+ *     Function:
+ *             Garbage collects the physical map system for
+ *             pages which are no longer used.
+ *             Success need not be guaranteed -- that is, there
+ *             may well be pages which are not referenced, but
+ *             others may be collected.
+ *     Usage:
+ *             Called by the pageout daemon when pages are scarce.
+ */
+void
+pmap_collect(
+       pmap_t          p)
+{
+       register pt_entry_t     *pdp, *ptp;
+       pt_entry_t              *eptp;
+       int                     wired;
+
+       if (p == PMAP_NULL)
+               return;
+
+       if (p == kernel_pmap)
+               return;
+
+       /*
+        *      Garbage collect map.
+        */
+       PMAP_LOCK(p);
+
+       for (pdp = (pt_entry_t *)p->dirbase;
             pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)];
             pdp++)
        {
           if (*pdp & INTEL_PTE_VALID) {
              if(*pdp & INTEL_PTE_REF) {
-               *pdp &= ~INTEL_PTE_REF;
+               pmap_store_pte(pdp, *pdp & ~INTEL_PTE_REF);
                collect_ref++;
              } else {
                collect_unref++;
@@ -2351,9 +3450,9 @@ pmap_collect(
                    /*
                     * Invalidate the page directory pointer.
                     */
-                   *pdp = 0x0;
+                   pmap_store_pte(pdp, 0x0);
                 
-                   PMAP_READ_UNLOCK(p, spl);
+                   PMAP_UNLOCK(p);
 
                    /*
                     * And free the pte page itself.
@@ -2362,44 +3461,35 @@ pmap_collect(
                        register vm_page_t m;
 
                        vm_object_lock(p->pm_obj);
+
                        m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]));
                        if (m == VM_PAGE_NULL)
                            panic("pmap_collect: pte page not in object");
+
                        vm_page_lock_queues();
                        vm_page_free(m);
                        inuse_ptepages_count--;
                        vm_page_unlock_queues();
+
                        vm_object_unlock(p->pm_obj);
                    }
 
-                   PMAP_READ_LOCK(p, spl);
+                   PMAP_LOCK(p);
                }
              }
           }
        }
-       PMAP_UPDATE_TLBS(p, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
-       PMAP_READ_UNLOCK(p, spl);
+
+       PMAP_UPDATE_TLBS(p, 0x0, 0xFFFFFFFFFFFFF000ULL);
+       PMAP_UNLOCK(p);
        return;
 
 }
+#endif
 
-/*
- *     Routine:        pmap_kernel
- *     Function:
- *             Returns the physical map handle for the kernel.
- */
-#if    0
-pmap_t
-pmap_kernel(void)
-{
-       return (kernel_pmap);
-}
-#endif/*       0 */
 
 void
-pmap_copy_page(src, dst)
-       ppnum_t src;
-       ppnum_t dst;
+pmap_copy_page(ppnum_t src, ppnum_t dst)
 {
   bcopy_phys((addr64_t)i386_ptob(src),
             (addr64_t)i386_ptob(dst),
@@ -2424,8 +3514,8 @@ pmap_copy_page(src, dst)
 void
 pmap_pageable(
        __unused pmap_t         pmap,
-       __unused vm_offset_t    start_addr,
-       __unused vm_offset_t    end_addr,
+       __unused vm_map_offset_t        start_addr,
+       __unused vm_map_offset_t        end_addr,
        __unused boolean_t      pageable)
 {
 #ifdef lint
@@ -2438,35 +3528,36 @@ pmap_pageable(
  */
 void
 phys_attribute_clear(
-       ppnum_t pn,
+       ppnum_t         pn,
        int             bits)
 {
-       pv_entry_t              pv_h;
-       register pv_entry_t     pv_e;
+       pv_rooted_entry_t               pv_h;
+       register pv_hashed_entry_t      pv_e;
        register pt_entry_t     *pte;
        int                     pai;
        register pmap_t         pmap;
-       spl_t                   spl;
-       pmap_paddr_t            phys;
 
+       pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
-       if (!valid_page(pn)) {
+       if (pn == vm_page_guard_addr)
+               return;
+
+       pai = ppn_to_pai(pn);
+
+       if (!managed_page(pai)) {
            /*
             *  Not a managed page.
             */
            return;
        }
 
-       /*
-        *      Lock the pmap system first, since we will be changing
-        *      several pmaps.
-        */
+       PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_START,
+                  (int) pn, bits, 0, 0, 0);
 
-       PMAP_WRITE_LOCK(spl);
-       phys = i386_ptob(pn);
-       pai = pa_index(phys);
        pv_h = pai_to_pvh(pai);
 
+       LOCK_PVH(pai);
+
        /*
         * Walk down PV list, clearing all modify or reference bits.
         * We do not have to lock the pv_list because we have
@@ -2476,86 +3567,93 @@ phys_attribute_clear(
            /*
             * There are some mappings.
             */
-           for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
 
+         pv_e = (pv_hashed_entry_t)pv_h;
+
+         do {
                pmap = pv_e->pmap;
-               /*
-                * Lock the pmap to block pmap_extract and similar routines.
-                */
-               simple_lock(&pmap->lock);
 
                {
-                   register vm_offset_t va;
+                   vm_map_offset_t va;
 
                    va = pv_e->va;
-                   pte = pmap_pte(pmap, va);
+                   /*
+                    * First make sure any processor actively
+                    * using this pmap flushes its TLB state.
+                    */
+
+                   PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
 
-#if    0
                    /*
-                    * Consistency checks.
+                    * Clear modify and/or reference bits.
                     */
-                   assert(*pte & INTEL_PTE_VALID);
-                   /* assert(pte_to_phys(*pte) == phys); */
-#endif
 
-               /*
-                * Clear modify or reference bits.
-                */
+                   pte = pmap_pte(pmap, va);
+                   pmap_update_pte(pte, *pte, (*pte & ~bits));
 
-                       *pte++ &= ~bits;
-                       PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
                }
-               simple_unlock(&pmap->lock);
 
-           }
-       }
+               pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
 
+         } while (pv_e != (pv_hashed_entry_t)pv_h);
+       }
        pmap_phys_attributes[pai] &= ~bits;
 
-       PMAP_WRITE_UNLOCK(spl);
+       UNLOCK_PVH(pai);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_END,
+                  0, 0, 0, 0, 0);
+
 }
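
phys_attribute_clear() walks the circular pv list rooted at pv_h via queue_next(), flushing the TLBs for each mapping and clearing the requested bits in its PTE, until the walk comes back around to the head. The circular-list traversal has the shape of the stand-alone sketch below; the types are simplified stand-ins, not the kernel's pv entries.

/* Illustrative sketch only -- simplified circular-list walk. */
#include <stdint.h>

struct sketch_pv_node {
        struct sketch_pv_node *next;    /* circular link (queue_next) */
        uint64_t               pte;     /* this mapping's PTE image */
};

static void sketch_clear_all(struct sketch_pv_node *head, uint64_t clear_bits)
{
        struct sketch_pv_node *e = head;

        do {
                e->pte &= ~clear_bits;  /* clear the bits in this mapping */
                e = e->next;            /* follow the circular link */
        } while (e != head);            /* stop once back at the head */
}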
 
 /*
  *     Check specified attribute bits.
  */
-boolean_t
+int
 phys_attribute_test(
-       ppnum_t pn,
+       ppnum_t         pn,
        int             bits)
 {
-       pv_entry_t              pv_h;
-       register pv_entry_t     pv_e;
+       pv_rooted_entry_t               pv_h;
+       register pv_hashed_entry_t      pv_e;
        register pt_entry_t     *pte;
        int                     pai;
        register pmap_t         pmap;
-       spl_t                   spl;
-       pmap_paddr_t            phys;
+       int                     attributes = 0;
 
+       pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
-       if (!valid_page(pn)) {
+       if (pn == vm_page_guard_addr)
+               return 0;
+
+       pai = ppn_to_pai(pn);
+
+       if (!managed_page(pai)) {
            /*
             *  Not a managed page.
             */
-           return (FALSE);
+           return (0);
        }
 
        /*
-        *      Lock the pmap system first, since we will be checking
-        *      several pmaps.
+        * Super fast check...  if the bits have already been collected,
+        * there is no need to take any locks; if they are not set, we need
+        * to recheck after taking the lock, in case they were pulled in
+        * while we were waiting for it.
         */
+       if ( (pmap_phys_attributes[pai] & bits) == bits)
+           return (bits);
 
-       PMAP_WRITE_LOCK(spl);
-       phys = i386_ptob(pn);
-       pai = pa_index(phys);
        pv_h = pai_to_pvh(pai);
 
-       if (pmap_phys_attributes[pai] & bits) {
-           PMAP_WRITE_UNLOCK(spl);
-           return (TRUE);
-       }
+       LOCK_PVH(pai);
+
+       attributes = pmap_phys_attributes[pai] & bits;
 
        /*
-        * Walk down PV list, checking all mappings.
+        * Walk down PV list, checking the mappings until we
+        * reach the end or we've found the attributes we've asked for
         * We do not have to lock the pv_list because we have
         * the entire pmap system locked.
         */
@@ -2563,44 +3661,37 @@ phys_attribute_test(
            /*
             * There are some mappings.
             */
-           for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
+         pv_e = (pv_hashed_entry_t)pv_h;
+         if (attributes != bits) do {
 
-               pmap = pv_e->pmap;
-               /*
-                * Lock the pmap to block pmap_extract and similar routines.
-                */
-               simple_lock(&pmap->lock);
+               pmap = pv_e->pmap;
 
                {
-                   register vm_offset_t va;
+                   vm_map_offset_t va;
 
                    va = pv_e->va;
-                   pte = pmap_pte(pmap, va);
+                   /*
+                    * First make sure any processor actively
+                    * using this pmap flushes its TLB state.
+                    */
+                   PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
 
-#if    0
                    /*
-                    * Consistency checks.
+                    * pick up modify and/or reference bits from this mapping
                     */
-                   assert(*pte & INTEL_PTE_VALID);
-                   /* assert(pte_to_phys(*pte) == phys); */
-#endif
-               }
 
-               /*
-                * Check modify or reference bits.
-                */
-               {
-                       if (*pte++ & bits) {
-                           simple_unlock(&pmap->lock);
-                           PMAP_WRITE_UNLOCK(spl);
-                           return (TRUE);
-                       }
+                   pte = pmap_pte(pmap, va);
+                   attributes |= *pte & bits;
+
                }
-               simple_unlock(&pmap->lock);
-           }
+
+               pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
+
+           } while ((attributes != bits) && (pv_e != (pv_hashed_entry_t)pv_h));
        }
-       PMAP_WRITE_UNLOCK(spl);
-       return (FALSE);
+
+       UNLOCK_PVH(pai);
+       return (attributes);
 }
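
phys_attribute_test() takes a lock-free fast path when the already-collected attribute bits cover the request, and only locks the pv head and walks the mappings otherwise, re-checking the collected bits once the lock is held. A sketch of that double-check shape, with pthread primitives standing in for the kernel's pv lock and a plain word standing in for pmap_phys_attributes[].

/* Illustrative sketch only -- user-space stand-ins for kernel locking. */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t   sketch_pv_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile uint32_t sketch_attrs;  /* collected attribute bits */

static uint32_t sketch_attr_test(uint32_t bits)
{
        uint32_t found;

        /* fast path: everything asked for has already been recorded */
        if ((sketch_attrs & bits) == bits)
                return bits;

        pthread_mutex_lock(&sketch_pv_lock);
        found = sketch_attrs & bits;    /* recheck under the lock */
        /* ...a full version would also scan each mapping's PTE here... */
        pthread_mutex_unlock(&sketch_pv_lock);

        return found;
}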
 
 /*
@@ -2608,29 +3699,30 @@ phys_attribute_test(
  */
 void
 phys_attribute_set(
-       ppnum_t pn,
+       ppnum_t         pn,
        int             bits)
 {
-       int                     spl;
-       pmap_paddr_t   phys;
+       int             pai;
 
+       pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
-       if (!valid_page(pn)) {
+       if (pn == vm_page_guard_addr)
+               return;
+
+       pai = ppn_to_pai(pn);
+
+       if (!managed_page(pai)) {
            /*
             *  Not a managed page.
             */
            return;
        }
 
-       /*
-        *      Lock the pmap system and set the requested bits in
-        *      the phys attributes array.  Don't need to bother with
-        *      ptes because the test routine looks here first.
-        */
-       phys = i386_ptob(pn);
-       PMAP_WRITE_LOCK(spl);
-       pmap_phys_attributes[pa_index(phys)] |= bits;
-       PMAP_WRITE_UNLOCK(spl);
+       LOCK_PVH(pai);
+
+       pmap_phys_attributes[pai] |= bits;
+
+       UNLOCK_PVH(pai);
 }
 
 /*
@@ -2665,7 +3757,10 @@ boolean_t
 pmap_is_modified(
                 ppnum_t pn)
 {
-       return (phys_attribute_test(pn, PHYS_MODIFIED));
+        if (phys_attribute_test(pn, PHYS_MODIFIED))
+               return TRUE;
+
+       return FALSE;
 }
 
 /*
@@ -2698,7 +3793,10 @@ boolean_t
 pmap_is_referenced(
                   ppnum_t pn)
 {
-       return (phys_attribute_test(pn, PHYS_REFERENCED));
+        if (phys_attribute_test(pn, PHYS_REFERENCED))
+               return TRUE;
+
+       return FALSE;
 }
 
 /*
@@ -2709,8 +3807,17 @@ pmap_is_referenced(
 unsigned int
 pmap_get_refmod(ppnum_t pa)
 {
-       return (   ((phys_attribute_test(pa,   PHYS_MODIFIED))?   VM_MEM_MODIFIED : 0)
-                        | ((phys_attribute_test(pa, PHYS_REFERENCED))? VM_MEM_REFERENCED : 0));
+        int    refmod;
+       unsigned int retval = 0;
+
+       refmod = phys_attribute_test(pa, PHYS_MODIFIED | PHYS_REFERENCED);
+
+       if (refmod & PHYS_MODIFIED)
+               retval |= VM_MEM_MODIFIED;
+       if (refmod & PHYS_REFERENCED)
+               retval |= VM_MEM_REFERENCED;
+
+       return (retval);
 }
 
 /*
@@ -2728,59 +3835,6 @@ pmap_clear_refmod(ppnum_t pa, unsigned int mask)
        phys_attribute_clear(pa, x86Mask);
 }
 
-/*
- *     Set the modify bit on the specified range
- *     of this map as requested.
- *
- *     This optimization stands only if each time the dirty bit
- *     in vm_page_t is tested, it is also tested in the pmap.
- */
-void
-pmap_modify_pages(
-       pmap_t          map,
-       vm_offset_t     s,
-       vm_offset_t     e)
-{
-       spl_t                   spl;
-       register pt_entry_t     *pde;
-       register pt_entry_t     *spte, *epte;
-       vm_offset_t             l;
-       vm_offset_t             orig_s = s;
-
-       if (map == PMAP_NULL)
-               return;
-
-       PMAP_READ_LOCK(map, spl);
-
-       pde = pmap_pde(map, s);
-       while (s && s < e) {
-           l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
-           if (l > e)
-               l = e;
-           if (*pde & INTEL_PTE_VALID) {
-             spte = (pt_entry_t *)pmap_pte(map, (s & ~(PDE_MAPPED_SIZE-1)));
-               if (l) {
-                  spte = &spte[ptenum(s)];
-                  epte = &spte[intel_btop(l-s)];
-               } else {
-                  epte = &spte[intel_btop(PDE_MAPPED_SIZE)];
-                  spte = &spte[ptenum(s)];
-               }
-               while (spte < epte) {
-                   if (*spte & INTEL_PTE_VALID) {
-                       *spte |= (INTEL_PTE_MOD | INTEL_PTE_WRITE);
-                   }
-                   spte++;
-               }
-           }
-           s = l;
-           pde++;
-       }
-       PMAP_UPDATE_TLBS(map, orig_s, e);
-       PMAP_READ_UNLOCK(map, spl);
-}
-
-
 void 
 invalidate_icache(__unused vm_offset_t addr,
                  __unused unsigned     cnt,
@@ -2796,209 +3850,30 @@ flush_dcache(__unused vm_offset_t      addr,
        return;
 }
 
+#if CONFIG_DTRACE
 /*
-*          TLB Coherence Code (TLB "shootdown" code)
-* 
-* Threads that belong to the same task share the same address space and
-* hence share a pmap.  However, they  may run on distinct cpus and thus
-* have distinct TLBs that cache page table entries. In order to guarantee
-* the TLBs are consistent, whenever a pmap is changed, all threads that
-* are active in that pmap must have their TLB updated. To keep track of
-* this information, the set of cpus that are currently using a pmap is
-* maintained within each pmap structure (cpus_using). Pmap_activate() and
-* pmap_deactivate add and remove, respectively, a cpu from this set.
-* Since the TLBs are not addressable over the bus, each processor must
-* flush its own TLB; a processor that needs to invalidate another TLB
-* needs to interrupt the processor that owns that TLB to signal the
-* update.
-* 
-* Whenever a pmap is updated, the lock on that pmap is locked, and all
-* cpus using the pmap are signaled to invalidate. All threads that need
-* to activate a pmap must wait for the lock to clear to await any updates
-* in progress before using the pmap. They must ACQUIRE the lock to add
-* their cpu to the cpus_using set. An implicit assumption made
-* throughout the TLB code is that all kernel code that runs at or higher
-* than splvm blocks out update interrupts, and that such code does not
-* touch pageable pages.
-* 
-* A shootdown interrupt serves another function besides signaling a
-* processor to invalidate. The interrupt routine (pmap_update_interrupt)
-* waits for the both the pmap lock (and the kernel pmap lock) to clear,
-* preventing user code from making implicit pmap updates while the
-* sending processor is performing its update. (This could happen via a
-* user data write reference that turns on the modify bit in the page
-* table). It must wait for any kernel updates that may have started
-* concurrently with a user pmap update because the IPC code
-* changes mappings.
-* Spinning on the VALUES of the locks is sufficient (rather than
-* having to acquire the locks) because any updates that occur subsequent
-* to finding the lock unlocked will be signaled via another interrupt.
-* (This assumes the interrupt is cleared before the low level interrupt code 
-* calls pmap_update_interrupt()). 
-* 
-* The signaling processor must wait for any implicit updates in progress
-* to terminate before continuing with its update. Thus it must wait for an
-* acknowledgement of the interrupt from each processor for which such
-* references could be made. For maintaining this information, a set
-* cpus_active is used. A cpu is in this set if and only if it can 
-* use a pmap. When pmap_update_interrupt() is entered, a cpu is removed from
-* this set; when all such cpus are removed, it is safe to update.
-* 
-* Before attempting to acquire the update lock on a pmap, a cpu (A) must
-* be at least at the priority of the interprocessor interrupt
-* (splip<=splvm). Otherwise, A could grab a lock and be interrupted by a
-* kernel update; it would spin forever in pmap_update_interrupt() trying
-* to acquire the user pmap lock it had already acquired. Furthermore A
-* must remove itself from cpus_active.  Otherwise, another cpu holding
-* the lock (B) could be in the process of sending an update signal to A,
-* and thus be waiting for A to remove itself from cpus_active. If A is
-* spinning on the lock at priority this will never happen and a deadlock
-* will result.
-*/
-
-/*
- *     Signal another CPU that it must flush its TLB
+ * Constrain DTrace copyin/copyout actions
  */
-void
-signal_cpus(
-       cpu_set         use_list,
-       pmap_t          pmap,
-       vm_offset_t     start_addr,
-       vm_offset_t     end_addr)
-{
-       register int            which_cpu, j;
-       register pmap_update_list_t     update_list_p;
+extern kern_return_t dtrace_copyio_preflight(addr64_t);
+extern kern_return_t dtrace_copyio_postflight(addr64_t);
 
-       while ((which_cpu = ffs((unsigned long)use_list)) != 0) {
-           which_cpu -= 1;     /* convert to 0 origin */
+kern_return_t dtrace_copyio_preflight(__unused addr64_t va)
+{
+       thread_t thread = current_thread();
 
-           update_list_p = cpu_update_list(which_cpu);
-           simple_lock(&update_list_p->lock);
-
-           j = update_list_p->count;
-           if (j >= UPDATE_LIST_SIZE) {
-               /*
-                *      list overflowed.  Change last item to
-                *      indicate overflow.
-                */
-               update_list_p->item[UPDATE_LIST_SIZE-1].pmap  = kernel_pmap;
-               update_list_p->item[UPDATE_LIST_SIZE-1].start = VM_MIN_ADDRESS;
-               update_list_p->item[UPDATE_LIST_SIZE-1].end   = VM_MAX_KERNEL_ADDRESS;
-           }
-           else {
-               update_list_p->item[j].pmap  = pmap;
-               update_list_p->item[j].start = start_addr;
-               update_list_p->item[j].end   = end_addr;
-               update_list_p->count = j+1;
-           }
-           cpu_update_needed(which_cpu) = TRUE;
-           simple_unlock(&update_list_p->lock);
-
-           /* if its the kernel pmap, ignore cpus_idle */
-           if (((cpus_idle & (1 << which_cpu)) == 0) ||
-               (pmap == kernel_pmap) || PMAP_REAL(which_cpu) == pmap)
-             {
-               i386_signal_cpu(which_cpu, MP_TLB_FLUSH, ASYNC);
-             }
-           use_list &= ~(1 << which_cpu);
-       }
-}
-
-void
-process_pmap_updates(
-       register pmap_t         my_pmap)
-{
-       register int            my_cpu;
-       register pmap_update_list_t     update_list_p;
-       register int            j;
-       register pmap_t         pmap;
-
-       mp_disable_preemption();
-       my_cpu = cpu_number();
-       update_list_p = cpu_update_list(my_cpu);
-       simple_lock(&update_list_p->lock);
-
-       for (j = 0; j < update_list_p->count; j++) {
-           pmap = update_list_p->item[j].pmap;
-           if (pmap == my_pmap ||
-               pmap == kernel_pmap) {
-
-               if (pmap->ref_count <= 0) {
-                       PMAP_CPU_CLR(pmap, my_cpu);
-                       PMAP_REAL(my_cpu) = kernel_pmap;
-#ifdef PAE
-                       set_cr3((unsigned int)kernel_pmap->pm_ppdpt);
-#else
-                       set_cr3((unsigned int)kernel_pmap->pdirbase);
-#endif
-               } else
-                       INVALIDATE_TLB(pmap,
-                                      update_list_p->item[j].start,
-                                      update_list_p->item[j].end);
-           }
-       }       
-       update_list_p->count = 0;
-       cpu_update_needed(my_cpu) = FALSE;
-       simple_unlock(&update_list_p->lock);
-       mp_enable_preemption();
+       if (current_map() == kernel_map)
+               return KERN_FAILURE;
+       else if (thread->machine.specFlags & CopyIOActive)
+               return KERN_FAILURE;
+       else
+               return KERN_SUCCESS;
 }
-
-/*
- *     Interrupt routine for TBIA requested from other processor.
- *     This routine can also be called at all interrupts time if
- *     the cpu was idle. Some driver interrupt routines might access
- *     newly allocated vm. (This is the case for hd)
- */
-void
-pmap_update_interrupt(void)
+kern_return_t dtrace_copyio_postflight(__unused addr64_t va)
 {
-       register int            my_cpu;
-       spl_t                   s;
-       register pmap_t         my_pmap;
-
-       mp_disable_preemption();
-       my_cpu = cpu_number();
-
-       /*
-        *      Raise spl to splvm (above splip) to block out pmap_extract
-        *      from IO code (which would put this cpu back in the active
-        *      set).
-        */
-       s = splhigh();
-       
-       my_pmap = PMAP_REAL(my_cpu);
-
-       if (!(my_pmap && pmap_in_use(my_pmap, my_cpu)))
-               my_pmap = kernel_pmap;
-
-       do {
-           LOOP_VAR;
-
-           /*
-            *  Indicate that we're not using either user or kernel
-            *  pmap.
-            */
-           i_bit_clear(my_cpu, &cpus_active);
-
-           /*
-            *  Wait for any pmap updates in progress, on either user
-            *  or kernel pmap.
-            */
-           while (*(volatile int *)(&my_pmap->lock.interlock.lock_data) ||
-                  *(volatile int *)(&kernel_pmap->lock.interlock.lock_data)) {
-               LOOP_CHECK("pmap_update_interrupt", my_pmap);
-               cpu_pause();
-           }
-
-           process_pmap_updates(my_pmap);
-
-           i_bit_set(my_cpu, &cpus_active);
-
-       } while (cpu_update_needed(my_cpu));
-       
-       splx(s);
-       mp_enable_preemption();
+       return KERN_SUCCESS;
 }
+#endif /* CONFIG_DTRACE */
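+
+/*
+ * Illustrative sketch, not part of the original change: the intended
+ * pairing of the hooks above around a user-space access.  The caller,
+ * uaddr and the copy itself are hypothetical.
+ *
+ *	if (dtrace_copyio_preflight(uaddr) == KERN_SUCCESS) {
+ *		// ... perform the copyin/copyout at uaddr ...
+ *		(void) dtrace_copyio_postflight(uaddr);
+ *	}
+ */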
 
 #if    MACH_KDB
 
@@ -3006,6 +3881,7 @@ pmap_update_interrupt(void)
 
 extern void    db_show_page(pmap_paddr_t pa);
 
+#if 0
 void
 db_show_page(pmap_paddr_t pa)
 {
@@ -3017,7 +3893,7 @@ db_show_page(pmap_paddr_t pa)
        pv_h = pai_to_pvh(pai);
 
        attr = pmap_phys_attributes[pai];
-       printf("phys page %x ", pa);
+       printf("phys page %llx ", pa);
        if (attr & PHYS_MODIFIED)
                printf("modified, ");
        if (attr & PHYS_REFERENCED)
@@ -3028,12 +3904,14 @@ db_show_page(pmap_paddr_t pa)
                printf(" not mapped\n");
        for (; pv_h; pv_h = pv_h->next)
                if (pv_h->pmap)
-                       printf("%x in pmap %x\n", pv_h->va, pv_h->pmap);
+                       printf("%llx in pmap %p\n", pv_h->va, pv_h->pmap);
 }
+#endif
 
 #endif /* MACH_KDB */
 
 #if    MACH_KDB
+#if 0
 void db_kvtophys(vm_offset_t);
 void db_show_vaddrs(pt_entry_t  *);
 
@@ -3044,7 +3922,7 @@ void
 db_kvtophys(
        vm_offset_t     vaddr)
 {
-       db_printf("0x%x", kvtophys(vaddr));
+       db_printf("0x%qx", kvtophys(vaddr));
 }
 
 /*
@@ -3055,7 +3933,7 @@ db_show_vaddrs(
        pt_entry_t      *dirbase)
 {
        pt_entry_t      *ptep, *pdep, tmp;
-       int             x, y, pdecnt, ptecnt;
+       unsigned int    x, y, pdecnt, ptecnt;
 
        if (dirbase == 0) {
                dirbase = kernel_pmap->dirbase;
@@ -3064,7 +3942,7 @@ db_show_vaddrs(
                db_printf("need a dirbase...\n");
                return;
        }
-       dirbase = (pt_entry_t *) ((unsigned long) dirbase & ~INTEL_OFFMASK);
+       dirbase = (pt_entry_t *) (int) ((unsigned long) dirbase & ~INTEL_OFFMASK);
 
        db_printf("dirbase: 0x%x\n", dirbase);
 
@@ -3075,7 +3953,7 @@ db_show_vaddrs(
                        continue;
                }
                pdecnt++;
-               ptep = (pt_entry_t *) ((*pdep) & ~INTEL_OFFMASK);
+               ptep = (pt_entry_t *) ((unsigned long)(*pdep) & ~INTEL_OFFMASK);
                db_printf("dir[%4d]: 0x%x\n", y, *pdep);
                for (x = 0; x < NPTEPG; x++, ptep++) {
                        if (((tmp = *ptep) & INTEL_PTE_VALID) == 0) {
@@ -3093,6 +3971,7 @@ db_show_vaddrs(
        db_printf("total: %d tables, %d page table entries.\n", pdecnt, ptecnt);
 
 }
+#endif
 #endif /* MACH_KDB */
 
 #include <mach_vm_debug.h>
@@ -3109,189 +3988,90 @@ pmap_list_resident_pages(
 }
 #endif /* MACH_VM_DEBUG */
 
-#ifdef MACH_BSD
-/*
- * pmap_pagemove
- *
- * BSD support routine to reassign virtual addresses.
- */
-
-void
-pmap_movepage(unsigned long from, unsigned long to, vm_size_t size)
-{
-       spl_t   spl;
-       pt_entry_t      *pte, saved_pte;
-
-       /* Lock the kernel map */
-       PMAP_READ_LOCK(kernel_pmap, spl);
 
 
-       while (size > 0) {
-               pte = pmap_pte(kernel_pmap, from);
-               if (pte == NULL)
-                       panic("pmap_pagemove from pte NULL");
-               saved_pte = *pte;
-               PMAP_READ_UNLOCK(kernel_pmap, spl);
-
-               pmap_enter(kernel_pmap, to, (ppnum_t)i386_btop(i386_trunc_page(*pte)),
-                       VM_PROT_READ|VM_PROT_WRITE, 0, *pte & INTEL_PTE_WIRED);
-
-               pmap_remove(kernel_pmap, (addr64_t)from, (addr64_t)(from+PAGE_SIZE));
-
-               PMAP_READ_LOCK(kernel_pmap, spl);
-               pte = pmap_pte(kernel_pmap, to);
-               if (pte == NULL)
-                       panic("pmap_pagemove 'to' pte NULL");
-
-               *pte = saved_pte;
-
-               from += PAGE_SIZE;
-               to += PAGE_SIZE;
-               size -= PAGE_SIZE;
-       }
-
-       /* Get the processors to update the TLBs */
-       PMAP_UPDATE_TLBS(kernel_pmap, from, from+size);
-       PMAP_UPDATE_TLBS(kernel_pmap, to, to+size);
-
-       PMAP_READ_UNLOCK(kernel_pmap, spl);
-
-}
-#endif /* MACH_BSD */
-
 /* temporary workaround */
 boolean_t
-coredumpok(vm_map_t map, vm_offset_t va)
+coredumpok(__unused vm_map_t map, __unused vm_offset_t va)
 {
+#if 0
        pt_entry_t     *ptep;
 
        ptep = pmap_pte(map->pmap, va);
        if (0 == ptep)
                return FALSE;
        return ((*ptep & (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE | INTEL_PTE_WIRED));
-}
-
-/*
- * grow the number of kernel page table entries, if needed
- */
-void
-pmap_growkernel(vm_offset_t addr)
-{
-#if GROW_KERNEL_FUNCTION_IMPLEMENTED
-       struct pmap *pmap;
-       int s;
-       vm_offset_t ptppaddr;
-       ppnum_t  ppn;
-       vm_page_t nkpg;
-       pd_entry_t newpdir = 0;
-
-       /*
-        * Serialize.
-        * Losers return to try again until the winner completes the work.
-        */
-       if (kptobj == 0) panic("growkernel 0");
-       if (!vm_object_lock_try(kptobj)) {
-           return;
-       }
-
-       vm_page_lock_queues();
-
-       s = splhigh();
-
-       /*
-        * If this is the first time thru, locate the end of the
-        * kernel page table entries and set nkpt to the current
-        * number of kernel page table pages
-        */
-       if (kernel_vm_end == 0) {
-               kernel_vm_end = KERNBASE;
-               nkpt = 0;
-
-               while (pdir_pde(kernel_pmap->dirbase, kernel_vm_end)) {
-                       kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
-                       nkpt++;
-               }
-       }
-
-       /*
-        * Now allocate and map the required number of page tables
-        */
-       addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
-       while (kernel_vm_end < addr) {
-               if (pdir_pde(kernel_pmap->dirbase, kernel_vm_end)) {
-                       kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
-                       continue; /* someone already filled this one */
-               }
-
-               nkpg = vm_page_alloc(kptobj, nkpt);
-               if (!nkpg)
-                       panic("pmap_growkernel: no memory to grow kernel");
-
-               nkpt++;
-               vm_page_wire(nkpg);
-               ppn  = nkpg->phys_page;
-               pmap_zero_page(ppn);
-               ptppaddr = i386_ptob(ppn);
-               newpdir = (pd_entry_t) (ptppaddr | INTEL_PTE_VALID | 
-                                       INTEL_PTE_RW | INTEL_PTE_REF | INTEL_PTE_MOD);
-               pdir_pde(kernel_pmap->dirbase, kernel_vm_end) = newpdir;
-
-               simple_lock(&free_pmap_lock);
-               for (pmap = (struct pmap *)kernel_pmap->pmap_link.next;
-                    pmap != kernel_pmap ;
-                    pmap = (struct pmap *)pmap->pmap_link.next ) {
-                               *pmap_pde(pmap, kernel_vm_end) = newpdir;
-               }
-               simple_unlock(&free_pmap_lock);
-       }
-       splx(s);
-       vm_page_unlock_queues();
-       vm_object_unlock(kptobj);
+#else
+       return TRUE;
 #endif
 }
 
-pt_entry_t *
-pmap_mapgetpte(vm_map_t map, vm_offset_t v)
-{
-       return pmap_pte(map->pmap, v);
-}
 
 boolean_t
 phys_page_exists(
                 ppnum_t pn)
 {
-       pmap_paddr_t     phys;
-
        assert(pn != vm_page_fictitious_addr);
 
        if (!pmap_initialized)
                return (TRUE);
-       phys = (pmap_paddr_t) i386_ptob(pn);
-       if (!pmap_valid_page(pn))
+
+       if (pn == vm_page_guard_addr)
+               return FALSE;
+
+       if (!managed_page(ppn_to_pai(pn)))
                return (FALSE);
 
        return TRUE;
 }
 
 void
-mapping_free_prime()
+mapping_free_prime(void)
 {
        int             i;
-       pv_entry_t      pv_e;
+       pv_hashed_entry_t      pvh_e;
+       pv_hashed_entry_t      pvh_eh;
+       pv_hashed_entry_t      pvh_et;
+       int             pv_cnt;
+
+       pv_cnt = 0;
+       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
+       for (i = 0; i < (5 * PV_HASHED_ALLOC_CHUNK); i++) {
+               pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
+
+               pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+               pvh_eh = pvh_e;
+
+               if (pvh_et == PV_HASHED_ENTRY_NULL)
+                       pvh_et = pvh_e;
+               pv_cnt++;
+       }
+       PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
+
+       pv_cnt = 0;
+       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
+       for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK; i++) {
+               pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
 
-       for (i = 0; i < (5 * PV_ALLOC_CHUNK); i++) {
-               pv_e = (pv_entry_t) zalloc(pv_list_zone);
-               PV_FREE(pv_e);
+               pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+               pvh_eh = pvh_e;
+
+               if (pvh_et == PV_HASHED_ENTRY_NULL)
+                       pvh_et = pvh_e;
+               pv_cnt++;
        }
+       PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
+
 }
 
 void
-mapping_adjust()
+mapping_adjust(void)
 {
-       pv_entry_t      pv_e;
+       pv_hashed_entry_t      pvh_e;
+       pv_hashed_entry_t      pvh_eh;
+       pv_hashed_entry_t      pvh_et;
+       int             pv_cnt;
        int             i;
-       int             spl;
 
        if (mapping_adjust_call == NULL) {
                thread_call_setup(&mapping_adjust_call_data,
@@ -3299,40 +4079,89 @@ mapping_adjust()
                                  (thread_call_param_t) NULL);
                mapping_adjust_call = &mapping_adjust_call_data;
        }
-       /* XXX  rethink best way to do locking here */
-       if (pv_free_count < PV_LOW_WATER_MARK) {
-               for (i = 0; i < PV_ALLOC_CHUNK; i++) {
-                       pv_e = (pv_entry_t) zalloc(pv_list_zone);
-                       SPLVM(spl);
-                       PV_FREE(pv_e);
-                       SPLX(spl);
+
+       pv_cnt = 0;
+       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
+       if (pv_hashed_kern_free_count < PV_HASHED_KERN_LOW_WATER_MARK) {
+               for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK; i++) {
+                       pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
+
+                       pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+                       pvh_eh = pvh_e;
+
+                       if (pvh_et == PV_HASHED_ENTRY_NULL)
+                               pvh_et = pvh_e;
+                       pv_cnt++;
+               }
+               PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
+       }
+
+       pv_cnt = 0;
+       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
+       if (pv_hashed_free_count < PV_HASHED_LOW_WATER_MARK) {
+               for (i = 0; i < PV_HASHED_ALLOC_CHUNK; i++) {
+                       pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
+
+                       pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+                       pvh_eh = pvh_e;
+
+                       if (pvh_et == PV_HASHED_ENTRY_NULL)
+                               pvh_et = pvh_e;
+                       pv_cnt++;
                }
+               PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
        }
        mappingrecurse = 0;
 }
 
 void
-pmap_commpage_init(vm_offset_t kernel_commpage, vm_offset_t user_commpage, int cnt)
+pmap_commpage32_init(vm_offset_t kernel_commpage, vm_offset_t user_commpage, int cnt)
 {
-  int i;
-  pt_entry_t *opte, *npte;
-  pt_entry_t pte;
-
-  for (i = 0; i < cnt; i++) {
-    opte = pmap_pte(kernel_pmap, kernel_commpage);
-    if (0 == opte) panic("kernel_commpage");
-    npte = pmap_pte(kernel_pmap, user_commpage);
-    if (0 == npte) panic("user_commpage");
-    pte = *opte | INTEL_PTE_USER|INTEL_PTE_GLOBAL;
-    pte &= ~INTEL_PTE_WRITE; // ensure read only
-    WRITE_PTE_FAST(npte, pte);
-    kernel_commpage += INTEL_PGBYTES;
-    user_commpage += INTEL_PGBYTES;
-  }
+       int i;
+       pt_entry_t *opte, *npte;
+       pt_entry_t pte;
+       spl_t s;
+
+       for (i = 0; i < cnt; i++) {
+               s = splhigh();
+               opte = pmap_pte(kernel_pmap, (vm_map_offset_t)kernel_commpage);
+               if (0 == opte)
+                       panic("kernel_commpage");
+               pte = *opte | INTEL_PTE_USER|INTEL_PTE_GLOBAL;
+               pte &= ~INTEL_PTE_WRITE; // ensure read only
+               npte = pmap_pte(kernel_pmap, (vm_map_offset_t)user_commpage);
+               if (0 == npte)
+                       panic("user_commpage");
+               pmap_store_pte(npte, pte);
+               splx(s);
+               kernel_commpage += INTEL_PGBYTES;
+               user_commpage += INTEL_PGBYTES;
+       }
+}
+
+
+#define PMAP_COMMPAGE64_CNT  (_COMM_PAGE64_AREA_USED/PAGE_SIZE)
+pt_entry_t pmap_commpage64_ptes[PMAP_COMMPAGE64_CNT];
+
+void
+pmap_commpage64_init(vm_offset_t kernel_commpage, __unused vm_map_offset_t user_commpage, int cnt)
+{
+    int i;
+    pt_entry_t *kptep;
+
+    PMAP_LOCK(kernel_pmap);
+
+    for (i = 0; i < cnt; i++) {
+        kptep = pmap_pte(kernel_pmap, (uint64_t)kernel_commpage + (i*PAGE_SIZE));
+       if ((0 == kptep) || (0 == (*kptep & INTEL_PTE_VALID)))
+           panic("pmap_commpage64_init pte");
+       pmap_commpage64_ptes[i] = ((*kptep & ~INTEL_PTE_WRITE) | INTEL_PTE_USER);
+    }
+    PMAP_UNLOCK(kernel_pmap);
 }
 
+
 static cpu_pmap_t              cpu_pmap_master;
-static struct pmap_update_list cpu_update_list_master;
 
 struct cpu_pmap *
 pmap_cpu_alloc(boolean_t is_boot_cpu)
@@ -3340,13 +4169,13 @@ pmap_cpu_alloc(boolean_t is_boot_cpu)
        int                     ret;
        int                     i;
        cpu_pmap_t              *cp;
-       pmap_update_list_t      up;
        vm_offset_t             address;
+       vm_map_address_t        mapaddr;
        vm_map_entry_t          entry;
+       pt_entry_t              *pte;
        
        if (is_boot_cpu) {
                cp = &cpu_pmap_master;
-               up = &cpu_update_list_master;
        } else {
                /*
                 * The per-cpu pmap data structure itself.
@@ -3360,57 +4189,524 @@ pmap_cpu_alloc(boolean_t is_boot_cpu)
                bzero((void *)cp, sizeof(cpu_pmap_t));
 
                /*
-                * The tlb flush update list.
+                * The temporary windows used for copy/zero - see loose_ends.c
                 */
-               ret = kmem_alloc(kernel_map,
-                                (vm_offset_t *) &up, sizeof(*up));
+               ret = vm_map_find_space(kernel_map,
+                   &mapaddr, PMAP_NWINDOWS*PAGE_SIZE, (vm_map_offset_t)0, 0, &entry);
                if (ret != KERN_SUCCESS) {
-                       printf("pmap_cpu_alloc() failed ret=%d\n", ret);
+                       printf("pmap_cpu_alloc() "
+                               "vm_map_find_space ret=%d\n", ret);
                        pmap_cpu_free(cp);
                        return NULL;
                }
+               address = (vm_offset_t)mapaddr;
+
+               for (i = 0; i < PMAP_NWINDOWS; i++, address += PAGE_SIZE) {
+                 spl_t s;
+                       s = splhigh();
+                       while ((pte = pmap_pte(kernel_pmap, (vm_map_offset_t)address)) == 0)
+                               pmap_expand(kernel_pmap, (vm_map_offset_t)address);
+                       * (int *) pte = 0; 
+                       cp->mapwindow[i].prv_CADDR = (caddr_t) address;
+                       cp->mapwindow[i].prv_CMAP = pte;
+                       splx(s);
+               }
+               vm_map_unlock(kernel_map);
+       }
 
-               /*
-                * The temporary windows used for copy/zero - see loose_ends.c
-                */
-               for (i = 0; i < PMAP_NWINDOWS; i++) {
-                       ret = vm_map_find_space(kernel_map,
-                                       &address, PAGE_SIZE, 0, &entry);
-                       if (ret != KERN_SUCCESS) {
-                               printf("pmap_cpu_alloc() "
-                                       "vm_map_find_space ret=%d\n", ret);
-                               pmap_cpu_free(cp);
-                               return NULL;
+       cp->pdpt_window_index = PMAP_PDPT_FIRST_WINDOW;
+       cp->pde_window_index = PMAP_PDE_FIRST_WINDOW;
+       cp->pte_window_index = PMAP_PTE_FIRST_WINDOW;
+
+       return cp;
+}
+
+void
+pmap_cpu_free(struct cpu_pmap *cp)
+{
+       if (cp != NULL && cp != &cpu_pmap_master) {
+               kfree((void *) cp, sizeof(cpu_pmap_t));
+       }
+}
+
+
+mapwindow_t *
+pmap_get_mapwindow(pt_entry_t pentry)
+{
+    mapwindow_t *mp;
+    int i;
+
+    assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
+    /*
+     * Note: 0th map reserved for pmap_pte()
+     */
+    for (i = PMAP_NWINDOWS_FIRSTFREE; i < PMAP_NWINDOWS; i++) {
+            mp = &current_cpu_datap()->cpu_pmap->mapwindow[i];
+
+           if (*mp->prv_CMAP == 0) {
+                   pmap_store_pte(mp->prv_CMAP, pentry);
+
+                   invlpg((uintptr_t)mp->prv_CADDR);
+
+                   return (mp);
+           }
+    }
+    panic("pmap_get_mapwindow: no windows available");
+
+    return NULL;
+}
+
+
+void
+pmap_put_mapwindow(mapwindow_t *mp)
+{
+    pmap_store_pte(mp->prv_CMAP, 0);
+}
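+
+/*
+ * Usage sketch, illustrative only and not part of the original change:
+ * the copy/zero paths in loose_ends.c use the window pair roughly like
+ * this.  The names pa and buf are hypothetical; per the assert in
+ * pmap_get_mapwindow(), callers run with interrupts off or preemption
+ * disabled.
+ *
+ *	mapwindow_t *map;
+ *
+ *	map = pmap_get_mapwindow((pt_entry_t)(i386_ptob(pa) |
+ *				 INTEL_PTE_VALID | INTEL_PTE_RW));
+ *	bcopy(map->prv_CADDR, buf, PAGE_SIZE);	// access the page via the window
+ *	pmap_put_mapwindow(map);		// clear the window PTE again
+ */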
+
+
+/*
+ * The Intel platform can nest at the PDE level, so NBPDE (i.e. 2MB) at a time,
+ * on a NBPDE boundary.
+ */
+uint64_t pmap_nesting_size_min = NBPDE;
+uint64_t pmap_nesting_size_max = 0 - (uint64_t)NBPDE; /* no limit, really... */
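+
+/*
+ * Illustrative check, not part of the original change: a VM-layer caller
+ * would validate a proposed nesting range against these constants before
+ * calling pmap_nest() below.  addr and size are hypothetical names.
+ *
+ *	if ((addr & (pmap_nesting_size_min - 1)) ||
+ *	    (size & (pmap_nesting_size_min - 1)))
+ *		return KERN_INVALID_VALUE;
+ */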
+
+/*
+ *	kern_return_t pmap_nest(grand, subord, vstart, nstart, size)
+ *
+ *	grand  = the pmap that we will nest subord into
+ *	subord = the pmap that goes into grand
+ *	vstart = start of the range in grand to be nested
+ *	nstart = start of the corresponding range in subord
+ *	size   = size of the nested area (up to 16TB)
+ *
+ *	Inserts a pmap into another.  This is used to implement shared segments.
+ *
+ *	On x86 this is currently very limited: the range must be exactly one segment.
+ *
+ *	Note that we depend upon higher-level VM locks to ensure that things don't change while
+ *	we are doing this.  For example, VM should not be doing any pmap enters while it is nesting
+ *	or performing two nests at once.
+ */
+
+
+kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size) {
+               
+        vm_map_offset_t        vaddr, nvaddr;
+       pd_entry_t      *pde,*npde;
+       unsigned int    i;
+       uint64_t        num_pde;
+
+       // do validity tests
+       if (size & (pmap_nesting_size_min-1)) return KERN_INVALID_VALUE;
+       if(vstart & (pmap_nesting_size_min-1)) return KERN_INVALID_VALUE;
+       if(nstart & (pmap_nesting_size_min-1)) return KERN_INVALID_VALUE;
+       if((size >> 28) > 65536)  return KERN_INVALID_VALUE;    /* Max size we can nest is 16TB */
+       if(size == 0) {
+               panic("pmap_nest: size is invalid - %016llX\n", size);
+       }
+
+       PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_START,
+                  (int) grand, (int) subord,
+                  (int) (vstart>>32), (int) vstart, 0);
+
+       subord->pm_shared = TRUE;
+       nvaddr = (vm_map_offset_t)nstart;
+       num_pde = size >> PDESHIFT;
+
+       PMAP_LOCK(subord);
+       for (i = 0; i < num_pde; i++) {
+         npde = pmap_pde(subord, nvaddr);
+         while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) {
+           PMAP_UNLOCK(subord);
+           pmap_expand(subord, nvaddr); // pmap_expand handles races
+           PMAP_LOCK(subord);
+           npde = pmap_pde(subord, nvaddr);
+         }
+         nvaddr += NBPDE;
+       }
+
+       PMAP_UNLOCK(subord);
+
+       vaddr = (vm_map_offset_t)vstart;
+
+       PMAP_LOCK(grand);
+
+       for (i = 0;i < num_pde; i++) {
+         pd_entry_t tpde;
+
+         npde = pmap_pde(subord, nstart);
+         if (npde == 0)
+                 panic("pmap_nest: no npde, subord %p nstart 0x%llx", subord, nstart);
+         tpde = *npde;
+         nstart += NBPDE;
+         pde = pmap_pde(grand, vaddr);
+/* Legacy mode does not require expansion.
+ * DRK: consider a debug mode test to verify that no PTEs are extant within
+ * this range.
+ */
+         if ((0 == pde) && cpu_64bit) {
+           PMAP_UNLOCK(grand);
+           pmap_expand_pdpt(grand, vaddr);
+           PMAP_LOCK(grand);
+           pde = pmap_pde(grand, vaddr);
+         }
+
+         if (pde == 0)
+                 panic("pmap_nest: no pde, grand  %p vaddr 0x%llx", grand, vaddr);
+         vaddr += NBPDE;
+         pmap_store_pte(pde, tpde);
+       }
+
+       /* XXX FBDP: why do we need to flush here ? */
+       PMAP_UPDATE_TLBS(grand, vstart, vstart + size - 1);
+
+       PMAP_UNLOCK(grand);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+       return KERN_SUCCESS;
+}
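+
+/*
+ * Usage sketch, illustrative only and not part of the original change:
+ * nesting one 2MB PDE's worth of a shared submap.  The pmap names and the
+ * address are hypothetical; the real callers live in the VM shared-region
+ * code.
+ *
+ *	kern_return_t kr;
+ *
+ *	kr = pmap_nest(task_pmap, shared_pmap,
+ *		       0x90000000ULL,		// vstart, NBPDE-aligned
+ *		       0x90000000ULL,		// nstart, NBPDE-aligned
+ *		       NBPDE);			// size, a multiple of NBPDE
+ *	if (kr != KERN_SUCCESS)
+ *		panic("pmap_nest failed");
+ */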
+
+/*
+ *	kern_return_t pmap_unnest(grand, vaddr, size)
+ *
+ *	grand  = the pmap from which the nested range is removed
+ *	vaddr  = start of the range in grand to be unnested
+ *	size   = size of the range to be unnested
+ *
+ *	Removes a nested pmap from another.  This is used to implement shared segments.
+ *	On x86 the range is handled in PDE-sized chunks and must be aligned to
+ *	NBPDE (pmap_nesting_size_min) boundaries.
+ */
+
+kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) {
+                       
+       pd_entry_t *pde;
+       unsigned int i;
+       unsigned int num_pde;
+       addr64_t vstart, vend;
+
+       PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_START,
+                  (int) grand, 
+                  (int) (vaddr>>32), (int) vaddr, 0, 0);
+
+       if ((size & (pmap_nesting_size_min-1)) ||
+           (vaddr & (pmap_nesting_size_min-1))) {
+               panic("pmap_unnest(%p,0x%llx,0x%llx): unaligned...\n",
+                     grand, vaddr, size);
+       }
+
+       /* align everything to PDE boundaries */
+       vstart = vaddr & ~(NBPDE-1);
+       vend = (vaddr + size + NBPDE - 1) & ~(NBPDE-1);
+       size = vend - vstart;
+
+       PMAP_LOCK(grand);
+
+       // invalidate all pdes for segment at vaddr in pmap grand
+
+       num_pde = size >> PDESHIFT;
+
+       vaddr = vstart;
+	for (i = 0; i < num_pde; i++) {
+         pde = pmap_pde(grand, (vm_map_offset_t)vaddr);
+         if (pde == 0) panic("pmap_unnest: no pde, grand %p vaddr 0x%llx\n", grand, vaddr);
+         pmap_store_pte(pde, (pd_entry_t)0);
+         vaddr += NBPDE;
+       }
+       PMAP_UPDATE_TLBS(grand, vstart, vend);
+
+       PMAP_UNLOCK(grand);
+               
+       PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+       return KERN_SUCCESS;
+}
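+
+/*
+ * Illustrative pairing, not part of the original change: undoing the nest
+ * sketched above removes the shared PDEs from the task pmap only; the
+ * subordinate pmap's own mappings are untouched.
+ *
+ *	(void) pmap_unnest(task_pmap, 0x90000000ULL, NBPDE);
+ */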
+
+void
+pmap_switch(pmap_t tpmap)
+{
+        spl_t  s;
+       int     my_cpu;
+
+       s = splhigh();          /* Make sure interruptions are disabled */
+       my_cpu = cpu_number();
+
+       set_dirbase(tpmap, my_cpu);
+
+       splx(s);
+}
+
+
+/*
+ * disable no-execute capability on
+ * the specified pmap
+ */
+void pmap_disable_NX(pmap_t pmap) {
+  
+        pmap->nx_enabled = 0;
+}
+
+void
+pt_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
+                 vm_size_t *alloc_size, int *collectable, int *exhaustable)
+{
+        *count      = inuse_ptepages_count;
+       *cur_size   = PAGE_SIZE * inuse_ptepages_count;
+       *max_size   = PAGE_SIZE * (inuse_ptepages_count + vm_page_inactive_count + vm_page_active_count + vm_page_free_count);
+       *elem_size  = PAGE_SIZE;
+       *alloc_size = PAGE_SIZE;
+
+       *collectable = 1;
+       *exhaustable = 0;
+}
+
+vm_offset_t pmap_cpu_high_map_vaddr(int cpu, enum high_cpu_types e)
+{
+  enum high_fixed_addresses a;
+  a = e + HIGH_CPU_END * cpu;
+  return pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a);
+}
+
+vm_offset_t pmap_high_map_vaddr(enum high_cpu_types e)
+{
+  return pmap_cpu_high_map_vaddr(cpu_number(), e);
+}
+
+vm_offset_t pmap_high_map(pt_entry_t pte, enum high_cpu_types e)
+{
+  enum high_fixed_addresses a;
+  vm_offset_t vaddr;
+
+  a = e + HIGH_CPU_END * cpu_number();
+  vaddr = (vm_offset_t)pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a);
+  pmap_store_pte(pte_unique_base + a, pte);
+
+  /* TLB flush for this page on this cpu */
+  invlpg((uintptr_t)vaddr);
+
+  return  vaddr;
+}
+
+
+/*
+ * Called with pmap locked, we:
+ *  - scan through per-cpu data to see which other cpus need to flush
+ *  - send an IPI to each non-idle cpu to be flushed
+ *  - wait for each to signal back that it is inactive, or until we see that
+ *    it is in an interrupt handler or at a safe point
+ *  - flush the local tlb if it is active for this pmap
+ *  - return ... the caller will unlock the pmap
+ */
+void
+pmap_flush_tlbs(pmap_t pmap)
+{
+       unsigned int    cpu;
+       unsigned int    cpu_bit;
+       cpu_set         cpus_to_signal;
+       unsigned int    my_cpu = cpu_number();
+       pmap_paddr_t    pmap_cr3 = pmap->pm_cr3;
+       boolean_t       flush_self = FALSE;
+       uint64_t        deadline;
+
+       assert((processor_avail_count < 2) ||
+              (ml_get_interrupts_enabled() && get_preemption_level() != 0));
+
+       /*
+        * Scan other cpus for matching active or task CR3.
+        * For idle cpus (with no active map) we mark them invalid but
+        * don't signal -- they'll check as they go busy.
+        * Note: for the kernel pmap we look for 64-bit shared address maps.
+        */
+       cpus_to_signal = 0;
+       for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
+               if (!cpu_datap(cpu)->cpu_running)
+                       continue;
+               if ((cpu_datap(cpu)->cpu_task_cr3 == pmap_cr3) ||
+                   (CPU_GET_ACTIVE_CR3(cpu)      == pmap_cr3) ||
+                   (pmap->pm_shared) ||
+                   ((pmap == kernel_pmap) &&
+                    (!CPU_CR3_IS_ACTIVE(cpu) ||
+                     cpu_datap(cpu)->cpu_task_map == TASK_MAP_64BIT_SHARED))) {
+                       if (cpu == my_cpu) {
+                               flush_self = TRUE;
+                               continue;
                        }
-                       vm_map_unlock(kernel_map);
+                       cpu_datap(cpu)->cpu_tlb_invalid = TRUE;
+                       __asm__ volatile("mfence");
 
-                       cp->mapwindow[i].prv_CADDR = (caddr_t) address;
-                       cp->mapwindow[i].prv_CMAP = vtopte(address);
-                       * (int *) cp->mapwindow[i].prv_CMAP = 0; 
+                       if (CPU_CR3_IS_ACTIVE(cpu)) {
+                               cpus_to_signal |= cpu_bit;
+                               i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
+                       }
+               }
+       }
+
+       PMAP_TRACE(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_START,
+                  (int) pmap, cpus_to_signal, flush_self, 0, 0);
 
-                       kprintf("pmap_cpu_alloc() "
-                               "window=%d CADDR=0x%x CMAP=0x%x\n",
-                               i, address, vtopte(address));
+       if (cpus_to_signal) {
+               deadline = mach_absolute_time() + LockTimeOut;
+               /*
+                * Wait for those other cpus to acknowledge
+                */
+               for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
+                       while ((cpus_to_signal & cpu_bit) != 0) {
+                               if (!cpu_datap(cpu)->cpu_running ||
+                                   cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
+                                   !CPU_CR3_IS_ACTIVE(cpu)) {
+                                       cpus_to_signal &= ~cpu_bit;
+                                       break;
+                               }
+                               if (mach_absolute_time() > deadline) {
+                                       force_immediate_debugger_NMI = TRUE;
+                                       panic("pmap_flush_tlbs() timeout: "
+                                                               "cpu %d failing to respond to interrupts, pmap=%p cpus_to_signal=%lx",
+                                                               cpu, pmap, cpus_to_signal);
+                               }
+                               cpu_pause();
+                       }
+                       if (cpus_to_signal == 0)
+                               break;
                }
        }
 
        /*
-        *      Set up the pmap request list
+        * Flush local tlb if required.
+        * We need this flush even if the pmap being changed
+        * is the user map... in case we do a copyin/out
+        * before returning to user mode.
         */
-       cp->update_list = up;
-       simple_lock_init(&up->lock, 0);
-       up->count = 0;
+       if (flush_self)
+               flush_tlb();
 
-       return cp;
+
+       PMAP_TRACE(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_END,
+                  (int) pmap, cpus_to_signal, flush_self, 0, 0);
 }
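+
+/*
+ * Illustrative call pattern, not part of the original change: the nest and
+ * unnest paths above show the expected ordering around a shootdown --
+ * modify the PTEs while holding the pmap lock, then flush before unlocking.
+ * pte, new_entry, start_vaddr and end_vaddr are hypothetical names.
+ *
+ *	PMAP_LOCK(pmap);
+ *	pmap_store_pte(pte, new_entry);			// update the mapping(s)
+ *	PMAP_UPDATE_TLBS(pmap, start_vaddr, end_vaddr);	// shoot down stale TLB entries
+ *	PMAP_UNLOCK(pmap);
+ */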
 
 void
-pmap_cpu_free(struct cpu_pmap *cp)
+process_pmap_updates(void)
 {
-       if (cp != NULL && cp != &cpu_pmap_master) {
-               if (cp->update_list != NULL)
-                       kfree((void *) cp->update_list,
-                               sizeof(*cp->update_list));
-               kfree((void *) cp, sizeof(cpu_pmap_t));
+       assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
+       flush_tlb();
+
+       current_cpu_datap()->cpu_tlb_invalid = FALSE;
+       __asm__ volatile("mfence");
+}
+
+void
+pmap_update_interrupt(void)
+{
+        PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START,
+                  0, 0, 0, 0, 0);
+
+       process_pmap_updates();
+
+        PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END,
+                  0, 0, 0, 0, 0);
+}
+
+
+unsigned int pmap_cache_attributes(ppnum_t pn) {
+
+       if (!managed_page(ppn_to_pai(pn)))
+               return (VM_WIMG_IO);
+
+       return (VM_WIMG_COPYBACK);
+}
+
+#ifdef PMAP_DEBUG
+void
+pmap_dump(pmap_t p)
+{
+  int i;
+
+  kprintf("pmap %p\n", p);
+
+  kprintf("  pm_cr3 0x%llx\n",p->pm_cr3);
+  kprintf("  pm_pml4 0x%x\n",p->pm_pml4);
+  kprintf("  pm_pdpt 0x%x\n",p->pm_pdpt);
+
+  kprintf("    pml4[0] 0x%llx\n",*p->pm_pml4);
+  for (i=0;i<8;i++)
+    kprintf("    pdpt[%d] 0x%llx\n",i, p->pm_pdpt[i]);
+}
+
+void pmap_dump_wrap(void)
+{
+  pmap_dump(current_cpu_datap()->cpu_active_thread->task->map->pmap);
+}
+
+void
+dump_4GB_pdpt(pmap_t p)
+{
+       int             spl;
+       pdpt_entry_t    *user_pdptp;
+       pdpt_entry_t    *kern_pdptp;
+       pdpt_entry_t    *pml4p;
+
+       spl = splhigh();
+       while ((user_pdptp = pmap64_pdpt(p, 0x0)) == PDPT_ENTRY_NULL) {
+               splx(spl);
+               pmap_expand_pml4(p, 0x0);
+               spl = splhigh();
        }
+       kern_pdptp = kernel_pmap->pm_pdpt;
+       if (kern_pdptp == NULL)
+               panic("kern_pdptp == NULL");
+       kprintf("dump_4GB_pdpt(%p)\n"
+               "kern_pdptp=%p (phys=0x%016llx)\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "user_pdptp=%p (phys=0x%016llx)\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n",
+               p, kern_pdptp, kvtophys(kern_pdptp),
+               kern_pdptp+0, *(kern_pdptp+0),
+               kern_pdptp+1, *(kern_pdptp+1),
+               kern_pdptp+2, *(kern_pdptp+2),
+               kern_pdptp+3, *(kern_pdptp+3),
+               kern_pdptp+4, *(kern_pdptp+4),
+               user_pdptp, kvtophys(user_pdptp),
+               user_pdptp+0, *(user_pdptp+0),
+               user_pdptp+1, *(user_pdptp+1),
+               user_pdptp+2, *(user_pdptp+2),
+               user_pdptp+3, *(user_pdptp+3),
+               user_pdptp+4, *(user_pdptp+4));
+       kprintf("user pm_cr3=0x%016llx pm_hold=0x%08x pm_pml4=0x%08x\n",
+               p->pm_cr3, p->pm_hold, p->pm_pml4);
+       pml4p = (pdpt_entry_t *)p->pm_hold;
+       if (pml4p == NULL)
+               panic("user pml4p == NULL");
+       kprintf("\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n",
+               pml4p+0, *(pml4p),
+               pml4p+KERNEL_UBER_PML4_INDEX, *(pml4p+KERNEL_UBER_PML4_INDEX));
+       kprintf("kern pm_cr3=0x%016llx pm_hold=0x%08x pm_pml4=0x%08x\n",
+               kernel_pmap->pm_cr3, kernel_pmap->pm_hold, kernel_pmap->pm_pml4);
+       pml4p = (pdpt_entry_t *)kernel_pmap->pm_hold;
+       if (pml4p == NULL)
+               panic("kern pml4p == NULL");
+       kprintf("\t 0x%08x: 0x%016llx\n"
+               "\t 0x%08x: 0x%016llx\n",
+               pml4p+0, *(pml4p),
+               pml4p+511, *(pml4p+511));
+       splx(spl);
 }
+
+void dump_4GB_pdpt_thread(thread_t tp)
+{
+       dump_4GB_pdpt(tp->map->pmap);
+}
+
+
+#endif