diff --git a/osfmk/i386/pmap.c b/osfmk/i386/pmap.c
index a54c1ef3312a67a0cb3695928920a98d1f81cce6..36dae2f3ef138edd719cd43106ceea56865a6a75 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
 #include <mach_kdb.h>
 #include <mach_ldebug.h>
 
+#include <libkern/OSAtomic.h>
+
 #include <mach/machine/vm_types.h>
 
 #include <mach/boolean.h>
 #include <kern/thread.h>
 #include <kern/zalloc.h>
+#include <kern/queue.h>
 
 #include <kern/lock.h>
 #include <kern/kalloc.h>
 #include <i386/machine_cpu.h>
 #include <i386/mp_slave_boot.h>
 #include <i386/seg.h>
+#include <i386/serial_io.h>
 #include <i386/cpu_capabilities.h>
+#include <i386/machine_routines.h>
+#include <i386/proc_reg.h>
+#include <i386/tsc.h>
 
 #if    MACH_KDB
 #include <ddb/db_command.h>
 #include <ddb/db_print.h>
 #endif /* MACH_KDB */
 
-#include <kern/xpr.h>
-
 #include <vm/vm_protos.h>
 
 #include <i386/mp.h>
 
 #include <sys/kdebug.h>
 
+/* #define DEBUGINTERRUPTS 1  uncomment to ensure pmap callers have interrupts enabled */
+#ifdef DEBUGINTERRUPTS
+#define pmap_intr_assert() {if (processor_avail_count > 1 && !ml_get_interrupts_enabled()) panic("pmap interrupt assert %s, %d",__FILE__, __LINE__);}
+#else
+#define pmap_intr_assert()
+#endif
+
 #ifdef IWANTTODEBUG
 #undef DEBUG
 #define DEBUG 1
 #include <i386/postcode.h>
 #endif /* IWANTTODEBUG */
 
+//#define PMAP_TRACES 1
+#ifdef PMAP_TRACES
+boolean_t      pmap_trace = FALSE;
+#define PMAP_TRACE(x,a,b,c,d,e)                                                \
+       if (pmap_trace) {                                               \
+               KERNEL_DEBUG_CONSTANT(x,a,b,c,d,e);                     \
+       }
+#else
+#define PMAP_TRACE(x,a,b,c,d,e)        KERNEL_DEBUG(x,a,b,c,d,e)
+#endif /* PMAP_TRACES */
+
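With PMAP_TRACES compiled in, pmap_trace stays FALSE until the -pmap_trace boot-arg is parsed in pmap_bootstrap() later in this diff; without PMAP_TRACES the macro falls back to a plain KERNEL_DEBUG. A minimal sketch of how a pmap entry point brackets its work with these macros (the helper name is illustrative; the real call sites added by this change are in pmap_create() and pmap_destroy() below):

static void
pmap_traced_op_sketch(pmap_t p)
{
	/* START event, emitted only when pmap_trace has been enabled */
	PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START,
		   (int) p, 0, 0, 0, 0);

	/* ... body of the pmap operation ... */

	/* matching END event so the trace records the operation's duration */
	PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
		   (int) p, 0, 0, 0, 0);
}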
 /*
  * Forward declarations for internal functions.
  */
@@ -159,26 +182,22 @@ void              pmap_expand_pdpt(
                        pmap_t          map,
                        vm_map_offset_t v);
 
-void           pmap_expand(
-                       pmap_t          map,
-                       vm_map_offset_t v);
-
-static void    pmap_remove_range(
+void   pmap_remove_range(
                        pmap_t          pmap,
                        vm_map_offset_t va,
                        pt_entry_t      *spte,
                        pt_entry_t      *epte);
 
 void           phys_attribute_clear(
-                       ppnum_t phys,
+                       ppnum_t         phys,
                        int             bits);
 
-boolean_t      phys_attribute_test(
-                       ppnum_t phys,
+int            phys_attribute_test(
+                       ppnum_t         phys,
                        int             bits);
 
 void           phys_attribute_set(
-                       ppnum_t phys,
+                       ppnum_t         phys,
                        int             bits);
 
 void           pmap_set_reference(
@@ -192,6 +211,7 @@ void                pmap_movepage(
 boolean_t      phys_page_exists(
                        ppnum_t pn);
 
+
 #ifdef PMAP_DEBUG
 void dump_pmap(pmap_t);
 void dump_4GB_pdpt(pmap_t p);
@@ -201,9 +221,22 @@ void dump_4GB_pdpt_thread(thread_t tp);
 #define        iswired(pte)    ((pte) & INTEL_PTE_WIRED)
 
 int nx_enabled = 1;                    /* enable no-execute protection */
+int allow_data_exec  = VM_ABI_32;      /* 32-bit apps may execute data by default, 64-bit apps may not */
+int allow_stack_exec = 0;              /* No apps may execute from the stack by default */
 
 int cpu_64bit  = 0;
 
+/*
+ * when spinning through pmap_remove
+ * ensure that we don't spend too much
+ * time with preemption disabled.
+ * I'm setting the current threshold
+ * to 20us
+ */
+#define MAX_PREEMPTION_LATENCY_NS 20000
+
+uint64_t max_preemption_latency_tsc = 0;
+
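pmap_init() later in this diff converts this 20us budget to TSC ticks via tmrCvt(MAX_PREEMPTION_LATENCY_NS, tscFCvtn2t) and stores the result in max_preemption_latency_tsc. A minimal sketch of how a long pmap_remove-style walk can honor that budget, using the PMAP_LOCK/PMAP_UNLOCK macros defined further down and assuming rdtsc64() to read the time stamp counter (the helper name is illustrative):

static void
pmap_remove_sketch(pmap_t map, vm_map_offset_t s64, vm_map_offset_t e64)
{
	uint64_t deadline;

	PMAP_LOCK(map);
	deadline = rdtsc64() + max_preemption_latency_tsc;

	while (s64 < e64) {
		/* ... tear down one pde's worth of mappings here ... */
		s64 += PDE_MAPPED_SIZE;

		if (s64 < e64 && rdtsc64() > deadline) {
			/* drop the lock briefly so preemption can occur */
			PMAP_UNLOCK(map);
			PMAP_LOCK(map);
			deadline = rdtsc64() + max_preemption_latency_tsc;
		}
	}
	PMAP_UNLOCK(map);
}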
 
 /*
  *     Private data structures.
@@ -212,54 +245,202 @@ int cpu_64bit  = 0;
 /*
  *     For each vm_page_t, there is a list of all currently
  *     valid virtual mappings of that page.  An entry is
- *     a pv_entry_t; the list is the pv_table.
+ *     a pv_rooted_entry_t; the list is the pv_table.
+ *
+ *      N.B.  with the new combo rooted/hashed scheme it is
+ *      only possible to remove individual non-rooted entries
+ *      if they are found via the hashed chains as there is no
+ *      way to unlink the singly linked hashed entries if navigated to
+ *      via the queue list off the rooted entries.  Think of it as
+ *      hash/walk/pull, keeping track of the prev pointer while walking
+ *      the singly linked hash list.  All of this is to save memory and
+ *      keep both types of pv_entries as small as possible.
  */
 
+/*
+
+PV HASHING Changes - JK 1/2007
+
+Pve's establish physical to virtual mappings.  These are used for aliasing of a 
+physical page to (potentially many) virtual addresses within pmaps. In the previous 
+implementation the structure of the pv_entries (each 16 bytes in size) was
+
 typedef struct pv_entry {
-       struct pv_entry *next;          /* next pv_entry */
-       pmap_t          pmap;           /* pmap where mapping lies */
-       vm_map_offset_t va;             /* virtual address for mapping */
+    struct pv_entry     *next;
+    pmap_t                    pmap;
+    vm_map_offset_t   va;
 } *pv_entry_t;
 
-#define PV_ENTRY_NULL  ((pv_entry_t) 0)
+An initial array of these is created at boot time, one per physical page of memory, 
+indexed by the physical page number. Additionally, a pool of entries is created from a 
+pv_zone to be used as needed by pmap_enter() when it is creating new mappings.  
+Originally, we kept this pool around because the code in pmap_enter() was unable to 
+block if it needed an entry and none were available - we'd panic.  Some time ago I 
+restructured the pmap_enter() code so that for user pmaps it can block while zalloc'ing 
+a pv structure and restart, removing a panic from the code (in the case of the kernel 
+pmap we cannot block and still panic, so we keep a separate hot pool for use only on 
+kernel pmaps).  The pool has not been removed since there is a large performance gain 
+keeping freed pv's around for reuse and not suffering the overhead of zalloc for every new pv we need.
+
+As pmap_enter() created new mappings it linked the new pve's for them off the fixed 
+pv array for that ppn (off the next pointer).  These pve's are accessed for several 
+operations, one of them being address space teardown.  In that case, we basically do this
+
+       for (every page/pte in the space) {
+               calc pve_ptr from the ppn in the pte
+               for (every pv in the list for the ppn) {
+                       if (this pv is for this pmap/vaddr) {
+                               do housekeeping
+                               unlink/free the pv
+                       }
+               }
+       }
+
+The problem arose when we were running, say 8000 (or even 2000) apache or other processes 
+and one or all terminate. The list hanging off each pv array entry could have thousands of 
+entries.  We were continuously linearly searching each of these lists as we stepped through 
+the address space we were tearing down.  Because of the locks we hold, the likely cache 
+miss for each node, and the interrupt disabling needed for MP issues, the system became 
+completely unresponsive for many seconds while we did this.
+
+Realizing that pve's are accessed in two distinct ways (linearly running the list by ppn 
+for operations like pmap_page_protect and finding and modifying/removing a single pve as 
+part of pmap_enter processing) has led to modifying the pve structures and databases.
+
+There are now two types of pve structures.  A "rooted" structure which is basically the 
+original structure accessed in an array by ppn, and a "hashed" structure accessed on a 
+hash list via a hash of [pmap, vaddr].  These have been designed with the two goals of 
+minimizing wired memory and making the lookup of a ppn faster.  Since a vast majority of 
+pages in the system are not aliased and hence represented by a single pv entry I've kept 
+the rooted entry size as small as possible because there is one of these dedicated for 
+every physical page of memory.  The hashed pve's are larger due to the addition of the hash 
+link and the ppn entry needed for matching while running the hash list to find the entry we 
+are looking for.  This way, only systems that have lots of aliasing (like 2000+ httpd procs) 
+will pay the extra memory price. Both structures have the same first three fields allowing 
+some simplification in the code.
+
+They have these shapes
+
+typedef struct pv_rooted_entry {
+        queue_head_t qlink;
+        vm_map_offset_t va;
+        pmap_t          pmap;
+} *pv_rooted_entry_t;
+
+
+typedef struct pv_hashed_entry {
+  queue_head_t qlink;
+  vm_map_offset_t va;
+  pmap_t        pmap;
+  ppnum_t ppn;
+  struct pv_hashed_entry *nexth;
+} *pv_hashed_entry_t;
+
+The main flow difference is that the code is now aware of the rooted entry and the hashed 
+entries.  Code that runs the pv list still starts with the rooted entry and then continues 
+down the qlink onto the hashed entries.  Code that is looking up a specific pv entry first 
+checks the rooted entry and then hashes and runs the hash list for the match. The hash list 
+lengths are much smaller than the original pv lists that contained all aliases for the specific ppn.
+
+*/
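A condensed sketch of the two-step lookup described above, using the pv_rooted_entry/pv_hashed_entry structures and the pvhashidx/pvhash/LOCK_PV_HASH macros defined below (the helper and its boolean return are illustrative, not kernel code):

static boolean_t
pv_mapping_exists_sketch(pmap_t pmap, vm_map_offset_t va, ppnum_t ppn)
{
	pv_rooted_entry_t	pv_h = pai_to_pvh(ppn_to_pai(ppn));
	pv_hashed_entry_t	pvh_e;
	int			pvhash_idx;

	/* the common, unaliased case: the rooted entry itself matches */
	if (pv_h->pmap == pmap && pv_h->va == va)
		return TRUE;

	/* otherwise hash [pmap, va] and walk the (short) hash chain */
	CHK_NPVHASH();
	pvhash_idx = pvhashidx(pmap, va);
	LOCK_PV_HASH(pvhash_idx);
	for (pvh_e = *pvhash(pvhash_idx);
	     pvh_e != PV_HASHED_ENTRY_NULL;
	     pvh_e = pvh_e->nexth) {
		if (pvh_e->pmap == pmap && pvh_e->va == va && pvh_e->ppn == ppn)
			break;
	}
	UNLOCK_PV_HASH(pvhash_idx);
	return (pvh_e != PV_HASHED_ENTRY_NULL);
}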
 
-pv_entry_t     pv_head_table;          /* array of entries, one per page */
+typedef struct pv_rooted_entry {     /* first three entries must match pv_hashed_entry_t */
+        queue_head_t qlink;
+       vm_map_offset_t va;             /* virtual address for mapping */
+       pmap_t          pmap;           /* pmap where mapping lies */
+} *pv_rooted_entry_t;
+
+#define PV_ROOTED_ENTRY_NULL   ((pv_rooted_entry_t) 0)
+
+pv_rooted_entry_t      pv_head_table;          /* array of entries, one per page */
+
+typedef struct pv_hashed_entry {     /* first three entries must match pv_rooted_entry_t */
+  queue_head_t qlink;
+  vm_map_offset_t va;
+  pmap_t        pmap;
+  ppnum_t ppn;
+  struct pv_hashed_entry *nexth;
+} *pv_hashed_entry_t;
+
+#define PV_HASHED_ENTRY_NULL ((pv_hashed_entry_t)0)
+
+#define NPVHASH 4095   /* MUST BE 2^N - 1 */
+pv_hashed_entry_t     *pv_hash_table;  /* hash lists */
+
+uint32_t npvhash = 0;
+
+/* #define PV_DEBUG 1   uncomment to enable some PV debugging code */
+#ifdef PV_DEBUG
+#define CHK_NPVHASH() if(0 == npvhash) panic("npvhash uninitialized");
+#else
+#define CHK_NPVHASH()
+#endif
 
 /*
  *     pv_list entries are kept on a list that can only be accessed
  *     with the pmap system locked (at SPLVM, not in the cpus_active set).
- *     The list is refilled from the pv_list_zone if it becomes empty.
+ *     The list is refilled from the pv_hashed_list_zone if it becomes empty.
  */
-pv_entry_t     pv_free_list;           /* free list at SPLVM */
-decl_simple_lock_data(,pv_free_list_lock)
+pv_rooted_entry_t      pv_free_list = PV_ROOTED_ENTRY_NULL;            /* free list at SPLVM */
+pv_hashed_entry_t      pv_hashed_free_list = PV_HASHED_ENTRY_NULL;
+pv_hashed_entry_t      pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL;
+decl_simple_lock_data(,pv_hashed_free_list_lock)
+decl_simple_lock_data(,pv_hashed_kern_free_list_lock)
+decl_simple_lock_data(,pv_hash_table_lock)
+
 int pv_free_count = 0;
-#define PV_LOW_WATER_MARK 5000
-#define PV_ALLOC_CHUNK 2000
+int pv_hashed_free_count = 0;
+int pv_kern_free_count = 0;
+int pv_hashed_kern_free_count = 0;
+#define PV_HASHED_LOW_WATER_MARK 5000
+#define PV_HASHED_KERN_LOW_WATER_MARK 100
+#define PV_HASHED_ALLOC_CHUNK 2000
+#define PV_HASHED_KERN_ALLOC_CHUNK 50
 thread_call_t  mapping_adjust_call;
 static thread_call_data_t  mapping_adjust_call_data;
-int mappingrecurse = 0;
-
-#define        PV_ALLOC(pv_e) { \
-       simple_lock(&pv_free_list_lock); \
-       if ((pv_e = pv_free_list) != 0) { \
-           pv_free_list = pv_e->next; \
-            pv_free_count--; \
-            if (pv_free_count < PV_LOW_WATER_MARK) \
+uint32_t mappingrecurse = 0;
+
+#define        PV_HASHED_ALLOC(pvh_e) { \
+       simple_lock(&pv_hashed_free_list_lock); \
+       if ((pvh_e = pv_hashed_free_list) != 0) { \
+         pv_hashed_free_list = (pv_hashed_entry_t)pvh_e->qlink.next;   \
+            pv_hashed_free_count--; \
+            if (pv_hashed_free_count < PV_HASHED_LOW_WATER_MARK) \
+              if (hw_compare_and_store(0,1,(u_int *)&mappingrecurse)) \
+                thread_call_enter(mapping_adjust_call); \
+       } \
+       simple_unlock(&pv_hashed_free_list_lock); \
+}
+
+#define        PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt) {   \
+       simple_lock(&pv_hashed_free_list_lock); \
+       pvh_et->qlink.next = (queue_entry_t)pv_hashed_free_list;        \
+       pv_hashed_free_list = pvh_eh; \
+        pv_hashed_free_count += pv_cnt; \
+       simple_unlock(&pv_hashed_free_list_lock); \
+}
+
+#define        PV_HASHED_KERN_ALLOC(pvh_e) { \
+       simple_lock(&pv_hashed_kern_free_list_lock); \
+       if ((pvh_e = pv_hashed_kern_free_list) != 0) { \
+         pv_hashed_kern_free_list = (pv_hashed_entry_t)pvh_e->qlink.next;      \
+            pv_hashed_kern_free_count--; \
+            if (pv_hashed_kern_free_count < PV_HASHED_KERN_LOW_WATER_MARK) \
               if (hw_compare_and_store(0,1,(u_int *)&mappingrecurse)) \
                 thread_call_enter(mapping_adjust_call); \
        } \
-       simple_unlock(&pv_free_list_lock); \
+       simple_unlock(&pv_hashed_kern_free_list_lock); \
 }
 
-#define        PV_FREE(pv_e) { \
-       simple_lock(&pv_free_list_lock); \
-       pv_e->next = pv_free_list; \
-       pv_free_list = pv_e; \
-        pv_free_count++; \
-       simple_unlock(&pv_free_list_lock); \
+#define        PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt) {       \
+       simple_lock(&pv_hashed_kern_free_list_lock); \
+       pvh_et->qlink.next = (queue_entry_t)pv_hashed_kern_free_list;   \
+       pv_hashed_kern_free_list = pvh_eh; \
+        pv_hashed_kern_free_count += pv_cnt; \
+       simple_unlock(&pv_hashed_kern_free_list_lock); \
 }
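These macros only pop from the free lists (and kick mapping_adjust when a low-water mark is crossed); they never block. A caller such as pmap_enter() therefore has to handle the empty case itself, roughly as the PV HASHING comment above describes: user pmaps may block in zalloc() of pv_hashed_list_zone (declared just below), while the kernel pmap must rely on the separate hot pool. A sketch under those assumptions (the helper name is illustrative):

static pv_hashed_entry_t
pvh_alloc_sketch(pmap_t pmap)
{
	pv_hashed_entry_t	pvh_e;

	if (pmap == kernel_pmap) {
		/* the kernel pmap cannot block: use the hot pool or give up */
		PV_HASHED_KERN_ALLOC(pvh_e);
		if (pvh_e == PV_HASHED_ENTRY_NULL)
			panic("pvh_alloc_sketch: kernel pv hot pool empty");
	} else {
		PV_HASHED_ALLOC(pvh_e);
		if (pvh_e == PV_HASHED_ENTRY_NULL) {
			/* user pmap: safe to block in the zone allocator */
			pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
		}
	}
	return pvh_e;
}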
 
-zone_t         pv_list_zone;           /* zone of pv_entry structures */
+zone_t         pv_hashed_list_zone;    /* zone of pv_hashed_entry structures */
 
 static zone_t pdpt_zone;
 
@@ -272,41 +453,49 @@ static zone_t pdpt_zone;
 char   *pv_lock_table;         /* pointer to array of bits */
 #define pv_lock_table_size(n)  (((n)+BYTE_SIZE-1)/BYTE_SIZE)
 
+char    *pv_hash_lock_table;
+#define pv_hash_lock_table_size(n)  (((n)+BYTE_SIZE-1)/BYTE_SIZE)
+
 /*
  *     First and last physical addresses that we maintain any information
  *     for.  Initialized to zero so that pmap operations done before
  *     pmap_init won't touch any non-existent structures.
  */
-pmap_paddr_t   vm_first_phys = (pmap_paddr_t) 0;
-pmap_paddr_t   vm_last_phys  = (pmap_paddr_t) 0;
 boolean_t      pmap_initialized = FALSE;/* Has pmap_init completed? */
 
 static struct vm_object kptobj_object_store;
 static vm_object_t kptobj;
 
 /*
- *     Index into pv_head table, its lock bits, and the modify/reference
- *     bits starting at vm_first_phys.
+ *     Index into pv_head table, its lock bits, and the modify/reference and managed bits
  */
 
-#define pa_index(pa)   (i386_btop(pa - vm_first_phys))
+#define pa_index(pa)   (i386_btop(pa))
+#define ppn_to_pai(ppn)        ((int)ppn)
 
 #define pai_to_pvh(pai)                (&pv_head_table[pai])
 #define lock_pvh_pai(pai)      bit_lock(pai, (void *)pv_lock_table)
 #define unlock_pvh_pai(pai)    bit_unlock(pai, (void *)pv_lock_table)
 
+#define pvhashidx(pmap, va) (((uint32_t)pmap ^ ((uint32_t)((uint64_t)va >> PAGE_SHIFT) & 0xFFFFFFFF)) & npvhash)
+#define pvhash(idx)         (&pv_hash_table[idx])
+
+#define lock_hash_hash(hash)           bit_lock(hash, (void *)pv_hash_lock_table)
+#define unlock_hash_hash(hash) bit_unlock(hash, (void *)pv_hash_lock_table)
+
 /*
 *     Array of physical page attributes for managed pages.
  *     One byte per physical page.
  */
 char   *pmap_phys_attributes;
+unsigned int   last_managed_page = 0;
 
 /*
  *     Physical page attributes.  Copy bits from PTE definition.
  */
 #define        PHYS_MODIFIED   INTEL_PTE_MOD   /* page modified */
 #define        PHYS_REFERENCED INTEL_PTE_REF   /* page referenced */
-#define PHYS_NCACHE    INTEL_PTE_NCACHE
+#define PHYS_MANAGED   INTEL_PTE_VALID /* page is managed */
 
 /*
  *     Amount of virtual memory mapped by one
@@ -320,7 +509,7 @@ uint64_t pde_mapped_size;
  */
 
 /*
- *     Locking Protocols:
+ *     Locking Protocols: (changed 2/2007 JK)
  *
  *     There are two structures in the pmap module that need locking:
  *     the pmaps themselves, and the per-page pv_lists (which are locked
@@ -330,87 +519,55 @@ uint64_t pde_mapped_size;
  *     pmap_remove_all and pmap_copy_on_write operate on a physical page
  *     basis and want to do the locking in the reverse order, i.e. lock
  *     a pv_list and then go through all the pmaps referenced by that list.
- *     To protect against deadlock between these two cases, the pmap_lock
- *     is used.  There are three different locking protocols as a result:
- *
- *  1.  pmap operations only (pmap_extract, pmap_access, ...)  Lock only
- *             the pmap.
- *
- *  2.  pmap-based operations (pmap_enter, pmap_remove, ...)  Get a read
- *             lock on the pmap_lock (shared read), then lock the pmap
- *             and finally the pv_lists as needed [i.e. pmap lock before
- *             pv_list lock.]
  *
- *  3.  pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
- *             Get a write lock on the pmap_lock (exclusive write); this
- *             also guaranteees exclusive access to the pv_lists.  Lock the
- *             pmaps as needed.
- *
- *     At no time may any routine hold more than one pmap lock or more than
- *     one pv_list lock.  Because interrupt level routines can allocate
- *     mbufs and cause pmap_enter's, the pmap_lock and the lock on the
- *     kernel_pmap can only be held at splhigh.
- */
-
-/*
- *     We raise the interrupt level to splvm, to block interprocessor
- *     interrupts during pmap operations.  We mark the cpu's cr3 inactive
- *     while interrupts are blocked.
+ *      The system wide pmap lock has been removed. Now, paths take a lock
+ *      on the pmap before changing its 'shape' and the reverse order lockers
+ *      (coming in by phys ppn) take a lock on the corresponding pv and then
+ *      retest to be sure nothing changed during the window before they locked
+ *      and can then run up/down the pv lists holding the list lock. This also
+ *      lets the pmap layer run (nearly completely) with interrupts enabled, unlike
+ *      previously.
  */
-#define SPLVM(spl)     {                                               \
-       spl = splhigh();                                                \
-       CPU_CR3_MARK_INACTIVE();                                        \
-}
 
-#define SPLX(spl)      {                                               \
-       if (current_cpu_datap()->cpu_tlb_invalid)                       \
-           process_pmap_updates();                                     \
-       CPU_CR3_MARK_ACTIVE();                                          \
-       splx(spl);                                                      \
-}
-           
 /*
- *     Lock on pmap system
+ * pmap locking
  */
-lock_t pmap_system_lock;
 
-#define PMAP_READ_LOCK(pmap, spl) {    \
-       SPLVM(spl);                     \
-       lock_read(&pmap_system_lock);   \
+#define PMAP_LOCK(pmap) {      \
        simple_lock(&(pmap)->lock);     \
 }
 
-#define PMAP_WRITE_LOCK(spl) {         \
-       SPLVM(spl);                     \
-       lock_write(&pmap_system_lock);  \
-}
-
-#define PMAP_READ_UNLOCK(pmap, spl) {          \
+#define PMAP_UNLOCK(pmap) {            \
        simple_unlock(&(pmap)->lock);           \
-       lock_read_done(&pmap_system_lock);      \
-       SPLX(spl);                              \
 }
 
-#define PMAP_WRITE_UNLOCK(spl) {               \
-       lock_write_done(&pmap_system_lock);     \
-       SPLX(spl);                              \
+/*
+ * PV locking
+ */
+
+#define LOCK_PVH(index)                {       \
+    mp_disable_preemption();           \
+    lock_pvh_pai(index);               \
 }
 
-#define PMAP_WRITE_TO_READ_LOCK(pmap) {                \
-       simple_lock(&(pmap)->lock);             \
-       lock_write_to_read(&pmap_system_lock);  \
+#define UNLOCK_PVH(index)  {      \
+    unlock_pvh_pai(index);        \
+    mp_enable_preemption();       \
 }
 
-#define LOCK_PVH(index)                lock_pvh_pai(index)
+/*
+ * PV hash locking
+ */
+
+#define LOCK_PV_HASH(hash)         lock_hash_hash(hash)
 
-#define UNLOCK_PVH(index)      unlock_pvh_pai(index)
+#define UNLOCK_PV_HASH(hash)       unlock_hash_hash(hash)
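Per the revised protocol sketched in the Locking Protocols comment above, pmap-based paths take the pmap lock before any per-page locks, with the hash-chain bit-lock innermost; ppn-based paths instead lock the pv entry first and retest. A rough sketch of how the forward ordering nests (illustrative only; real paths take only the locks they actually need):

static void
pv_lock_order_sketch(pmap_t pmap, ppnum_t ppn, vm_map_offset_t va)
{
	int	pai = ppn_to_pai(ppn);
	int	pvhash_idx = pvhashidx(pmap, va);

	PMAP_LOCK(pmap);		/* pmap "shape" lock first */
	LOCK_PVH(pai);			/* per-page pv lock (disables preemption) */
	LOCK_PV_HASH(pvhash_idx);	/* hash chain bit-lock, innermost */

	/* ... link or unlink a pv_hashed_entry_t here ... */

	UNLOCK_PV_HASH(pvhash_idx);
	UNLOCK_PVH(pai);
	PMAP_UNLOCK(pmap);
}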
 
 #if    USLOCK_DEBUG
 extern int     max_lock_loops;
-extern int     disableSerialOuput;
 #define LOOP_VAR                                                       \
        unsigned int    loop_count;                                     \
-       loop_count = disableSerialOuput ? max_lock_loops                \
+       loop_count = disable_serial_output ? max_lock_loops             \
                                        : max_lock_loops*100
 #define LOOP_CHECK(msg, pmap)                                          \
        if (--loop_count == 0) {                                        \
@@ -453,7 +610,7 @@ struct zone *pmap_zone;             /* zone of pmap structures */
 
 int            pmap_debug = 0;         /* flag for debugging prints */
 
-unsigned int   inuse_ptepages_count = 0;       /* debugging */
+unsigned int   inuse_ptepages_count = 0;
 
 addr64_t       kernel64_cr3;
 boolean_t      no_shared_cr3 = FALSE;  /* -no_shared_cr3 boot arg */
@@ -472,25 +629,47 @@ extern char end;
 
 static int nkpt;
 extern uint32_t lowGlo;
-extern void *version;
 
 pt_entry_t     *DMAP1, *DMAP2;
 caddr_t         DADDR1;
 caddr_t         DADDR2;
 
-#if  DEBUG_ALIAS
-#define PMAP_ALIAS_MAX 32
-struct pmap_alias {
-        vm_offset_t rpc;
-        pmap_t pmap;
-        vm_map_offset_t va;
-        int cookie;
-#define PMAP_ALIAS_COOKIE 0xdeadbeef
-} pmap_aliasbuf[PMAP_ALIAS_MAX];
-int pmap_alias_index = 0;
-extern vm_offset_t get_rpc();
+static inline
+void pmap_pvh_unlink(pv_hashed_entry_t pv);
+
+/*
+ * unlinks the pv_hashed_entry_t pvh from the singly linked hash chain.
+ * properly deals with the anchor.
+ * must be called with the hash locked, does not unlock it
+ */
+
+static inline
+void pmap_pvh_unlink(pv_hashed_entry_t pvh)
+{
+  pv_hashed_entry_t curh;
+  pv_hashed_entry_t *pprevh;
+  int pvhash_idx;
+
+  CHK_NPVHASH();
+  pvhash_idx = pvhashidx(pvh->pmap, pvh->va);
+
+  pprevh = pvhash(pvhash_idx);
 
-#endif  /* DEBUG_ALIAS */
+#if PV_DEBUG
+  if (NULL == *pprevh) panic("pvh_unlink null anchor"); /* JK DEBUG */
+#endif
+  curh = *pprevh;
+
+  while (PV_HASHED_ENTRY_NULL != curh) {
+    if (pvh == curh)
+      break;
+    pprevh = &curh->nexth;
+    curh = curh->nexth;
+  }
+  if (PV_HASHED_ENTRY_NULL == curh) panic("pmap_pvh_unlink no pvh");
+  *pprevh = pvh->nexth;
+  return;
+}
 
 /*
  * for legacy, returns the address of the pde entry.
@@ -567,7 +746,7 @@ pmap64_pdpt(pmap_t pmap, vm_map_offset_t vaddr)
                          ((vm_offset_t)((vaddr>>PDPTSHIFT)&(NPDPTPG-1))));
        }
 
-       return (0);
+       return (NULL);
 }
 
 /*
@@ -612,9 +791,18 @@ pmap64_pde(pmap_t pmap, vm_map_offset_t vaddr)
                          ((vm_offset_t)((vaddr>>PDSHIFT)&(NPDPG-1))));
        }
 
-       return (0);
+       return (NULL);
 }
 
+/*
+ * Because the page tables (top 3 levels) are mapped into per cpu windows,
+ * callers must either disable interrupts or disable preemption before calling
+ * one of the pte mapping routines (e.g. pmap_pte()) as the returned vaddr
+ * is in one of those mapped windows and that cannot be allowed to change until
+ * the caller is done using the returned pte pointer. When done, the caller
+ * restores interrupts or preemption to its previous state, after which point the
+ * vaddr for the returned pte can no longer be used.
+ */
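Because the returned pointer lives in a per-cpu mapping window, callers that are not already running with interrupts or preemption disabled bracket pmap_pte() the way pmap_map_bd() and pmap_bootstrap() do later in this diff. A minimal sketch (the helper name is illustrative):

static void
pte_window_usage_sketch(pmap_t pmap, vm_map_offset_t vaddr, pt_entry_t new_pte)
{
	pt_entry_t	*ptep;
	spl_t		s;

	s = splhigh();			/* keep the per-cpu window stable */
	ptep = pmap_pte(pmap, vaddr);
	if (ptep != PT_ENTRY_NULL)
		pmap_store_pte(ptep, new_pte);
	splx(s);			/* window may now be recycled */
}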
 
 
 /*
@@ -640,10 +828,12 @@ pmap_pte(pmap_t pmap, vm_map_offset_t vaddr)
        pde = pmap_pde(pmap,vaddr);
 
        if (pde && ((*pde & INTEL_PTE_VALID))) {
-         if (pmap == kernel_pmap) {
-           return (vtopte(vaddr)); /* compat kernel still has pte's mapped */
-         }
-
+           if (pmap == kernel_pmap)
+               return (vtopte(vaddr)); /* compat kernel still has pte's mapped */
+#if TESTING
+           if (ml_get_interrupts_enabled() && get_preemption_level() == 0)
+               panic("pmap_pte: unsafe call");
+#endif
                assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
 
                newpf = *pde & PG_FRAME;
@@ -666,9 +856,9 @@ pmap_pte(pmap_t pmap, vm_map_offset_t vaddr)
                          ((vm_offset_t)i386_btop(vaddr) & (NPTEPG-1)));
        }
 
-       return(0);
+       return(NULL);
 }
-
+       
 
 /*
  *     Map memory at initialization.  The physical addresses being
@@ -701,7 +891,6 @@ pmap_map(
  *     Back-door routine for mapping kernel VM at initialization.  
  *     Useful for mapping memory outside the range
  *      Sets no-cache, A, D.
- *     [vm_first_phys, vm_last_phys) (i.e., devices).
  *     Otherwise like pmap_map.
  */
 vm_offset_t
@@ -714,6 +903,7 @@ pmap_map_bd(
 {
        pt_entry_t      template;
        pt_entry_t      *pte;
+       spl_t           spl;
 
        template = pa_to_pte(start_addr)
                | INTEL_PTE_REF
@@ -731,11 +921,13 @@ pmap_map_bd(
            template |= INTEL_PTE_WRITE;
 
        while (start_addr < end_addr) {
+               spl = splhigh();
                pte = pmap_pte(kernel_pmap, (vm_map_offset_t)virt);
                if (pte == PT_ENTRY_NULL) {
                        panic("pmap_map_bd: Invalid kernel address\n");
                }
                pmap_store_pte(pte, template);
+               splx(spl);
                pte_increment_pa(template);
                virt += PAGE_SIZE;
                start_addr += PAGE_SIZE;
@@ -752,6 +944,25 @@ extern  vm_offset_t     etext;
 extern  void            *sectHIBB;
 extern  int             sectSizeHIB;
 
+void
+pmap_cpu_init(void)
+{
+       /*
+        * Here early in the life of a processor (from cpu_mode_init()).
+        * If we're not in 64-bit mode, enable the global TLB feature.
+        * Note: regardless of mode we continue to set the global attribute
+        * bit in ptes for all (32-bit) global pages such as the commpage.
+        */
+       if (!cpu_64bit) {
+               set_cr4(get_cr4() | CR4_PGE);
+       }
+
+       /*
+        * Initialize the per-cpu, TLB-related fields.
+        */
+       current_cpu_datap()->cpu_active_cr3 = kernel_pmap->pm_cr3;
+       current_cpu_datap()->cpu_tlb_invalid = FALSE;
+}
 
 vm_offset_t
 pmap_high_shared_remap(enum high_fixed_addresses e, vm_offset_t va, int sz)
@@ -760,8 +971,10 @@ pmap_high_shared_remap(enum high_fixed_addresses e, vm_offset_t va, int sz)
   pt_entry_t *ptep;
   pmap_paddr_t pa;
   int i;
+  spl_t s;
 
   assert(0 == (va & PAGE_MASK));  /* expecting page aligned */
+  s = splhigh();
   ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)ve);
 
   for (i=0; i< sz; i++) {
@@ -775,6 +988,7 @@ pmap_high_shared_remap(enum high_fixed_addresses e, vm_offset_t va, int sz)
     va+= PAGE_SIZE;
     ptep++;
   }
+  splx(s);
   return ve;
 }
 
@@ -806,13 +1020,16 @@ pmap_init_high_shared(void)
        vm_offset_t haddr;
         struct __gdt_desc_struct gdt_desc = {0,0,0};
        struct __idt_desc_struct idt_desc = {0,0,0};
+       spl_t s;
 #if MACH_KDB
        struct i386_tss *ttss;
 #endif
 
        kprintf("HIGH_MEM_BASE 0x%x fixed per-cpu begin 0x%x\n", 
                HIGH_MEM_BASE,pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN));
+       s = splhigh();
        pte_unique_base = pmap_pte(kernel_pmap, (vm_map_offset_t)pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN));
+       splx(s);
 
        if (i386_btop(&hi_remap_etext - &hi_remap_text + 1) >
                                HIGH_FIXED_TRAMPS_END - HIGH_FIXED_TRAMPS + 1)
@@ -917,6 +1134,7 @@ pmap_bootstrap(
        int i;
        int wpkernel, boot_arg;
        pdpt_entry_t *pdpt;
+       spl_t s;
 
        vm_last_addr = VM_MAX_KERNEL_ADDRESS;   /* Set the highest address
                                                 * known to VM */
@@ -930,7 +1148,7 @@ pmap_bootstrap(
        kernel_pmap = &kernel_pmap_store;
        kernel_pmap->ref_count = 1;
        kernel_pmap->nx_enabled = FALSE;
-       kernel_pmap->pm_64bit = 0;
+       kernel_pmap->pm_task_map = TASK_MAP_32BIT;
        kernel_pmap->pm_obj = (vm_object_t) NULL;
        kernel_pmap->dirbase = (pd_entry_t *)((unsigned int)IdlePTD | KERNBASE);
        kernel_pmap->pdirbase = (pmap_paddr_t)((int)IdlePTD);
@@ -959,7 +1177,9 @@ pmap_bootstrap(
        high_shared_pde = *pmap_pde(kernel_pmap, HIGH_MEM_BASE);
        /* make sure G bit is on for high shared pde entry */
        high_shared_pde |= INTEL_PTE_GLOBAL;
+       s = splhigh();
        pmap_store_pte(pmap_pde(kernel_pmap, HIGH_MEM_BASE), high_shared_pde);
+       splx(s);
 
        nkpt = NKPT;
        inuse_ptepages_count += NKPT;
@@ -989,21 +1209,28 @@ pmap_bootstrap(
        SYSMAP(caddr_t, DMAP1, DADDR1, 1);
        SYSMAP(caddr_t, DMAP2, DADDR2, 1);  /* XXX temporary - can remove */
 
-
-       lock_init(&pmap_system_lock,
-                 FALSE,                /* NOT a sleep lock */
-                 0, 0);
-
        virtual_avail = va;
 
+       if (PE_parse_boot_arg("npvhash", &npvhash)) {
+         if (0 != ((npvhash+1) & npvhash)) {
+           kprintf("invalid hash %d, must be ((2^N)-1), using default %d\n",npvhash,NPVHASH);
+           npvhash = NPVHASH;
+         }
+       } else {
+         npvhash = NPVHASH;
+       }
+       printf("npvhash=%d\n",npvhash);
+
        wpkernel = 1;
        if (PE_parse_boot_arg("wpkernel", &boot_arg)) {
                if (boot_arg == 0)
                        wpkernel = 0;
        }
 
+       s = splhigh();
+
        /* Remap kernel text readonly unless the "wpkernel" boot-arg is present
-        * and set to 0.
+        * and set to 0.
         */
        if (wpkernel)
        {
@@ -1025,14 +1252,17 @@ pmap_bootstrap(
 
        /* map lowmem global page into fixed addr 0x2000 */
        if (0 == (pte = pmap_pte(kernel_pmap,0x2000))) panic("lowmem pte");
-
+       assert(0 == ((vm_offset_t) &lowGlo & PAGE_MASK)); /* make sure it is defined on page boundary */
        pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)|INTEL_PTE_VALID|INTEL_PTE_REF|INTEL_PTE_MOD|INTEL_PTE_WIRED|INTEL_PTE_RW);
+       splx(s);
        flush_tlb();
 
        simple_lock_init(&kernel_pmap->lock, 0);
-       simple_lock_init(&pv_free_list_lock, 0);
+       simple_lock_init(&pv_hashed_free_list_lock, 0);
+       simple_lock_init(&pv_hashed_kern_free_list_lock, 0);
+       simple_lock_init(&pv_hash_table_lock,0);
 
-        pmap_init_high_shared();
+       pmap_init_high_shared();
 
        pde_mapped_size = PDE_MAPPED_SIZE;
 
@@ -1068,20 +1298,20 @@ pmap_bootstrap(
          pmap_store_pte((ppml4+KERNEL_UBER_PML4_INDEX), *(ppml4+0));
 
          kernel64_cr3 = (addr64_t) kernel_pmap->pm_cr3;
-         cpu_IA32e_enable(current_cpu_datap());
-         current_cpu_datap()->cpu_is64bit = TRUE;
-         /* welcome to a 64 bit world */
 
-         /* Re-initialize and load descriptors */
+         /* Re-initialize descriptors and prepare to switch modes */
          cpu_desc_init64(&cpu_data_master, TRUE);
-         cpu_desc_load64(&cpu_data_master);
-         fast_syscall_init64();
+         current_cpu_datap()->cpu_is64bit = TRUE;
+         current_cpu_datap()->cpu_active_cr3 = kernel64_cr3;
 
          pde_mapped_size = 512*4096 ; 
 
          ml_set_interrupts_enabled(istate);
-
        }
+
+       /* Set 64-bit mode if required. */
+       cpu_mode_init(&cpu_data_master);
+
        kernel_pmap->pm_hold = (vm_offset_t)kernel_pmap->pm_pml4;
 
        kprintf("Kernel virtual space from 0x%x to 0x%x.\n",
@@ -1099,7 +1329,13 @@ pmap_bootstrap(
         */
        if (PE_parse_boot_arg("-no_shared_cr3", &no_shared_cr3)) {
                kprintf("Shared kernel address space disabled\n");
-       }
+       }       
+
+#ifdef PMAP_TRACES
+       if (PE_parse_boot_arg("-pmap_trace", &pmap_trace)) {
+               kprintf("Kernel traces for pmap operations enabled\n");
+       }       
+#endif /* PMAP_TRACES */
 }
 
 void
@@ -1123,18 +1359,23 @@ pmap_init(void)
        vm_offset_t             addr;
        register vm_size_t      s;
        vm_map_offset_t         vaddr;
-       ppnum_t                 ppn;
+       ppnum_t ppn;
 
        /*
         *      Allocate memory for the pv_head_table and its lock bits,
         *      the modify bit array, and the pte_page table.
         */
 
-       /* zero bias all these arrays now instead of off avail_start
-          so we cover all memory */
+       /*
+        * zero bias all these arrays now instead of off avail_start
+        * so we cover all memory
+        */
+
        npages = i386_btop(avail_end);
-       s = (vm_size_t) (sizeof(struct pv_entry) * npages
-                               + pv_lock_table_size(npages)
+       s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages
+                        + (sizeof (struct pv_hashed_entry_t *) * (npvhash+1))
+                        + pv_lock_table_size(npages)
+                        + pv_hash_lock_table_size((npvhash+1))
                                + npages);
 
        s = round_page(s);
@@ -1143,16 +1384,47 @@ pmap_init(void)
 
        memset((char *)addr, 0, s);
 
+#if PV_DEBUG
+       if (0 == npvhash) panic("npvhash not initialized");
+#endif
+
        /*
         *      Allocate the structures first to preserve word-alignment.
         */
-       pv_head_table = (pv_entry_t) addr;
+       pv_head_table = (pv_rooted_entry_t) addr;
        addr = (vm_offset_t) (pv_head_table + npages);
 
+       pv_hash_table = (pv_hashed_entry_t *)addr;
+       addr = (vm_offset_t) (pv_hash_table + (npvhash + 1));
+
        pv_lock_table = (char *) addr;
        addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
 
+       pv_hash_lock_table = (char *) addr;
+       addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhash+1)));
+
        pmap_phys_attributes = (char *) addr;
+       {
+               unsigned int i;
+               unsigned int pn;
+               ppnum_t  last_pn;
+               pmap_memory_region_t *pmptr = pmap_memory_regions;
+
+               last_pn = i386_btop(avail_end);
+
+               for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
+                       if (pmptr->type == kEfiConventionalMemory) {
+                               for (pn = pmptr->base; pn <= pmptr->end; pn++) {
+                                       if (pn < last_pn) {
+                                               pmap_phys_attributes[pn] |= PHYS_MANAGED;
+
+                                               if (pn > last_managed_page)
+                                                       last_managed_page = pn;
+                                       }
+                               }
+                       }
+               }
+       }
 
        /*
         *      Create the zone of physical maps,
@@ -1160,24 +1432,13 @@ pmap_init(void)
         */
        s = (vm_size_t) sizeof(struct pmap);
        pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
-       s = (vm_size_t) sizeof(struct pv_entry);
-       pv_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */
+       s = (vm_size_t) sizeof(struct pv_hashed_entry);
+       pv_hashed_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */
        s = 63;
        pdpt_zone = zinit(s, 400*s, 4096, "pdpt"); /* XXX */
 
-       /*
-        *      Only now, when all of the data structures are allocated,
-        *      can we set vm_first_phys and vm_last_phys.  If we set them
-        *      too soon, the kmem_alloc_wired above will try to use these
-        *      data structures and blow up.
-        */
-
-       /* zero bias this now so we cover all memory */
-       vm_first_phys = 0;
-       vm_last_phys = avail_end;
-
        kptobj = &kptobj_object_store;
-       _vm_object_allocate((vm_object_size_t)NKPDE, kptobj);
+       _vm_object_allocate((vm_object_size_t)(NPGPTD*NPTDPG), kptobj);
        kernel_pmap->pm_obj = kptobj;
 
        /* create pv entries for kernel pages mapped by low level
@@ -1186,24 +1447,26 @@ pmap_init(void)
 
        vaddr = (vm_map_offset_t)0;
        for (ppn = 0; ppn < i386_btop(avail_start) ; ppn++ ) {
-         pv_entry_t    pv_e;
+         pv_rooted_entry_t     pv_e;
 
          pv_e = pai_to_pvh(ppn);
          pv_e->va = vaddr;
          vaddr += PAGE_SIZE;
-         kernel_pmap->stats.resident_count++;
          pv_e->pmap = kernel_pmap;
-         pv_e->next = PV_ENTRY_NULL;
+         queue_init(&pv_e->qlink);
        }
 
        pmap_initialized = TRUE;
 
        /*
-        *      Initializie pmap cache.
+        *      Initialize pmap cache.
         */
        pmap_cache_list = PMAP_NULL;
        pmap_cache_count = 0;
        simple_lock_init(&pmap_cache_lock, 0);
+
+       max_preemption_latency_tsc = tmrCvt((uint64_t)MAX_PREEMPTION_LATENCY_NS, tscFCvtn2t);
+
 }
 
 void
@@ -1219,37 +1482,77 @@ x86_lowmem_free(void)
 }
 
 
-#define valid_page(x) (pmap_initialized && pmap_valid_page(x))
+#define managed_page(x) ( (unsigned int)x <= last_managed_page && (pmap_phys_attributes[x] & PHYS_MANAGED) )
 
+/*
+ * this function is only used for debugging from the vm layer
+ */
 boolean_t
 pmap_verify_free(
                 ppnum_t pn)
 {
-        pmap_paddr_t   phys;
-       pv_entry_t      pv_h;
+       pv_rooted_entry_t       pv_h;
        int             pai;
-       spl_t           spl;
        boolean_t       result;
 
        assert(pn != vm_page_fictitious_addr);
-       phys = (pmap_paddr_t)i386_ptob(pn);
+
        if (!pmap_initialized)
                return(TRUE);
 
-       if (!pmap_valid_page(pn))
-               return(FALSE);
+       if (pn == vm_page_guard_addr)
+               return TRUE;
 
-       PMAP_WRITE_LOCK(spl);
+       pai = ppn_to_pai(pn);
+       if (!managed_page(pai))
+               return(FALSE);
+       pv_h = pai_to_pvh(pn);
+       result = (pv_h->pmap == PMAP_NULL);
+       return(result);
+}
 
-       pai = pa_index(phys);
-       pv_h = pai_to_pvh(pai);
+boolean_t
+pmap_is_empty(
+       pmap_t          pmap,
+       vm_map_offset_t vstart,
+       vm_map_offset_t vend)
+{
+       vm_map_offset_t offset;
+       ppnum_t         phys_page;
 
-       result = (pv_h->pmap == PMAP_NULL);
-       PMAP_WRITE_UNLOCK(spl);
+       if (pmap == PMAP_NULL) {
+               return TRUE;
+       }
+       for (offset = vstart;
+            offset < vend;
+            offset += PAGE_SIZE_64) {
+               phys_page = pmap_find_phys(pmap, offset);
+               if (phys_page) {
+                       if (pmap != kernel_pmap &&
+                           pmap->pm_task_map == TASK_MAP_32BIT &&
+                           offset >= HIGH_MEM_BASE) {
+                               /*
+                                * The "high_shared_pde" is used to share
+                                * the entire top-most 2MB of address space
+                                * between the kernel and all 32-bit tasks.
+                                * So none of this can be removed from 32-bit
+                                * tasks.
+                                * Let's pretend there's nothing up
+                                * there...
+                                */
+                               return TRUE;
+                       }
+                       kprintf("pmap_is_empty(%p,0x%llx,0x%llx): "
+                               "page %d at 0x%llx\n",
+                               pmap, vstart, vend, phys_page, offset);
+                       return FALSE;
+               }
+       }
 
-       return(result);
+       return TRUE;
 }
 
+
 /*
  *     Create and return a physical map.
  *
@@ -1265,18 +1568,21 @@ pmap_verify_free(
 pmap_t
 pmap_create(
            vm_map_size_t       sz,
-           boolean_t           is_64bit)
+           boolean_t           is_64bit)
 {
-        register pmap_t                        p;
+       pmap_t                  p;
        int             i;
        vm_offset_t     va;
        vm_size_t       size;
        pdpt_entry_t    *pdpt;
        pml4_entry_t    *pml4p;
-       int template;
        pd_entry_t      *pdp;
+       int template;
        spl_t s;
 
+       PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START,
+                  (int) (sz>>32), (int) sz, (int) is_64bit, 0, 0);
+
        size = (vm_size_t) sz;
 
        /*
@@ -1289,101 +1595,149 @@ pmap_create(
 
        p = (pmap_t) zalloc(pmap_zone);
        if (PMAP_NULL == p)
-         panic("pmap_create zalloc");
+               panic("pmap_create zalloc");
 
        /* init counts now since we'll be bumping some */
        simple_lock_init(&p->lock, 0);
        p->stats.resident_count = 0;
+       p->stats.resident_max = 0;
        p->stats.wired_count = 0;
        p->ref_count = 1;
        p->nx_enabled = 1;
-       p->pm_64bit = is_64bit;
-       p->pm_kernel_cr3 = FALSE;
        p->pm_shared = FALSE;
 
+       assert(!is_64bit || cpu_64bit);
+       p->pm_task_map = is_64bit ? TASK_MAP_64BIT : TASK_MAP_32BIT;
+
        if (!cpu_64bit) {
-         /* legacy 32 bit setup */
-         /* in the legacy case the pdpt layer is hardwired to 4 entries and each
-          * entry covers 1GB of addr space */
-         if (KERN_SUCCESS != kmem_alloc_wired(kernel_map, (vm_offset_t *)(&p->dirbase), NBPTD))
-           panic("pmap_create kmem_alloc_wired");
-         p->pm_hold = (vm_offset_t)zalloc(pdpt_zone);
-         if ((vm_offset_t)NULL == p->pm_hold) {
-           panic("pdpt zalloc");
-         }
-         pdpt = (pdpt_entry_t *) (( p->pm_hold + 31) & ~31);
-         p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)pdpt);
-         if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPGPTD*NPTDPG))))
-           panic("pmap_create vm_object_allocate");
+               /* legacy 32 bit setup */
+               /* in the legacy case the pdpt layer is hardwired to 4 entries and each
+                * entry covers 1GB of addr space */
+               if (KERN_SUCCESS != kmem_alloc_wired(kernel_map, (vm_offset_t *)(&p->dirbase), NBPTD))
+                       panic("pmap_create kmem_alloc_wired");
+               p->pm_hold = (vm_offset_t)zalloc(pdpt_zone);
+               if ((vm_offset_t)NULL == p->pm_hold) {
+                       panic("pdpt zalloc");
+               }
+               pdpt = (pdpt_entry_t *) (( p->pm_hold + 31) & ~31);
+               p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)pdpt);
+               if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPGPTD*NPTDPG))))
+                       panic("pmap_create vm_object_allocate");
 
-         memset((char *)p->dirbase, 0, NBPTD);
+               memset((char *)p->dirbase, 0, NBPTD);
 
-         va = (vm_offset_t)p->dirbase;
-         p->pdirbase = kvtophys(va);
+               va = (vm_offset_t)p->dirbase;
+               p->pdirbase = kvtophys(va);
 
-         template = cpu_64bit ? INTEL_PTE_VALID|INTEL_PTE_RW|INTEL_PTE_USER|INTEL_PTE_REF : INTEL_PTE_VALID;
-         for (i = 0; i< NPGPTD; i++, pdpt++) {
-           pmap_paddr_t pa;
-           pa = (pmap_paddr_t) kvtophys(va + i386_ptob(i));
-           pmap_store_pte(pdpt, pa | template);
-         }
+               template = cpu_64bit ? INTEL_PTE_VALID|INTEL_PTE_RW|INTEL_PTE_USER|INTEL_PTE_REF : INTEL_PTE_VALID;
+               for (i = 0; i< NPGPTD; i++, pdpt++ ) {
+                       pmap_paddr_t pa;
+                       pa = (pmap_paddr_t) kvtophys(va + i386_ptob(i));
+                       pmap_store_pte(pdpt, pa | template);
+               }
 
-         /* map the high shared pde */
-         pmap_store_pte(pmap_pde(p, HIGH_MEM_BASE), high_shared_pde);
+               /* map the high shared pde */
+               s = splhigh();
+               pmap_store_pte(pmap_pde(p, HIGH_MEM_BASE), high_shared_pde);
+               splx(s);
 
        } else {
+               /* 64 bit setup  */
 
-         /* 64 bit setup  */
+               /* alloc the pml4 page in kernel vm */
+               if (KERN_SUCCESS != kmem_alloc_wired(kernel_map, (vm_offset_t *)(&p->pm_hold), PAGE_SIZE))
+                       panic("pmap_create kmem_alloc_wired pml4");
 
-         /* alloc the pml4 page in kernel vm */
-         if (KERN_SUCCESS != kmem_alloc_wired(kernel_map, (vm_offset_t *)(&p->pm_hold), PAGE_SIZE))
-           panic("pmap_create kmem_alloc_wired pml4");
+               memset((char *)p->pm_hold, 0, PAGE_SIZE);
+               p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_hold);
 
-         memset((char *)p->pm_hold, 0, PAGE_SIZE);
-         p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_hold);
+               vm_page_lock_queues();
+               inuse_ptepages_count++;
+               vm_page_unlock_queues();
 
-         inuse_ptepages_count++;
-         p->stats.resident_count++;
-         p->stats.wired_count++;
+               /* allocate the vm_objs to hold the pdpt, pde and pte pages */
 
-       /* allocate the vm_objs to hold the pdpt, pde and pte pages */
+               if (NULL == (p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS))))
+                       panic("pmap_create pdpt obj");
 
-       if (NULL == (p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS))))
-         panic("pmap_create pdpt obj");
+               if (NULL == (p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS))))
+                       panic("pmap_create pdpt obj");
 
-       if (NULL == (p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS))))
-         panic("pmap_create pdpt obj");
+               if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS))))
+                       panic("pmap_create pte obj");
 
-       if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS))))
-         panic("pmap_create pte obj");
+               /* uber space points to uber mapped kernel */
+               s = splhigh();
+               pml4p = pmap64_pml4(p, 0ULL);
+               pmap_store_pte((pml4p+KERNEL_UBER_PML4_INDEX),*kernel_pmap->pm_pml4);
 
-       /* uber space points to uber mapped kernel */
-       s = splhigh();
-       pml4p = pmap64_pml4(p, 0ULL);
-       pmap_store_pte((pml4p+KERNEL_UBER_PML4_INDEX),*kernel_pmap->pm_pml4);
-       if (!is_64bit) {
-         while ((pdp = pmap64_pde(p, (uint64_t)HIGH_MEM_BASE)) == PD_ENTRY_NULL) {
-           splx(s);
-           pmap_expand_pdpt(p, (uint64_t)HIGH_MEM_BASE); /* need room for another pde entry */
-           s = splhigh();
-         }
-         pmap_store_pte(pdp, high_shared_pde);
-       }
 
-       splx(s);
+               if (!is_64bit) {
+                       while ((pdp = pmap64_pde(p, (uint64_t)HIGH_MEM_BASE)) == PD_ENTRY_NULL) {
+                               splx(s);
+                               pmap_expand_pdpt(p, (uint64_t)HIGH_MEM_BASE); /* need room for another pde entry */
+                               s = splhigh();
+                       }
+                       pmap_store_pte(pdp, high_shared_pde);
+               }
+               splx(s);
        }
 
+       PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_END,
+                  (int) p, is_64bit, 0, 0, 0);
+
        return(p);
 }
 
+/*
+ * The following routines implement the shared address optimization for 64-bit
+ * users with a 4GB page zero.
+ *
+ * pmap_set_4GB_pagezero()
+ *     is called in the exec and fork paths to mirror the kernel's
+ *     mapping in the bottom 4G of the user's pmap. The task mapping changes
+ *     from TASK_MAP_64BIT to TASK_MAP_64BIT_SHARED. This routine returns
+ *     without doing anything if the -no_shared_cr3 boot-arg is set.
+ *
+ * pmap_clear_4GB_pagezero()
+ *     is called in the exec/exit paths to undo this mirror. The task mapping
+ *     reverts to TASK_MAP_64BIT. In addition, we switch to the kernel's
+ *     CR3 by calling pmap_load_kernel_cr3(). 
+ *
+ * pmap_load_kernel_cr3()
+ *     loads cr3 with the kernel's page table. In addition to being called
+ *     by pmap_clear_4GB_pagezero(), it is used both prior to teardown and
+ *     when we go idle in the context of a shared map.
+ *
+ * Further notes on per-cpu data used:
+ *
+ *     cpu_kernel_cr3  is the cr3 for the kernel's pmap.
+ *                     This is loaded in a trampoline on entering the kernel
+ *                     from a 32-bit user (or non-shared-cr3 64-bit user).
+ *     cpu_task_cr3    is the cr3 for the current thread.
+ *                     This is loaded in a trampoline as we exit the kernel.
+ *     cpu_active_cr3  reflects the cr3 currently loaded.
+ *                     However, the low order bit is set when the
+ *                     processor is idle or interrupts are disabled
+ *                     while the system pmap lock is held. It is used by
+ *                     tlb shoot-down.
+ *     cpu_task_map    indicates whether the task cr3 belongs to
+ *                     a 32-bit, a 64-bit or a 64-bit shared map.
+ *                     The latter allows the avoidance of the cr3 load
+ *                     on kernel entry and exit.
+ *     cpu_tlb_invalid set TRUE when a tlb flush is requested.
+ *                     If the cr3 is "inactive" (the cpu is idle or the
+ *                     system-wide pmap lock is held) this not serviced by
+ *                     system-wide pmap lock is held) this is not serviced by
+ *                     an IPI but deferred until the cr3 becomes "active".
+
 void
 pmap_set_4GB_pagezero(pmap_t p)
 {
-       int             spl;
        pdpt_entry_t    *user_pdptp;
        pdpt_entry_t    *kern_pdptp;
 
-       assert(p->pm_64bit);
+       assert(p->pm_task_map != TASK_MAP_32BIT);
 
        /* Kernel-shared cr3 may be disabled by boot arg. */
        if (no_shared_cr3)
@@ -1392,64 +1746,59 @@ pmap_set_4GB_pagezero(pmap_t p)
        /*
         * Set the bottom 4 3rd-level pte's to be the kernel's.
         */
-       spl = splhigh();
+       PMAP_LOCK(p);
        while ((user_pdptp = pmap64_pdpt(p, 0x0)) == PDPT_ENTRY_NULL) {
-               splx(spl);
+               PMAP_UNLOCK(p);
                pmap_expand_pml4(p, 0x0);
-               spl = splhigh();
+               PMAP_LOCK(p);
        }
        kern_pdptp = kernel_pmap->pm_pdpt;
        pmap_store_pte(user_pdptp+0, *(kern_pdptp+0));
        pmap_store_pte(user_pdptp+1, *(kern_pdptp+1));
        pmap_store_pte(user_pdptp+2, *(kern_pdptp+2));
        pmap_store_pte(user_pdptp+3, *(kern_pdptp+3));
-
-       p->pm_kernel_cr3 = TRUE;
-
-       splx(spl);
-
-}
-
-void
-pmap_load_kernel_cr3(void)
-{
-       uint32_t        kernel_cr3;
-
-       assert(!ml_get_interrupts_enabled());
-
-       /*
-        * Reload cr3 with the true kernel cr3.
-        * Note: kernel's pml4 resides below 4GB physical.
-        */
-       kernel_cr3 = current_cpu_datap()->cpu_kernel_cr3;
-       set_cr3(kernel_cr3);
-       current_cpu_datap()->cpu_active_cr3 = kernel_cr3;
-       current_cpu_datap()->cpu_task_map = TASK_MAP_32BIT;
-       current_cpu_datap()->cpu_tlb_invalid = FALSE;
-       __asm__ volatile("mfence");
+       p->pm_task_map = TASK_MAP_64BIT_SHARED;
+       PMAP_UNLOCK(p);
 }
 
 void
 pmap_clear_4GB_pagezero(pmap_t p)
 {
-       int             spl;
        pdpt_entry_t    *user_pdptp;
 
-       if (!p->pm_kernel_cr3)
+       if (p->pm_task_map != TASK_MAP_64BIT_SHARED)
                return;
 
-       spl = splhigh();
+       PMAP_LOCK(p);
+
+       p->pm_task_map = TASK_MAP_64BIT;
+
+       pmap_load_kernel_cr3();
+
        user_pdptp = pmap64_pdpt(p, 0x0);
        pmap_store_pte(user_pdptp+0, 0);
        pmap_store_pte(user_pdptp+1, 0);
        pmap_store_pte(user_pdptp+2, 0);
        pmap_store_pte(user_pdptp+3, 0);
 
-       p->pm_kernel_cr3 = FALSE;
+       PMAP_UNLOCK(p);
+}
 
-       pmap_load_kernel_cr3();
+void
+pmap_load_kernel_cr3(void)
+{
+       uint64_t        kernel_cr3;
 
-       splx(spl);
+       assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
+       /*
+        * Reload cr3 with the true kernel cr3.
+        */
+       kernel_cr3 = current_cpu_datap()->cpu_kernel_cr3;
+       set64_cr3(kernel_cr3);
+       current_cpu_datap()->cpu_active_cr3 = kernel_cr3;
+       current_cpu_datap()->cpu_tlb_invalid = FALSE;
+       __asm__ volatile("mfence");
 }
 
 /*
@@ -1463,17 +1812,17 @@ pmap_destroy(
        register pmap_t p)
 {
        register int            c;
-       spl_t                   s;
-#if 0
-       register pt_entry_t     *pdep;
-       register vm_page_t      m;
-#endif
 
        if (p == PMAP_NULL)
                return;
-       SPLVM(s);
-       simple_lock(&p->lock);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START,
+                  (int) p, 0, 0, 0, 0);
+
+       PMAP_LOCK(p);
+
        c = --p->ref_count;
+
        if (c == 0) {
                /* 
                 * If some cpu is not using the physical pmap pointer that it
@@ -1482,84 +1831,57 @@ pmap_destroy(
                 * physically on the right pmap:
                 */
                PMAP_UPDATE_TLBS(p,
-                                VM_MIN_ADDRESS,
-                                VM_MAX_KERNEL_ADDRESS);
-
+                                0x0ULL,
+                                0xFFFFFFFFFFFFF000ULL);
        }
-       simple_unlock(&p->lock);
-       SPLX(s);
+
+       PMAP_UNLOCK(p);
 
        if (c != 0) {
-           return;     /* still in use */
+               PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
+                          (int) p, 1, 0, 0, 0);
+               return; /* still in use */
        }
 
        /*
         *      Free the memory maps, then the
         *      pmap structure.
         */
-
        if (!cpu_64bit) {
-#if 0
-       pdep = (pt_entry_t *)p->dirbase;
+               vm_page_lock_queues();
+               inuse_ptepages_count -= p->pm_obj->resident_page_count;
+               vm_page_unlock_queues();
 
-       while (pdep < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)]) {
-           int ind;
+               kmem_free(kernel_map, (vm_offset_t)p->dirbase, NBPTD);
+               zfree(pdpt_zone, (void *)p->pm_hold);
 
-           if (*pdep & INTEL_PTE_VALID) {
-               ind = pdep - (pt_entry_t *)&p->dirbase[0];
+               vm_object_deallocate(p->pm_obj);
+       } else {
+               /* 64 bit */
+               int inuse_ptepages = 0;
 
-               vm_object_lock(p->pm_obj);
-               m = vm_page_lookup(p->pm_obj, (vm_object_offset_t)ind);
-               if (m == VM_PAGE_NULL) {
-                   panic("pmap_destroy: pte page not in object");
-               }
-               vm_page_lock_queues();
-               vm_page_free(m);
-               inuse_ptepages_count--;
+               /* free 64 bit mode structs */
+               inuse_ptepages++;
+               kmem_free(kernel_map, (vm_offset_t)p->pm_hold, PAGE_SIZE);
 
-               vm_object_unlock(p->pm_obj);
-               vm_page_unlock_queues();
+               inuse_ptepages += p->pm_obj_pml4->resident_page_count;
+               vm_object_deallocate(p->pm_obj_pml4);
 
-               /*
-                *      Clear pdes, this might be headed for the cache.
-                */
-               pmap_store_pte(pdep, 0);
-               pdep++;
-           }
-           else {
-             pmap_store_pte(pdep, 0);
-             pdep++;
-           }
-       
-       }
-#else
-       inuse_ptepages_count -= p->pm_obj->resident_page_count;
-#endif
-       vm_object_deallocate(p->pm_obj);
-         kmem_free(kernel_map, (vm_offset_t)p->dirbase, NBPTD);
-         zfree(pdpt_zone, (void *)p->pm_hold);
-       } else {
-
-         /* 64 bit */
-
-         pmap_unmap_sharedpage(p);
-
-         /* free 64 bit mode structs */
-         inuse_ptepages_count--;
-         kmem_free(kernel_map, (vm_offset_t)p->pm_hold, PAGE_SIZE);
-
-         inuse_ptepages_count -= p->pm_obj_pml4->resident_page_count;
-         vm_object_deallocate(p->pm_obj_pml4);
-
-         inuse_ptepages_count -= p->pm_obj_pdpt->resident_page_count;
-         vm_object_deallocate(p->pm_obj_pdpt);
+               inuse_ptepages += p->pm_obj_pdpt->resident_page_count;
+               vm_object_deallocate(p->pm_obj_pdpt);
 
-         inuse_ptepages_count -= p->pm_obj->resident_page_count;
-         vm_object_deallocate(p->pm_obj);
+               inuse_ptepages += p->pm_obj->resident_page_count;
+               vm_object_deallocate(p->pm_obj);
 
+               vm_page_lock_queues();
+               inuse_ptepages_count -= inuse_ptepages;
+               vm_page_unlock_queues();
        }
-
        zfree(pmap_zone, p);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
+                  0, 0, 0, 0, 0);
+
 }
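
The destroy path above drops the reference count under the pmap lock but defers the actual teardown until the lock has been released, and only when it held the last reference. A minimal user-space sketch of that pattern; struct obj, obj_destroy() and the pthread locking are illustrative stand-ins, not kernel interfaces:

#include <pthread.h>
#include <stdlib.h>

struct obj {
    pthread_mutex_t  lock;
    int              ref_count;
    void            *tables;        /* stand-in for the pmap's page-table pages */
};

static void obj_destroy(struct obj *p)
{
    int c;

    if (p == NULL)
        return;

    pthread_mutex_lock(&p->lock);
    c = --p->ref_count;             /* mirrors "c = --p->ref_count" above */
    pthread_mutex_unlock(&p->lock);

    if (c != 0)
        return;                     /* still in use by another holder */

    /* we held the last reference: expensive teardown happens outside the lock */
    free(p->tables);
    pthread_mutex_destroy(&p->lock);
    free(p);
}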
 
 /*
@@ -1570,14 +1892,11 @@ void
 pmap_reference(
        register pmap_t p)
 {
-       spl_t   s;
 
        if (p != PMAP_NULL) {
-               SPLVM(s);
-               simple_lock(&p->lock);
+               PMAP_LOCK(p);
                p->ref_count++;
-               simple_unlock(&p->lock);
-               SPLX(s);
+               PMAP_UNLOCK(p);
        }
 }
 
@@ -1593,108 +1912,218 @@ pmap_reference(
  *     Assumes that the pte-page exists.
  */
 
-static void
+void
 pmap_remove_range(
        pmap_t                  pmap,
-       vm_map_offset_t         vaddr,
+       vm_map_offset_t         start_vaddr,
        pt_entry_t              *spte,
        pt_entry_t              *epte)
 {
        register pt_entry_t     *cpte;
-       int                     num_removed, num_unwired;
+       pv_hashed_entry_t       pvh_et = PV_HASHED_ENTRY_NULL;
+       pv_hashed_entry_t       pvh_eh = PV_HASHED_ENTRY_NULL;
+       pv_hashed_entry_t       pvh_e;
+       int                     pvh_cnt = 0;
+       int                     num_removed, num_unwired, num_found;
        int                     pai;
        pmap_paddr_t            pa;
+       vm_map_offset_t         vaddr;
+       int                     pvhash_idx;
+       uint32_t                pv_cnt;
 
        num_removed = 0;
        num_unwired = 0;
+       num_found   = 0;
 
-       for (cpte = spte; cpte < epte;
-            cpte++, vaddr += PAGE_SIZE) {
+       if (pmap != kernel_pmap &&
+           pmap->pm_task_map == TASK_MAP_32BIT &&
+           start_vaddr >= HIGH_MEM_BASE) {
+               /*
+                * The range is in the "high_shared_pde" which is shared
+                * between the kernel and all 32-bit tasks.  It holds
+                * the 32-bit commpage but also the trampolines, GDT, etc...
+                * so we can't let user tasks remove anything from it.
+                */
+               return;
+       }
+
+       /* invalidate the PTEs first to "freeze" them */
+       for (cpte = spte, vaddr = start_vaddr;
+            cpte < epte;
+            cpte++, vaddr += PAGE_SIZE_64) {
 
            pa = pte_to_pa(*cpte);
            if (pa == 0)
                continue;
+           num_found++;
 
            if (iswired(*cpte))
                num_unwired++;
 
-           if (!valid_page(i386_btop(pa))) {
+           pai = pa_index(pa);
 
+           if (!managed_page(pai)) {
                /*
                 *      Outside range of managed physical memory.
                 *      Just remove the mappings.
                 */
-               register pt_entry_t     *lpte = cpte;
-
-               pmap_store_pte(lpte, 0);
+               pmap_store_pte(cpte, 0);
                continue;
            }
-           num_removed++;
+
+           /* invalidate the PTE */ 
+           pmap_update_pte(cpte, *cpte, (*cpte & ~INTEL_PTE_VALID));
+       }
+
+       if (num_found == 0) {
+               /* nothing was changed: we're done */
+               goto update_counts;
+       }
+
+       /* propagate the invalidates to other CPUs */
+
+       PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr);
+
+       for (cpte = spte, vaddr = start_vaddr;
+            cpte < epte;
+            cpte++, vaddr += PAGE_SIZE_64) {
+
+           pa = pte_to_pa(*cpte);
+           if (pa == 0)
+               continue;
 
            pai = pa_index(pa);
+
            LOCK_PVH(pai);
 
+           pa = pte_to_pa(*cpte);
+           if (pa == 0) {
+             UNLOCK_PVH(pai);
+             continue;
+           }
+             
+           num_removed++;
+
            /*
-            *  Get the modify and reference bits.
+            *  Get the modify and reference bits, then
+            *  nuke the entry in the page table
             */
-           {
-               register pt_entry_t     *lpte;
-
-               lpte = cpte;
-               pmap_phys_attributes[pai] |=
-                       *lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
-               pmap_store_pte(lpte, 0);
-
-           }
+           /* remember reference and change */
+           pmap_phys_attributes[pai] |=
+                   (char)(*cpte & (PHYS_MODIFIED | PHYS_REFERENCED));
+           /* completely invalidate the PTE */
+           pmap_store_pte(cpte, 0);
 
            /*
             *  Remove the mapping from the pvlist for
             *  this physical page.
             */
            {
-               register pv_entry_t     pv_h, prev, cur;
+             pv_rooted_entry_t pv_h;
+             pv_hashed_entry_t *pprevh;
+             ppnum_t ppn = (ppnum_t)pai;
 
                pv_h = pai_to_pvh(pai);
-               if (pv_h->pmap == PMAP_NULL) {
-                   panic("pmap_remove: null pv_list!");
-               }
-               if (pv_h->va == vaddr && pv_h->pmap == pmap) {
+               pvh_e = PV_HASHED_ENTRY_NULL;
+               if (pv_h->pmap == PMAP_NULL)
+                   panic("pmap_remove_range: null pv_list!");
+
+               if (pv_h->va == vaddr && pv_h->pmap == pmap) { /* rooted or not */
                    /*
-                    * Header is the pv_entry.  Copy the next one
-                    * to header and free the next one (we cannot
-                    * free the header)
+                    * Header is the pv_rooted_entry, which we can't free.
+                    * If a hashed entry is queued after it, remove that entry
+                    * from the ppn queue and the hash chain, copy it into the
+                    * rooted entry, and then free the hashed entry instead.
                     */
-                   cur = pv_h->next;
-                   if (cur != PV_ENTRY_NULL) {
-                       *pv_h = *cur;
-                       PV_FREE(cur);
+
+                 pvh_e = (pv_hashed_entry_t)queue_next(&pv_h->qlink);
+                 if (pv_h != (pv_rooted_entry_t)pvh_e) {  /* any queued after rooted? */
+                   CHK_NPVHASH();
+                   pvhash_idx = pvhashidx(pvh_e->pmap,pvh_e->va);
+                   LOCK_PV_HASH(pvhash_idx);
+                   remque(&pvh_e->qlink);
+                   {
+                     pprevh = pvhash(pvhash_idx);
+                     if (PV_HASHED_ENTRY_NULL == *pprevh) {
+                       panic("pmap_remove_range empty hash removing rooted pv");
+                     }
                    }
-                   else {
-                       pv_h->pmap = PMAP_NULL;
+                   pmap_pvh_unlink(pvh_e);
+                   UNLOCK_PV_HASH(pvhash_idx);
+                   pv_h->pmap = pvh_e->pmap;
+                   pv_h->va = pvh_e->va;   /* dispose of pvh_e */
+                 } else {  /* none queued after rooted */
+                   pv_h->pmap = PMAP_NULL;
+                   pvh_e = PV_HASHED_ENTRY_NULL;
+                 }   /* any queued after rooted */
+
+               } else { /* rooted or not */
+                 /* Not removing the rooted pv: find it on the hash chain,
+                  * remove it from the ppn queue and the hash chain, and free it. */
+                 CHK_NPVHASH();
+                 pvhash_idx = pvhashidx(pmap,vaddr);
+                 LOCK_PV_HASH(pvhash_idx);
+                 pprevh = pvhash(pvhash_idx);
+                 if (PV_HASHED_ENTRY_NULL == *pprevh) {
+                   panic("pmap_remove_range empty hash removing hashed pv");
                    }
-               }
-               else {
-                   cur = pv_h;
-                   do {
-                       prev = cur;
-                       if ((cur = prev->next) == PV_ENTRY_NULL) {
-                         panic("pmap-remove: mapping not in pv_list!");
-                       }
-                   } while (cur->va != vaddr || cur->pmap != pmap);
-                   prev->next = cur->next;
-                   PV_FREE(cur);
-               }
+                 pvh_e = *pprevh;
+                 pmap_pv_hashlist_walks++;
+                 pv_cnt = 0;
+                 while (PV_HASHED_ENTRY_NULL != pvh_e) {
+                       pv_cnt++;
+                       if (pvh_e->pmap == pmap && pvh_e->va == vaddr && pvh_e->ppn == ppn) break;
+                       pprevh = &pvh_e->nexth;
+                       pvh_e = pvh_e->nexth;
+                 }
+                 pmap_pv_hashlist_cnts += pv_cnt;
+                 if (pmap_pv_hashlist_max < pv_cnt) pmap_pv_hashlist_max = pv_cnt;
+                 if (PV_HASHED_ENTRY_NULL == pvh_e) panic("pmap_remove_range pv not on hash");
+                 *pprevh = pvh_e->nexth;
+                 remque(&pvh_e->qlink);
+                 UNLOCK_PV_HASH(pvhash_idx);
+
+               } /* rooted or not */
+
                UNLOCK_PVH(pai);
-           }
+
+               if (pvh_e != PV_HASHED_ENTRY_NULL) {
+                 pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+                 pvh_eh = pvh_e;
+
+                 if (pvh_et == PV_HASHED_ENTRY_NULL) {
+                   pvh_et = pvh_e;
+                 }
+
+                 pvh_cnt++;
+               }
+
+           } /* removing mappings for this phy page */
+       } /* for loop */
+       
+       if (pvh_eh != PV_HASHED_ENTRY_NULL) {
+           PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);
        }
 
+update_counts:
        /*
         *      Update the counts
         */
+#if TESTING
+       if (pmap->stats.resident_count < num_removed)
+               panic("pmap_remove_range: resident_count");
+#endif
        assert(pmap->stats.resident_count >= num_removed);
-       pmap->stats.resident_count -= num_removed;
+       OSAddAtomic(-num_removed, (SInt32 *) &pmap->stats.resident_count);
+
+#if TESTING
+       if (pmap->stats.wired_count < num_unwired)
+               panic("pmap_remove_range: wired_count");
+#endif
        assert(pmap->stats.wired_count >= num_unwired);
-       pmap->stats.wired_count -= num_unwired;
+       OSAddAtomic(-num_unwired, (SInt32 *) &pmap->stats.wired_count);
+
+       return;
 }
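
pmap_remove_range() now works in two passes: it first clears the valid bit on every live PTE to "freeze" the range, issues a single TLB shootdown for the whole range, and only then harvests the referenced/modified bits and unlinks the pv entries. A compilable sketch of that two-pass shape, using a plain uint64_t array as the page table; the PTE_* constants are illustrative and tlb_flush_range() is a stub standing in for PMAP_UPDATE_TLBS():

#include <stdint.h>
#include <stddef.h>

#define PTE_VALID  0x001ULL
#define PTE_REF    0x020ULL
#define PTE_MOD    0x040ULL
#define PAGE_SZ    4096ULL

/* stand-in for PMAP_UPDATE_TLBS(): a real kernel would IPI the other CPUs here */
static void tlb_flush_range(uint64_t start, uint64_t end)
{
    (void)start;
    (void)end;
}

static void remove_range(uint64_t *spte, uint64_t *epte,
                         uint64_t start_va, uint8_t *attr /* per-page ref/mod bits */)
{
    uint64_t *cpte;
    uint64_t  va;
    int       num_found = 0;

    /* pass 1: clear the valid bit to freeze every live entry */
    for (cpte = spte, va = start_va; cpte < epte; cpte++, va += PAGE_SZ) {
        if (*cpte == 0)
            continue;
        num_found++;
        *cpte &= ~PTE_VALID;
    }
    if (num_found == 0)
        return;

    /* one shootdown for the whole range instead of one per page */
    tlb_flush_range(start_va, va);

    /* pass 2: harvest referenced/modified bits, then clear each entry
     * (the real code also takes the per-page PV lock and unlinks the pv entry) */
    for (cpte = spte, va = start_va; cpte < epte; cpte++, va += PAGE_SZ) {
        if (*cpte == 0)
            continue;
        attr[(va - start_va) / PAGE_SZ] |= (uint8_t)(*cpte & (PTE_REF | PTE_MOD));
        *cpte = 0;
    }
}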
 
 /*
@@ -1726,36 +2155,91 @@ pmap_remove(
        addr64_t        s64,
        addr64_t        e64)
 {
-       spl_t                   spl;
-       register pt_entry_t     *pde;
-       register pt_entry_t     *spte, *epte;
-       addr64_t                l64;
-       addr64_t                orig_s64;
+       pt_entry_t      *pde;
+       pt_entry_t      *spte, *epte;
+       addr64_t        l64;
+       addr64_t        orig_s64;
+       uint64_t        deadline;
+
+       pmap_intr_assert();
 
        if (map == PMAP_NULL || s64 == e64)
                return;
+       PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_START,
+                  (int) map,
+                  (int) (s64>>32), (int) s64,
+                  (int) (e64>>32), (int) e64);
+
+       PMAP_LOCK(map);
+
+#if 0
+       /*
+        * Check that address range in the kernel does not overlap the stacks.
+        * We initialize local static min/max variables once to avoid making
+        * 2 function calls for every remove. Note also that these functions
+        * both return 0 before kernel stacks have been initialized, and hence
+        * the panic is not triggered in this case.
+        */
+       if (map == kernel_pmap) {
+               static vm_offset_t      kernel_stack_min = 0;
+               static vm_offset_t      kernel_stack_max = 0;
+
+               if (kernel_stack_min == 0) {
+                       kernel_stack_min = min_valid_stack_address();
+                       kernel_stack_max = max_valid_stack_address();
+               }
+               if  ((kernel_stack_min <= s64 && s64 <  kernel_stack_max) ||
+                    (kernel_stack_min <  e64 && e64 <= kernel_stack_max))
+                       panic("pmap_remove() attempted in kernel stack");
+       }
+#else
 
-       PMAP_READ_LOCK(map, spl);
+       /*
+        * The values of kernel_stack_min and kernel_stack_max are no longer
+        * relevant now that we allocate kernel stacks anywhere in the kernel map,
+        * so the old code above no longer applies.  If we wanted to check that
+        * we weren't removing a mapping of a page in a kernel stack we'd have to
+        * mark the PTE with an unused bit and check that here.
+        */
+
+#endif
+
+       deadline = rdtsc64() + max_preemption_latency_tsc;
 
        orig_s64 = s64;
 
        while (s64 < e64) {
+
            l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size-1);
            if (l64 > e64)
                l64 = e64;
            pde = pmap_pde(map, s64);
+
            if (pde && (*pde & INTEL_PTE_VALID)) {
                spte = (pt_entry_t *)pmap_pte(map, (s64 & ~(pde_mapped_size-1)));
                spte = &spte[ptenum(s64)];
                epte = &spte[intel_btop(l64-s64)];
+
                pmap_remove_range(map, s64, spte, epte);
            }
            s64 = l64;
            pde++;
+
+           if (s64 < e64 && rdtsc64() >= deadline) {
+               PMAP_UNLOCK(map);
+               PMAP_LOCK(map);
+
+             deadline = rdtsc64() + max_preemption_latency_tsc;
+           }
+
        }
-       PMAP_UPDATE_TLBS(map, orig_s64, e64);
 
-       PMAP_READ_UNLOCK(map, spl);
+       PMAP_UNLOCK(map);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_END,
+                  (int) map, 0, 0, 0, 0);
+
 }
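
pmap_remove() bounds how long it holds the pmap lock by comparing rdtsc64() against a deadline and briefly dropping and re-taking the lock once the deadline passes, so pending preemption can occur. A user-space sketch of the same idea; clock_gettime() stands in for rdtsc64() and MAX_HOLD_NS for max_preemption_latency_tsc:

#include <pthread.h>
#include <stdint.h>
#include <time.h>

#define MAX_HOLD_NS 50000ULL            /* illustrative latency bound */

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static void process_chunks(pthread_mutex_t *lock, int nchunks, void (*do_chunk)(int))
{
    uint64_t deadline;
    int i;

    pthread_mutex_lock(lock);
    deadline = now_ns() + MAX_HOLD_NS;

    for (i = 0; i < nchunks; i++) {
        do_chunk(i);

        if (i + 1 < nchunks && now_ns() >= deadline) {
            /* breathe: let waiters (or preemption) in, then pick up where we left off */
            pthread_mutex_unlock(lock);
            pthread_mutex_lock(lock);
            deadline = now_ns() + MAX_HOLD_NS;
        }
    }
    pthread_mutex_unlock(lock);
}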
 
 /*
@@ -1770,24 +2254,36 @@ pmap_page_protect(
         ppnum_t         pn,
        vm_prot_t       prot)
 {
-       pv_entry_t              pv_h, prev;
-       register pv_entry_t     pv_e;
-       register pt_entry_t     *pte;
+       pv_hashed_entry_t               pvh_eh = PV_HASHED_ENTRY_NULL;
+       pv_hashed_entry_t               pvh_et = PV_HASHED_ENTRY_NULL;
+       pv_hashed_entry_t       nexth;
+       int                     pvh_cnt = 0;
+       pv_rooted_entry_t               pv_h;
+       pv_rooted_entry_t               pv_e;
+       pv_hashed_entry_t       pvh_e;
+       pt_entry_t              *pte;
        int                     pai;
        register pmap_t         pmap;
-       spl_t                   spl;
        boolean_t               remove;
-       pmap_paddr_t            phys;
+       int                     pvhash_idx;
 
+       pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
+       if (pn == vm_page_guard_addr)
+               return;
 
-       if (!valid_page(pn)) {
+       pai = ppn_to_pai(pn);
+
+       if (!managed_page(pai)) {
            /*
             *  Not a managed page.
             */
            return;
        }
 
+       PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_START,
+                  (int) pn, (int) prot, 0, 0, 0);
+
        /*
         * Determine the new protection.
         */
@@ -1802,116 +2298,132 @@ pmap_page_protect(
                remove = TRUE;
                break;
        }
-       phys = (pmap_paddr_t)i386_ptob(pn);
-       pai = pa_index(phys);
-       pv_h = pai_to_pvh(pai);
 
+       pv_h = pai_to_pvh(pai);
 
-       /*
-        *      Lock the pmap system first, since we will be changing
-        *      several pmaps.
-        */
-       PMAP_WRITE_LOCK(spl);
+       LOCK_PVH(pai);
 
        /*
         * Walk down PV list, changing or removing all mappings.
-        * We do not have to lock the pv_list because we have
-        * the entire pmap system locked.
         */
        if (pv_h->pmap != PMAP_NULL) {
 
-               prev = pv_e = pv_h;
+           pv_e = pv_h;
+           pvh_e = (pv_hashed_entry_t)pv_e; /* cheat */
 
-               do {
-                       register vm_map_offset_t vaddr;
+           do {
+               register vm_map_offset_t vaddr;
+               pmap = pv_e->pmap;
 
-                       pmap = pv_e->pmap;
-                       /*
-                        * Lock the pmap to block pmap_extract and similar routines.
-                        */
-                       simple_lock(&pmap->lock);
+               vaddr = pv_e->va;
+               pte = pmap_pte(pmap, vaddr);
+               
+               if (0 == pte) {
+                   kprintf("pmap_page_protect pmap %p pn 0x%x vaddr 0x%llx\n",pmap, pn, vaddr);
+                   panic("pmap_page_protect");
+               }
 
-                       vaddr = pv_e->va;
-                       pte = pmap_pte(pmap, vaddr);
-                       if(0 == pte) {
-                         kprintf("pmap_page_protect pmap 0x%x pn 0x%x vaddr 0x%llx\n",pmap, pn, vaddr);
-                         panic("pmap_page_protect");
-                       }
-                       /*
-                        * Consistency checks.
-                        */
-                       /* assert(*pte & INTEL_PTE_VALID); XXX */
-                       /* assert(pte_to_phys(*pte) == phys); */
+               nexth = (pv_hashed_entry_t)queue_next(&pvh_e->qlink);  /* if there is one */
 
+               /*
+                * Remove the mapping if new protection is NONE
+                * or if write-protecting a kernel mapping.
+                */
+               if (remove || pmap == kernel_pmap) {
+                   /*
+                    * Remove the mapping, collecting any modify bits.
+                    */
+                   pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
 
-                       /*
-                        * Remove the mapping if new protection is NONE
-                        * or if write-protecting a kernel mapping.
-                        */
-                       if (remove || pmap == kernel_pmap) {
-                               /*
-                                * Remove the mapping, collecting any modify bits.
-                                */
-                               pmap_store_pte(pte, *pte & ~INTEL_PTE_VALID);
+                   PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
 
-                               PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
+                   pmap_phys_attributes[pai] |= *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
 
-                               pmap_phys_attributes[pai] |= *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
+                   pmap_store_pte(pte, 0);
 
-                               pmap_store_pte(pte, 0);
+#if TESTING
+                   if (pmap->stats.resident_count < 1)
+                       panic("pmap_page_protect: resident_count");
+#endif
+                   assert(pmap->stats.resident_count >= 1);
+                   OSAddAtomic(-1, (SInt32 *) &pmap->stats.resident_count);
 
+                   /*
+                    * Deal with the pv_rooted_entry.
+                    */
 
-                               //XXX breaks DEBUG build                    assert(pmap->stats.resident_count >= 1);
-                               pmap->stats.resident_count--;
+                   if (pv_e == pv_h) {
+                       /*
+                        * Fix up head later.
+                        */
+                       pv_h->pmap = PMAP_NULL;
+                   }
+                   else {
+                       /*
+                        * Delete this entry.
+                        */
+                     CHK_NPVHASH();
+                     pvhash_idx = pvhashidx(pvh_e->pmap,pvh_e->va);
+                     LOCK_PV_HASH(pvhash_idx);
+                     remque(&pvh_e->qlink);
+                     pmap_pvh_unlink(pvh_e);
+                     UNLOCK_PV_HASH(pvhash_idx);
+
+                     pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+                       pvh_eh = pvh_e;
+
+                       if (pvh_et == PV_HASHED_ENTRY_NULL)
+                           pvh_et = pvh_e;
+                       pvh_cnt++;
+                   }
+               } else {
+                   /*
+                    * Write-protect.
+                    */
+                   pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WRITE));
+                   PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
+               }
 
-                               /*
-                                * Remove the pv_entry.
-                                */
-                               if (pv_e == pv_h) {
-                                       /*
-                                        * Fix up head later.
-                                        */
-                                       pv_h->pmap = PMAP_NULL;
-                               }
-                               else {
-                                       /*
-                                        * Delete this entry.
-                                        */
-                                       prev->next = pv_e->next;
-                                       PV_FREE(pv_e);
-                               }
-                       } else {
-                               /*
-                                * Write-protect.
-                                */
-                               pmap_store_pte(pte, *pte & ~INTEL_PTE_WRITE);
+               pvh_e = nexth;
+           } while ((pv_e = (pv_rooted_entry_t)nexth) != pv_h);
 
-                               PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
-                               /*
-                                * Advance prev.
-                                */
-                               prev = pv_e;
-                       }
+           /*
+            * If pv_head mapping was removed, fix it up.
+            */
 
-                       simple_unlock(&pmap->lock);
+           if (pv_h->pmap == PMAP_NULL) {
+             pvh_e = (pv_hashed_entry_t)queue_next(&pv_h->qlink);
+
+             if (pvh_e != (pv_hashed_entry_t)pv_h) {
+               CHK_NPVHASH();
+               pvhash_idx = pvhashidx(pvh_e->pmap,pvh_e->va);
+               LOCK_PV_HASH(pvhash_idx);
+               remque(&pvh_e->qlink);
+               pmap_pvh_unlink(pvh_e);
+               UNLOCK_PV_HASH(pvhash_idx);
+                 pv_h->pmap = pvh_e->pmap;
+                 pv_h->va = pvh_e->va;
+                 pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+                   pvh_eh = pvh_e;
+
+                   if (pvh_et == PV_HASHED_ENTRY_NULL)
+                       pvh_et = pvh_e;
+                   pvh_cnt++;
+               }
+           }
+       }
+       if (pvh_eh != PV_HASHED_ENTRY_NULL) {
+           PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);
+       }
 
-               } while ((pv_e = prev->next) != PV_ENTRY_NULL);
+       UNLOCK_PVH(pai);
 
-               /*
-                * If pv_head mapping was removed, fix it up.
-                */
-               if (pv_h->pmap == PMAP_NULL) {
-                       pv_e = pv_h->next;
+       PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_END,
+                  0, 0, 0, 0, 0);
 
-                       if (pv_e != PV_ENTRY_NULL) {
-                               *pv_h = *pv_e;
-                               PV_FREE(pv_e);
-                       }
-               }
-       }
-       PMAP_WRITE_UNLOCK(spl);
 }
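
The walk above either write-protects or removes each mapping of the physical page depending on the requested protection, and kernel mappings are always removed rather than write-protected. A sketch of that per-mapping decision over a bare uint64_t PTE; the MPROT_* and PTE_* constants are illustrative, not the kernel's:

#include <stdint.h>
#include <stdbool.h>

#define PTE_WRITE   0x2ULL

#define MPROT_READ  0x1
#define MPROT_WRITE 0x2
#define MPROT_EXEC  0x4

static void page_protect_one(uint64_t *pte, int prot, bool kernel_mapping)
{
    /* this path only lowers permissions: read-only (with or without execute)
     * means write-protect; anything else means remove the mapping outright */
    bool remove = !(prot == MPROT_READ || prot == (MPROT_READ | MPROT_EXEC));

    if (remove || kernel_mapping) {
        /* the real code also collects the modify bit and unlinks the pv entry */
        *pte = 0;
    } else {
        *pte &= ~PTE_WRITE;
    }
    /* either way, the TLB entry for this va must be flushed afterwards */
}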
 
+
 /*
  *     Routine:
  *             pmap_disconnect
@@ -1924,7 +2436,7 @@ pmap_page_protect(
 unsigned int pmap_disconnect(
        ppnum_t pa)
 {
-       pmap_page_protect(pa, 0);                               /* disconnect the page */
+       pmap_page_protect(pa, 0);                       /* disconnect the page */
        return (pmap_get_refmod(pa));                   /* return ref/chg status */
 }
 
@@ -1944,8 +2456,10 @@ pmap_protect(
        register pt_entry_t     *spte, *epte;
        vm_map_offset_t         lva;
        vm_map_offset_t         orig_sva;
-       spl_t           spl;
        boolean_t       set_NX;
+       int             num_found = 0;
+
+       pmap_intr_assert();
 
        if (map == PMAP_NULL)
                return;
@@ -1955,13 +2469,17 @@ pmap_protect(
                return;
        }
 
+       PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START,
+                  (int) map,
+                  (int) (sva>>32), (int) sva,
+                  (int) (eva>>32), (int) eva);
+
        if ( (prot & VM_PROT_EXECUTE) || !nx_enabled || !map->nx_enabled )
                set_NX = FALSE;
        else
                set_NX = TRUE;
 
-       SPLVM(spl);
-       simple_lock(&map->lock);
+       PMAP_LOCK(map);
 
        orig_sva = sva;
        while (sva < eva) {
@@ -1975,29 +2493,34 @@ pmap_protect(
                epte = &spte[intel_btop(lva-sva)];
 
                while (spte < epte) {
+
                    if (*spte & INTEL_PTE_VALID) {
                      
                        if (prot & VM_PROT_WRITE)
-                           pmap_store_pte(spte, *spte | INTEL_PTE_WRITE);
+                           pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_WRITE));
                        else
-                           pmap_store_pte(spte, *spte & ~INTEL_PTE_WRITE);
+                           pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_WRITE));
 
                        if (set_NX == TRUE)
-                           pmap_store_pte(spte, *spte | INTEL_PTE_NX);
+                           pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_NX));
                        else
-                           pmap_store_pte(spte, *spte & ~INTEL_PTE_NX);
+                           pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_NX));
 
+                       num_found++;
                    }
                    spte++;
                }
            }
            sva = lva;
-           pde++;
        }
-       PMAP_UPDATE_TLBS(map, orig_sva, eva);
+       if (num_found)
+           PMAP_UPDATE_TLBS(map, orig_sva, eva);
+
+       PMAP_UNLOCK(map);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END,
+                  0, 0, 0, 0, 0);
 
-       simple_unlock(&map->lock);
-       SPLX(spl);
 }
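
pmap_protect() edits the write and no-execute bits of every valid PTE in the range and skips the TLB flush entirely when nothing was touched. A sketch of those bit edits over a bare PTE array, with illustrative PTE_* constants:

#include <stdint.h>
#include <stdbool.h>

#define PTE_VALID 0x1ULL
#define PTE_WRITE 0x2ULL
#define PTE_NX    (1ULL << 63)

static int protect_range(uint64_t *spte, uint64_t *epte, bool writable, bool set_nx)
{
    int num_found = 0;

    for (; spte < epte; spte++) {
        if (!(*spte & PTE_VALID))
            continue;

        if (writable)
            *spte |= PTE_WRITE;
        else
            *spte &= ~PTE_WRITE;

        if (set_nx)
            *spte |= PTE_NX;
        else
            *spte &= ~PTE_NX;

        num_found++;
    }
    return num_found;       /* caller flushes TLBs only when this is non-zero */
}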
 
 /* Map a (possibly) autogenned block */
@@ -2011,13 +2534,13 @@ pmap_map_block(
        int             attr,
        __unused unsigned int   flags)
 {
-        uint32_t page;
+    uint32_t page;
 
-       for (page = 0; page < size; page++) {
-               pmap_enter(pmap, va, pa, prot, attr, TRUE);
-               va += PAGE_SIZE;
-               pa++;
-       }
+    for (page = 0; page < size; page++) {
+       pmap_enter(pmap, va, pa, prot, attr, TRUE);
+       va += PAGE_SIZE;
+       pa++;
+    }
 }
 
 
@@ -2036,33 +2559,41 @@ pmap_map_block(
 void
 pmap_enter(
        register pmap_t         pmap,
-       vm_map_offset_t         vaddr,
+       vm_map_offset_t         vaddr,
        ppnum_t                 pn,
        vm_prot_t               prot,
        unsigned int            flags,
        boolean_t               wired)
 {
        register pt_entry_t     *pte;
-       register pv_entry_t     pv_h;
+       register pv_rooted_entry_t      pv_h;
        register int            pai;
-       pv_entry_t              pv_e;
+       pv_hashed_entry_t               pvh_e;
+       pv_hashed_entry_t               pvh_new;
+       pv_hashed_entry_t       *hashp;
        pt_entry_t              template;
-       spl_t                   spl;
        pmap_paddr_t            old_pa;
-       pmap_paddr_t            pa = (pmap_paddr_t)i386_ptob(pn);
+       pmap_paddr_t             pa = (pmap_paddr_t)i386_ptob(pn);
        boolean_t               need_tlbflush = FALSE;
        boolean_t               set_NX;
+       char                    oattr;
+       int                     pvhash_idx;
+       uint32_t                pv_cnt;
+       boolean_t               old_pa_locked;
 
-       XPR(0x80000000, "%x/%x: pmap_enter %x/%qx/%x\n",
-           current_thread(),
-           current_thread(), 
-           pmap, vaddr, pn);
-
+       pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
        if (pmap_debug)
                printf("pmap(%qx, %x)\n", vaddr, pn);
        if (pmap == PMAP_NULL)
                return;
+       if (pn == vm_page_guard_addr)
+               return;
+
+       PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_START,
+                  (int) pmap,
+                  (int) (vaddr>>32), (int) vaddr,
+                  (int) pn, prot);
 
        if ( (prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled )
                set_NX = FALSE;
@@ -2076,9 +2607,12 @@ pmap_enter(
         *      and allocate one.  Then we will retry, throwing away
         *      the allocated entry later (if we no longer need it).
         */
-       pv_e = PV_ENTRY_NULL;
 
-       PMAP_READ_LOCK(pmap, spl);
+       pvh_new = PV_HASHED_ENTRY_NULL;
+Retry:
+       pvh_e = PV_HASHED_ENTRY_NULL;
+
+       PMAP_LOCK(pmap);
 
        /*
         *      Expand pmap to include this pte.  Assume that
@@ -2090,22 +2624,42 @@ pmap_enter(
                /*
                 *      Must unlock to expand the pmap.
                 */
-               PMAP_READ_UNLOCK(pmap, spl);
-
+               PMAP_UNLOCK(pmap);
                pmap_expand(pmap, vaddr); /* going to grow pde level page(s) */
+               PMAP_LOCK(pmap);
+       }
+
+       old_pa = pte_to_pa(*pte);
+       pai = pa_index(old_pa);
+       old_pa_locked = FALSE;
+
+       /*
+        * If we have a previous managed page, lock its pv entry now. After
+        * taking the lock, re-check the PTE: if another path cleared the
+        * mapping first, drop the lock again.
+        */
 
-               PMAP_READ_LOCK(pmap, spl);
+       if ((0 != old_pa) && managed_page(pai)) {
+         LOCK_PVH(pai);
+         old_pa_locked = TRUE;
+         old_pa = pte_to_pa(*pte);
+         if (0 == old_pa) {
+           UNLOCK_PVH(pai);  /* some other path beat us to it */
+           old_pa_locked = FALSE;
+         }
        }
+
+
        /*
-        *      Special case if the physical page is already mapped
+        *      Special case if the incoming physical page is already mapped
         *      at this address.
         */
-       old_pa = pte_to_pa(*pte);
        if (old_pa == pa) {
+
            /*
             *  May be changing its wired attribute or protection
             */
-       
+
            template = pa_to_pte(pa) | INTEL_PTE_VALID;
 
            if(VM_MEM_NOT_CACHEABLE == (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT))) {
@@ -2125,22 +2679,22 @@ pmap_enter(
            if (wired) {
                template |= INTEL_PTE_WIRED;
                if (!iswired(*pte))
-                   pmap->stats.wired_count++;
+                   OSAddAtomic(+1, (SInt32 *) &pmap->stats.wired_count);
            }
            else {
                if (iswired(*pte)) {
                    assert(pmap->stats.wired_count >= 1);
-                   pmap->stats.wired_count--;
+                   OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count);
                }
            }
 
-               if (*pte & INTEL_PTE_MOD)
-                   template |= INTEL_PTE_MOD;
-
-               pmap_store_pte(pte, template);
-               pte++;
-
-               need_tlbflush = TRUE;
+           /* store modified PTE and preserve RC bits */ 
+           pmap_update_pte(pte, *pte, template | (*pte & (INTEL_PTE_REF | INTEL_PTE_MOD)));
+           if (old_pa_locked) {
+             UNLOCK_PVH(pai);
+             old_pa_locked = FALSE;
+           }
+           need_tlbflush = TRUE;
            goto Done;
        }
 
@@ -2151,12 +2705,6 @@ pmap_enter(
         *         2) Add pvlist entry for new mapping
         *         3) Enter new mapping.
         *
-        *      SHARING FAULTS IS HORRIBLY BROKEN
-        *      SHARING_FAULTS complicates this slightly in that it cannot
-        *      replace the mapping, but must remove it (because adding the
-        *      pvlist entry for the new mapping may remove others), and
-        *      hence always enters the new mapping at step 3)
-        *
         *      If the old physical page is not managed step 1) is skipped
         *      (except for updating the TLBs), and the mapping is
         *      overwritten at step 3).  If the new physical page is not
@@ -2171,255 +2719,211 @@ pmap_enter(
             *  to overwrite the old one.
             */
 
-           if (valid_page(i386_btop(old_pa))) {
+           /* invalidate the PTE */ 
+           pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
+           /* propagate invalidate everywhere */
+           PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
+           /* remember reference and change */
+           oattr = (char)(*pte & (PHYS_MODIFIED | PHYS_REFERENCED));
+           /* completely invalidate the PTE */
+           pmap_store_pte(pte, 0);
 
-               pai = pa_index(old_pa);
-               LOCK_PVH(pai);
+           if (managed_page(pai)) {
 
+#if TESTING
+               if (pmap->stats.resident_count < 1)
+                   panic("pmap_enter: resident_count");
+#endif
                assert(pmap->stats.resident_count >= 1);
-               pmap->stats.resident_count--;
+               OSAddAtomic(-1, (SInt32 *) &pmap->stats.resident_count);
+
                if (iswired(*pte)) {
+
+#if TESTING
+                   if (pmap->stats.wired_count < 1)
+                       panic("pmap_enter: wired_count");
+#endif
                    assert(pmap->stats.wired_count >= 1);
-                   pmap->stats.wired_count--;
+                   OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count);
                }
 
-                   pmap_phys_attributes[pai] |=
-                       *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-
-               pmap_store_pte(pte, 0);
+               pmap_phys_attributes[pai] |= oattr;
                /*
                 *      Remove the mapping from the pvlist for
                 *      this physical page.
+                *      We'll end up with either a rooted pv or a
+                *      hashed pv
                 */
                {
-                   register pv_entry_t prev, cur;
 
                    pv_h = pai_to_pvh(pai);
+
                    if (pv_h->pmap == PMAP_NULL) {
                        panic("pmap_enter: null pv_list!");
                    }
 
                    if (pv_h->va == vaddr && pv_h->pmap == pmap) {
                        /*
-                        * Header is the pv_entry.  Copy the next one
-                        * to header and free the next one (we cannot
+                        * Header is the pv_rooted_entry.  
+                        * If there is a next one, copy it to the
+                        * header and free the next one (we cannot
                         * free the header)
                         */
-                       cur = pv_h->next;
-                       if (cur != PV_ENTRY_NULL) {
-                           *pv_h = *cur;
-                           pv_e = cur;
-                       }
-                       else {
-                           pv_h->pmap = PMAP_NULL;
+                     pvh_e = (pv_hashed_entry_t)queue_next(&pv_h->qlink);
+                     if (pvh_e != (pv_hashed_entry_t)pv_h) {
+                       pvhash_idx = pvhashidx(pvh_e->pmap, pvh_e->va);
+                       LOCK_PV_HASH(pvhash_idx);
+                         remque(&pvh_e->qlink);
+                         pmap_pvh_unlink(pvh_e);
+                         UNLOCK_PV_HASH(pvhash_idx);
+                         pv_h->pmap = pvh_e->pmap;
+                         pv_h->va = pvh_e->va;
                        }
+                     else {
+                       pv_h->pmap = PMAP_NULL;
+                       pvh_e = PV_HASHED_ENTRY_NULL;
+                     }
                    }
                    else {
-                       cur = pv_h;
-                       do {
-                           prev = cur;
-                           if ((cur = prev->next) == PV_ENTRY_NULL) {
-                               panic("pmap_enter: mapping not in pv_list!");
-                           }
-                       } while (cur->va != vaddr || cur->pmap != pmap);
-                       prev->next = cur->next;
-                       pv_e = cur;
+                     pv_hashed_entry_t *pprevh;
+                     ppnum_t old_ppn;
+                     /* wasn't the rooted pv - hash, find it, and unlink it */
+                     old_ppn = (ppnum_t)pa_index(old_pa);
+                     CHK_NPVHASH();
+                     pvhash_idx = pvhashidx(pmap,vaddr);
+                     LOCK_PV_HASH(pvhash_idx);
+                     pprevh = pvhash(pvhash_idx);
+#if PV_DEBUG
+                     if (NULL==pprevh)panic("pmap enter 1");
+#endif
+                     pvh_e = *pprevh;
+                     pmap_pv_hashlist_walks++;
+                     pv_cnt = 0;
+                     while (PV_HASHED_ENTRY_NULL != pvh_e) {
+                       pv_cnt++;
+                       if (pvh_e->pmap == pmap && pvh_e->va == vaddr && pvh_e->ppn == old_ppn) break;
+                       pprevh = &pvh_e->nexth;
+                       pvh_e = pvh_e->nexth;
+                     }
+                     pmap_pv_hashlist_cnts += pv_cnt;
+                     if (pmap_pv_hashlist_max < pv_cnt) pmap_pv_hashlist_max = pv_cnt;
+                     if (PV_HASHED_ENTRY_NULL == pvh_e) panic("pmap_enter: pv not in hash list");
+                     if(NULL==pprevh)panic("pmap enter 2");
+                     *pprevh = pvh_e->nexth;
+                     remque(&pvh_e->qlink);
+                     UNLOCK_PV_HASH(pvhash_idx);
                    }
                }
-               UNLOCK_PVH(pai);
            }
            else {
 
                /*
-                *      old_pa is not managed.  Pretend it's zero so code
-                *      at Step 3) will enter new mapping (overwriting old
-                *      one).  Do removal part of accounting.
+                *      old_pa is not managed.
+                *      Do removal part of accounting.
                 */
-               old_pa = (pmap_paddr_t) 0;
 
                if (iswired(*pte)) {
                    assert(pmap->stats.wired_count >= 1);
-                   pmap->stats.wired_count--;
+                   OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count);
                }
            }
-           need_tlbflush = TRUE;
-        
        }
 
-       if (valid_page(i386_btop(pa))) {
+       /*
+        * If we had a previously managed page locked, unlock it now.
+        */
+
+       if (old_pa_locked) {
+         UNLOCK_PVH(pai);
+         old_pa_locked = FALSE;
+       }
+
+       pai = pa_index(pa);     /* now working with new incoming phys page */
+       if (managed_page(pai)) {
 
            /*
             *  Step 2) Enter the mapping in the PV list for this
             *  physical page.
             */
+           pv_h = pai_to_pvh(pai);
 
-           pai = pa_index(pa);
-
-
-#if SHARING_FAULTS /* this is horribly broken , do not enable */
-RetryPvList:
-           /*
-            * We can return here from the sharing fault code below
-            * in case we removed the only entry on the pv list and thus
-            * must enter the new one in the list header.
-            */
-#endif /* SHARING_FAULTS */
            LOCK_PVH(pai);
-           pv_h = pai_to_pvh(pai);
 
            if (pv_h->pmap == PMAP_NULL) {
                /*
-                *      No mappings yet
+                *      No mappings yet, use the rooted pv
                 */
                pv_h->va = vaddr;
                pv_h->pmap = pmap;
-               pv_h->next = PV_ENTRY_NULL;
+               queue_init(&pv_h->qlink);
            }
            else {
-#if    DEBUG
-               {
-                   /*
-                    * check that this mapping is not already there
-                    * or there is no alias for this mapping in the same map
-                    */
-                   pv_entry_t  e = pv_h;
-                   while (e != PV_ENTRY_NULL) {
-                       if (e->pmap == pmap && e->va == vaddr)
-                            panic("pmap_enter: already in pv_list");
-                       e = e->next;
-                   }
-               }
-#endif /* DEBUG */
-#if SHARING_FAULTS /* broken, do not enable */
-                {
-                    /*
-                     * do sharing faults.
-                     * if we find an entry on this pv list in the same address
-                    * space, remove it.  we know there will not be more
-                    * than one. 
-                    */
-                   pv_entry_t  e = pv_h;
-                    pt_entry_t      *opte;
-
-                   while (e != PV_ENTRY_NULL) {
-                       if (e->pmap == pmap) {
-                            /*
-                            *  Remove it, drop pv list lock first.
-                            */
-                            UNLOCK_PVH(pai);
-
-                            opte = pmap_pte(pmap, e->va);
-                            assert(opte != PT_ENTRY_NULL);
-                            /*
-                            *  Invalidate the translation buffer,
-                            *  then remove the mapping.
-                            */
-                             pmap_remove_range(pmap, e->va, opte,
-                                                      opte + 1);
-
-                            PMAP_UPDATE_TLBS(pmap, e->va, e->va + PAGE_SIZE);
-
-                            /*
-                             * We could have remove the head entry,
-                             * so there could be no more entries
-                             * and so we have to use the pv head entry.
-                             * so, go back to the top and try the entry
-                             * again.
-                             */
-                            goto RetryPvList;
-                       }
-                        e = e->next;
-                   }
-
-                   /*
-                     * check that this mapping is not already there
-                     */
-                   e = pv_h;
-                   while (e != PV_ENTRY_NULL) {
-                       if (e->pmap == pmap)
-                            panic("pmap_enter: alias in pv_list");
-                       e = e->next;
-                   }
-               }
-#endif /* SHARING_FAULTS */
-#if DEBUG_ALIAS
-                {
-                    /*
-                     * check for aliases within the same address space.
-                     */
-                   pv_entry_t  e = pv_h;
-                    vm_offset_t     rpc = get_rpc();
-
-                   while (e != PV_ENTRY_NULL) {
-                       if (e->pmap == pmap) {
-                            /*
-                             * log this entry in the alias ring buffer
-                            * if it's not there already.
-                             */
-                            struct pmap_alias *pma;
-                            int ii, logit;
-
-                            logit = TRUE;
-                            for (ii = 0; ii < pmap_alias_index; ii++) {
-                                if (pmap_aliasbuf[ii].rpc == rpc) {
-                                    /* found it in the log already */
-                                    logit = FALSE;
-                                    break;
-                               }
-                           }
-                            if (logit) {
-                                pma = &pmap_aliasbuf[pmap_alias_index];
-                                pma->pmap = pmap;
-                                pma->va = vaddr;
-                                pma->rpc = rpc;
-                                pma->cookie = PMAP_ALIAS_COOKIE;
-                                if (++pmap_alias_index >= PMAP_ALIAS_MAX)
-                                    panic("pmap_enter: exhausted alias log");
-                           }
-                       }
-                        e = e->next;
-                   }
-               }
-#endif /* DEBUG_ALIAS */
                /*
-                *      Add new pv_entry after header.
+                *      Add new pv_hashed_entry after header.
                 */
-               if (pv_e == PV_ENTRY_NULL) {
-                   PV_ALLOC(pv_e);
-                   if (pv_e == PV_ENTRY_NULL) {
-                     panic("pmap no pv_e's");
+               if ((PV_HASHED_ENTRY_NULL == pvh_e) && pvh_new) {
+                 pvh_e = pvh_new;
+                 pvh_new = PV_HASHED_ENTRY_NULL;  /* show we used it */
+               } else if (PV_HASHED_ENTRY_NULL == pvh_e) {
+                 PV_HASHED_ALLOC(pvh_e);
+                 if (PV_HASHED_ENTRY_NULL == pvh_e) {
+                   /* The pv free list is empty.
+                    * If we are on the kernel pmap, use one of the special
+                    * private kernel pv_e's; otherwise unlock everything,
+                    * zalloc a pv_e, and restart, bringing the new pv_e with us.
+                    */
+                   if (kernel_pmap == pmap) {
+                     PV_HASHED_KERN_ALLOC(pvh_e);
+                   } else {
+                     UNLOCK_PVH(pai);
+                     PMAP_UNLOCK(pmap);
+                     pvh_new = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
+                     goto Retry;
                    }
+                 }
                }
-               pv_e->va = vaddr;
-               pv_e->pmap = pmap;
-               pv_e->next = pv_h->next;
-               pv_h->next = pv_e;
+
+               if (PV_HASHED_ENTRY_NULL == pvh_e) panic("pvh_e exhaustion");
+               pvh_e->va = vaddr;
+               pvh_e->pmap = pmap;
+               pvh_e->ppn = pn;
+               CHK_NPVHASH();
+               pvhash_idx = pvhashidx(pmap,vaddr);
+               LOCK_PV_HASH(pvhash_idx);
+               insque(&pvh_e->qlink, &pv_h->qlink);
+               hashp = pvhash(pvhash_idx);
+#if PV_DEBUG
+               if(NULL==hashp)panic("pmap_enter 4");
+#endif
+               pvh_e->nexth = *hashp;
+               *hashp = pvh_e;
+               UNLOCK_PV_HASH(pvhash_idx);
+
                /*
                 *      Remember that we used the pvlist entry.
                 */
-               pv_e = PV_ENTRY_NULL;
+               pvh_e = PV_HASHED_ENTRY_NULL;
            }
-           UNLOCK_PVH(pai);
 
            /*
             * only count the mapping
             * for 'managed memory'
             */
-           pmap->stats.resident_count++;
+           OSAddAtomic(+1, (SInt32 *) &pmap->stats.resident_count);
+           if (pmap->stats.resident_count > pmap->stats.resident_max) {
+                   pmap->stats.resident_max = pmap->stats.resident_count;
+           }
        }
 
        /*
         * Step 3) Enter the mapping.
-        */
-
-
-       /*
+        *
         *      Build a template to speed up entering -
         *      only the pfn changes.
         */
        template = pa_to_pte(pa) | INTEL_PTE_VALID;
 
-       if(flags & VM_MEM_NOT_CACHEABLE) {
+       if (flags & VM_MEM_NOT_CACHEABLE) {
                if(!(flags & VM_MEM_GUARDED))
                        template |= INTEL_PTE_PTA;
                template |= INTEL_PTE_NCACHE;
@@ -2435,19 +2939,32 @@ RetryPvList:
 
        if (wired) {
                template |= INTEL_PTE_WIRED;
-               pmap->stats.wired_count++;
+               OSAddAtomic(+1, (SInt32 *) &pmap->stats.wired_count);
        }
        pmap_store_pte(pte, template);
 
+       /* If this was a managed page, we delayed unlocking the pv until here
+        * to prevent pmap_page_protect et al. from finding it until the pte
+        * has been stored. */
+
+       if (managed_page(pai)) {
+         UNLOCK_PVH(pai);
+       }
+
 Done:
        if (need_tlbflush == TRUE)
                PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
 
-       if (pv_e != PV_ENTRY_NULL) {
-               PV_FREE(pv_e);
+       if (pvh_e != PV_HASHED_ENTRY_NULL) {
+               PV_HASHED_FREE_LIST(pvh_e, pvh_e, 1);
        }
 
-       PMAP_READ_UNLOCK(pmap, spl);
+       if (pvh_new != PV_HASHED_ENTRY_NULL) {
+         PV_HASHED_KERN_FREE_LIST(pvh_new, pvh_new, 1);
+       }
+
+       PMAP_UNLOCK(pmap);
+       PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_END, 0, 0, 0, 0, 0);
 }
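
pmap_enter() keeps each additional mapping of a physical page on two structures at once: the page's pv queue, rooted at the pv_rooted_entry, and a hash chain keyed by (pmap, va), so the removal and protection paths can find it from either direction. A simplified sketch of that dual linkage; the kernel's queue is circular and doubly linked and its hash sizing and hash function differ, so the types and pv_hash() below are illustrative only:

#include <stdint.h>
#include <stddef.h>

#define NPVHASH 255u                     /* illustrative; the kernel sizes this at boot */

struct pv_entry {
    struct pv_entry *nexth;              /* hash chain, keyed by (pmap, va) */
    struct pv_entry *qnext;              /* per-physical-page list of mappings */
    void            *pmap;
    uint64_t         va;
    uint32_t         ppn;
};

static struct pv_entry *pv_hash_table[NPVHASH + 1];

static unsigned pv_hash(void *pmap, uint64_t va)
{
    return (unsigned)(((uintptr_t)pmap ^ (uintptr_t)(va >> 12)) & NPVHASH);
}

/* link a new mapping (pmap, va) -> ppn behind the page's rooted entry */
static void pv_hash_add(struct pv_entry *e, struct pv_entry *rooted,
                        void *pmap, uint64_t va, uint32_t ppn)
{
    unsigned idx = pv_hash(pmap, va);

    e->pmap = pmap;
    e->va   = va;
    e->ppn  = ppn;

    e->qnext = rooted->qnext;            /* onto the page's mapping list */
    rooted->qnext = e;

    e->nexth = pv_hash_table[idx];       /* onto the (pmap, va) hash chain */
    pv_hash_table[idx] = e;
}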
 
 /*
@@ -2464,14 +2981,12 @@ pmap_change_wiring(
        boolean_t       wired)
 {
        register pt_entry_t     *pte;
-       spl_t                   spl;
 
-#if 1
        /*
         *      We must grab the pmap system lock because we may
         *      change a pte_page queue.
         */
-       PMAP_READ_LOCK(map, spl);
+       PMAP_LOCK(map);
 
        if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL)
                panic("pmap_change_wiring: pte missing");
@@ -2480,26 +2995,19 @@ pmap_change_wiring(
            /*
             *  wiring down mapping
             */
-           map->stats.wired_count++;
-           pmap_store_pte(pte, *pte | INTEL_PTE_WIRED);
-           pte++;
+           OSAddAtomic(+1, (SInt32 *) &map->stats.wired_count);
+           pmap_update_pte(pte, *pte, (*pte | INTEL_PTE_WIRED));
        }
        else if (!wired && iswired(*pte)) {
            /*
             *  unwiring mapping
             */
            assert(map->stats.wired_count >= 1);
-           map->stats.wired_count--;
-           pmap_store_pte(pte, *pte & ~INTEL_PTE_WIRED);
-           pte++;
+           OSAddAtomic(-1, (SInt32 *) &map->stats.wired_count);
+           pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WIRED));
        }
 
-       PMAP_READ_UNLOCK(map, spl);
-
-#else
-       return;
-#endif
-
+       PMAP_UNLOCK(map);
 }
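
Throughout this diff the stats updates move from plain ++/-- under a global lock to OSAddAtomic(), because the single read/write pmap lock that used to serialize them is gone and several CPUs can now touch the same counters concurrently. A tiny sketch of the same idea using C11 atomics as a stand-in for OSAddAtomic():

#include <stdatomic.h>

struct pmap_stats {
    atomic_int resident_count;
    atomic_int wired_count;
};

static void stats_add_resident(struct pmap_stats *s) { atomic_fetch_add(&s->resident_count, 1); }
static void stats_sub_resident(struct pmap_stats *s) { atomic_fetch_sub(&s->resident_count, 1); }
static void stats_add_wired(struct pmap_stats *s)    { atomic_fetch_add(&s->wired_count, 1); }
static void stats_sub_wired(struct pmap_stats *s)    { atomic_fetch_sub(&s->wired_count, 1); }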
 
 ppnum_t
@@ -2541,6 +3049,7 @@ pmap_extract(
 
        paddr = (vm_offset_t)0;
        ppn = pmap_find_phys(pmap, vaddr);
+
        if (ppn) {
                paddr = ((vm_offset_t)i386_ptob(ppn)) | (vaddr & INTEL_OFFMASK);
        }
@@ -2562,9 +3071,9 @@ pmap_expand_pml4(
        if (kernel_pmap == map) panic("expand kernel pml4");
 
        spl = splhigh();
-         pml4p = pmap64_pml4(map, vaddr);
-         splx(spl);
-         if (PML4_ENTRY_NULL == pml4p) panic("pmap_expand_pml4 no pml4p");
+       pml4p = pmap64_pml4(map, vaddr);
+       splx(spl);
+       if (PML4_ENTRY_NULL == pml4p) panic("pmap_expand_pml4 no pml4p");
 
        /*
         *      Allocate a VM page for the pml4 page
@@ -2580,53 +3089,47 @@ pmap_expand_pml4(
        pa = i386_ptob(pn);
        i = pml4idx(map, vaddr);
 
-       vm_object_lock(map->pm_obj_pml4);
-#if 0 /* DEBUG */
-       if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i)) {
-         kprintf("pmap_expand_pml4: obj_pml4 not empty, pmap 0x%x pm_obj_pml4 0x%x vaddr 0x%llx i 0x%llx\n",
-                 map, map->pm_obj_pml4, vaddr, i);
-       }
-#endif
-       vm_page_insert(m, map->pm_obj_pml4, (vm_object_offset_t)i);
+       /*
+        *      Zero the page.
+        */
+       pmap_zero_page(pn);
 
        vm_page_lock_queues();
        vm_page_wire(m);
-
-       vm_page_unlock_queues();
-       vm_object_unlock(map->pm_obj_pml4);
        inuse_ptepages_count++;
-       map->stats.resident_count++;
-       map->stats.wired_count++;
+       vm_page_unlock_queues();
 
-       /*
-        *      Zero the page.
-        */
-       pmap_zero_page(pn);
+       /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
+       vm_object_lock(map->pm_obj_pml4);
 
-       PMAP_READ_LOCK(map, spl);
+       PMAP_LOCK(map);
        /*
         *      See if someone else expanded us first
         */
        if (pmap64_pdpt(map, vaddr) != PDPT_ENTRY_NULL) {
-               PMAP_READ_UNLOCK(map, spl);
-               vm_object_lock(map->pm_obj_pml4);
+               PMAP_UNLOCK(map);
+               vm_object_unlock(map->pm_obj_pml4);
+
                vm_page_lock_queues();
                vm_page_free(m);
                inuse_ptepages_count--;
-               map->stats.resident_count--;
-               map->stats.wired_count--;
-
                vm_page_unlock_queues();
-               vm_object_unlock(map->pm_obj_pml4);
+
                return;
        }
 
+#if 0 /* DEBUG */
+       if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i)) {
+              panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
+                    map, map->pm_obj_pml4, vaddr, i);
+       }
+#endif
+       vm_page_insert(m, map->pm_obj_pml4, (vm_object_offset_t)i);
+       vm_object_unlock(map->pm_obj_pml4);
+
        /*
         *      Set the page directory entry for this page table.
-        *      If we have allocated more than one hardware page,
-        *      set several page directory entries.
         */
-
        pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */
 
        pmap_store_pte(pml4p, pa_to_pte(pa)
@@ -2634,7 +3137,7 @@ pmap_expand_pml4(
                                | INTEL_PTE_USER
                                | INTEL_PTE_WRITE);
 
-       PMAP_READ_UNLOCK(map, spl);
+       PMAP_UNLOCK(map);
 
        return;
 
@@ -2655,13 +3158,12 @@ pmap_expand_pdpt(
        if (kernel_pmap == map) panic("expand kernel pdpt");
 
        spl = splhigh();
-         while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) {
-           splx(spl);
-           pmap_expand_pml4(map, vaddr); /* need room for another pdpt entry */
-           spl = splhigh();
-         }
-         splx(spl);
-
+       while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) {
+               splx(spl);
+               pmap_expand_pml4(map, vaddr); /* need room for another pdpt entry */
+               spl = splhigh();
+       }
+       splx(spl);
 
        /*
         *      Allocate a VM page for the pdpt page
@@ -2677,53 +3179,47 @@ pmap_expand_pdpt(
        pa = i386_ptob(pn);
        i = pdptidx(map, vaddr);
 
-       vm_object_lock(map->pm_obj_pdpt);
-#if 0 /* DEBUG */
-       if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i)) {
-         kprintf("pmap_expand_pdpt: obj_pdpt not empty, pmap 0x%x pm_obj_pdpt 0x%x vaddr 0x%llx i 0x%llx\n",
-                 map, map->pm_obj_pdpt, vaddr, i);
-       }
-#endif
-       vm_page_insert(m, map->pm_obj_pdpt, (vm_object_offset_t)i);
+       /*
+        *      Zero the page.
+        */
+       pmap_zero_page(pn);
 
        vm_page_lock_queues();
        vm_page_wire(m);
-
-       vm_page_unlock_queues();
-       vm_object_unlock(map->pm_obj_pdpt);
        inuse_ptepages_count++;
-       map->stats.resident_count++;
-       map->stats.wired_count++;
+       vm_page_unlock_queues();
 
-       /*
-        *      Zero the page.
-        */
-       pmap_zero_page(pn);
+       /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
+       vm_object_lock(map->pm_obj_pdpt);
 
-       PMAP_READ_LOCK(map, spl);
+       PMAP_LOCK(map);
        /*
         *      See if someone else expanded us first
         */
        if (pmap64_pde(map, vaddr) != PD_ENTRY_NULL) {
-               PMAP_READ_UNLOCK(map, spl);
-               vm_object_lock(map->pm_obj_pdpt);
+               PMAP_UNLOCK(map);
+               vm_object_unlock(map->pm_obj_pdpt);
+
                vm_page_lock_queues();
                vm_page_free(m);
                inuse_ptepages_count--;
-               map->stats.resident_count--;
-               map->stats.wired_count--;
-
                vm_page_unlock_queues();
-               vm_object_unlock(map->pm_obj_pdpt);
+
                return;
        }
 
+#if 0 /* DEBUG */
+       if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i)) {
+              panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
+                    map, map->pm_obj_pdpt, vaddr, i);
+       }
+#endif
+       vm_page_insert(m, map->pm_obj_pdpt, (vm_object_offset_t)i);
+       vm_object_unlock(map->pm_obj_pdpt);
+
        /*
         *      Set the page directory entry for this page table.
-        *      If we have allocated more than one hardware page,
-        *      set several page directory entries.
         */
-
        pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */
 
        pmap_store_pte(pdptp, pa_to_pte(pa)
@@ -2731,7 +3227,7 @@ pmap_expand_pdpt(
                                | INTEL_PTE_USER
                                | INTEL_PTE_WRITE);
 
-       PMAP_READ_UNLOCK(map, spl);
+       PMAP_UNLOCK(map);
 
        return;
 
@@ -2771,18 +3267,15 @@ pmap_expand(
         * and we are 64 bit, propagate expand upwards
         */
 
-       if (cpu_64bit && (map != kernel_pmap)) {
-         spl = splhigh();
-         while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) {
-           splx(spl);
-           pmap_expand_pdpt(map, vaddr); /* need room for another pde entry */
-           spl = splhigh();
-         }
-         splx(spl);
-       } else {
-         pdp = pmap_pde(map, vaddr);
-       }
-
+       if (cpu_64bit && (map != kernel_pmap)) {
+               spl = splhigh();
+               while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) {
+                       splx(spl);
+                       pmap_expand_pdpt(map, vaddr); /* need room for another pde entry */
+                       spl = splhigh();
+               }
+               splx(spl);
+       }
 
        /*
         *      Allocate a VM page for the pde entries.
@@ -2798,59 +3291,60 @@ pmap_expand(
        pa = i386_ptob(pn);
        i = pdeidx(map, vaddr);
 
-       vm_object_lock(map->pm_obj);
-#if 0 /* DEBUG */
-       if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i)) {
-         kprintf("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n",
-                 map, map->pm_obj, vaddr, i);
-       }
-#endif
-       vm_page_insert(m, map->pm_obj, (vm_object_offset_t)i);
+       /*
+        *      Zero the page.
+        */
+       pmap_zero_page(pn);
 
        vm_page_lock_queues();
        vm_page_wire(m);
        inuse_ptepages_count++;
-
        vm_page_unlock_queues();
-       vm_object_unlock(map->pm_obj);
 
-       /*
-        *      Zero the page.
-        */
-       pmap_zero_page(pn);
+       /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
+       vm_object_lock(map->pm_obj);
 
-       PMAP_READ_LOCK(map, spl);
+       PMAP_LOCK(map);
        /*
         *      See if someone else expanded us first
         */
+
        if (pmap_pte(map, vaddr) != PT_ENTRY_NULL) {
-               PMAP_READ_UNLOCK(map, spl);
-               vm_object_lock(map->pm_obj);
+               PMAP_UNLOCK(map);
+               vm_object_unlock(map->pm_obj);
 
                vm_page_lock_queues();
                vm_page_free(m);
                inuse_ptepages_count--;
-
                vm_page_unlock_queues();
-               vm_object_unlock(map->pm_obj);
+
                return;
        }
 
-       pdp = pmap_pde(map, vaddr); /* refetch while locked */
+#if 0 /* DEBUG */
+       if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i)) {
+              panic("pmap_expand: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
+                    map, map->pm_obj, vaddr, i);
+       }
+#endif
+       vm_page_insert(m, map->pm_obj, (vm_object_offset_t)i);
+       vm_object_unlock(map->pm_obj);
 
        /*
-        *      Set the page directory entry for this page table.
-        *      If we have allocated more than one hardware page,
-        *      set several page directory entries.
+        * refetch while locked 
         */
 
+       pdp = pmap_pde(map, vaddr);
+
+       /*
+        *      Set the page directory entry for this page table.
+        */
        pmap_store_pte(pdp, pa_to_pte(pa)
                                | INTEL_PTE_VALID
                                | INTEL_PTE_USER
                                | INTEL_PTE_WRITE);
-           
 
-       PMAP_READ_UNLOCK(map, spl);
+       PMAP_UNLOCK(map);
 
        return;
 }
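
Both expansion paths above share one shape: the new page-table page is allocated and zeroed with no locks held, the VM object mutex is taken before the pmap spinlock so the code never sleeps while spinning, and the "someone else expanded us first" test is repeated under the locks so a lost race simply frees the speculative page. The stand-alone sketch below (hypothetical names, user-space C11/POSIX primitives standing in for vm_object_lock/PMAP_LOCK, not kernel code) illustrates that lock ordering and race check:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER; /* sleepable, like vm_object_lock */
static atomic_flag     pmap_lock   = ATOMIC_FLAG_INIT;          /* spinlock, like PMAP_LOCK */
static void           *level_page;                              /* the "page table page" slot */

static void pmap_spin_lock(void)
{
        while (atomic_flag_test_and_set_explicit(&pmap_lock, memory_order_acquire))
                ;       /* spin; never sleeps, so it must be the innermost lock */
}

static void pmap_spin_unlock(void)
{
        atomic_flag_clear_explicit(&pmap_lock, memory_order_release);
}

static void expand(void)
{
        void *m = calloc(1, 4096);      /* allocate and zero with no locks held */

        if (m == NULL)
                return;

        pthread_mutex_lock(&object_lock);       /* mutex first ... */
        pmap_spin_lock();                       /* ... spinlock second */

        if (level_page != NULL) {               /* someone else expanded us first */
                pmap_spin_unlock();
                pthread_mutex_unlock(&object_lock);
                free(m);                        /* throw away the speculative page */
                return;
        }
        level_page = m;                         /* install our page */

        pmap_spin_unlock();
        pthread_mutex_unlock(&object_lock);
}

int main(void)
{
        expand();
        expand();       /* second call loses the "race" and frees its page */
        printf("level page installed at %p\n", level_page);
        free(level_page);
        return 0;
}
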
@@ -2880,6 +3374,10 @@ pmap_sync_page_attributes_phys(ppnum_t pa)
        cache_flush_page_phys(pa);
 }
 
+
+
+#ifdef CURRENTLY_UNUSED_AND_UNTESTED
+
 int    collect_ref;
 int    collect_unref;
 
@@ -2901,7 +3399,6 @@ pmap_collect(
        register pt_entry_t     *pdp, *ptp;
        pt_entry_t              *eptp;
        int                     wired;
-       spl_t                   spl;
 
        if (p == PMAP_NULL)
                return;
@@ -2912,7 +3409,7 @@ pmap_collect(
        /*
         *      Garbage collect map.
         */
-       PMAP_READ_LOCK(p, spl);
+       PMAP_LOCK(p);
 
        for (pdp = (pt_entry_t *)p->dirbase;
             pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)];
@@ -2955,7 +3452,7 @@ pmap_collect(
                     */
                    pmap_store_pte(pdp, 0x0);
                 
-                   PMAP_READ_UNLOCK(p, spl);
+                   PMAP_UNLOCK(p);
 
                    /*
                     * And free the pte page itself.
@@ -2964,37 +3461,39 @@ pmap_collect(
                        register vm_page_t m;
 
                        vm_object_lock(p->pm_obj);
+
                        m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]));
                        if (m == VM_PAGE_NULL)
                            panic("pmap_collect: pte page not in object");
+
                        vm_page_lock_queues();
                        vm_page_free(m);
                        inuse_ptepages_count--;
                        vm_page_unlock_queues();
+
                        vm_object_unlock(p->pm_obj);
                    }
 
-                   PMAP_READ_LOCK(p, spl);
+                   PMAP_LOCK(p);
                }
              }
           }
        }
-       PMAP_UPDATE_TLBS(p, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
 
-       PMAP_READ_UNLOCK(p, spl);
+       PMAP_UPDATE_TLBS(p, 0x0, 0xFFFFFFFFFFFFF000ULL);
+       PMAP_UNLOCK(p);
        return;
 
 }
+#endif
 
 
 void
-pmap_copy_page(src, dst)
-       ppnum_t src;
-       ppnum_t dst;
+pmap_copy_page(ppnum_t src, ppnum_t dst)
 {
-        bcopy_phys((addr64_t)i386_ptob(src),
-                  (addr64_t)i386_ptob(dst),
-                  PAGE_SIZE);
+  bcopy_phys((addr64_t)i386_ptob(src),
+            (addr64_t)i386_ptob(dst),
+            PAGE_SIZE);
 }
 
 
@@ -3029,35 +3528,36 @@ pmap_pageable(
  */
 void
 phys_attribute_clear(
-       ppnum_t pn,
+       ppnum_t         pn,
        int             bits)
 {
-       pv_entry_t              pv_h;
-       register pv_entry_t     pv_e;
+       pv_rooted_entry_t               pv_h;
+       register pv_hashed_entry_t      pv_e;
        register pt_entry_t     *pte;
        int                     pai;
        register pmap_t         pmap;
-       spl_t                   spl;
-       pmap_paddr_t            phys;
 
+       pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
-       if (!valid_page(pn)) {
+       if (pn == vm_page_guard_addr)
+               return;
+
+       pai = ppn_to_pai(pn);
+
+       if (!managed_page(pai)) {
            /*
             *  Not a managed page.
             */
            return;
        }
 
-       /*
-        *      Lock the pmap system first, since we will be changing
-        *      several pmaps.
-        */
+       PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_START,
+                  (int) pn, bits, 0, 0, 0);
 
-       PMAP_WRITE_LOCK(spl);
-       phys = i386_ptob(pn);
-       pai = pa_index(phys);
        pv_h = pai_to_pvh(pai);
 
+       LOCK_PVH(pai);
+
        /*
         * Walk down PV list, clearing all modify or reference bits.
         * We do not have to lock the pv_list because we have
@@ -3067,72 +3567,74 @@ phys_attribute_clear(
            /*
             * There are some mappings.
             */
-           for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
 
+         pv_e = (pv_hashed_entry_t)pv_h;
+
+         do {
                pmap = pv_e->pmap;
-               /*
-                * Lock the pmap to block pmap_extract and similar routines.
-                */
-               simple_lock(&pmap->lock);
 
                {
-                   register vm_map_offset_t va;
+                   vm_map_offset_t va;
 
                    va = pv_e->va;
-                   pte = pmap_pte(pmap, va);
+                   /*
+                    * first make sure any processor actively
+                    * using this pmap flushes its TLB state
+                    */
+
+                   PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
 
-#if    0
                    /*
-                    * Consistency checks.
+                    * Clear modify and/or reference bits.
                     */
-                   assert(*pte & INTEL_PTE_VALID);
-                   /* assert(pte_to_phys(*pte) == phys); */
-#endif
 
-               /*
-                * Clear modify or reference bits.
-                */
+                   pte = pmap_pte(pmap, va);
+                   pmap_update_pte(pte, *pte, (*pte & ~bits));
 
-                       pmap_store_pte(pte, *pte & ~bits);
-                       pte++;
-                       PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
                }
-               simple_unlock(&pmap->lock);
 
-           }
-       }
+               pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
 
+         } while (pv_e != (pv_hashed_entry_t)pv_h);
+       }
        pmap_phys_attributes[pai] &= ~bits;
 
-       PMAP_WRITE_UNLOCK(spl);
+       UNLOCK_PVH(pai);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_END,
+                  0, 0, 0, 0, 0);
+
 }
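
The attribute-clearing loop above walks the physical page's PV list as a circular queue: the rooted entry doubles as the first mapping and the sentinel, so a do/while that stops when it wraps back to the root visits every mapping exactly once. A minimal stand-alone sketch of that traversal, using a hypothetical pv_entry type rather than the kernel's pv_rooted_entry_t/pv_hashed_entry_t:

#include <stdio.h>

struct pv_entry {
        struct pv_entry *next;  /* circular: the last entry points back at the root */
        unsigned long    va;    /* virtual address of this mapping */
};

static void walk_pv_list(struct pv_entry *pv_h)
{
        struct pv_entry *pv_e = pv_h;

        do {
                printf("mapping at va 0x%lx\n", pv_e->va);
                pv_e = pv_e->next;
        } while (pv_e != pv_h);         /* stop when we wrap back to the root */
}

int main(void)
{
        struct pv_entry a, b, c;

        a.va = 0x1000; b.va = 0x2000; c.va = 0x3000;
        a.next = &b; b.next = &c; c.next = &a;  /* three mappings, circular */

        walk_pv_list(&a);
        return 0;
}
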
 
 /*
  *     Check specified attribute bits.
  */
-boolean_t
+int
 phys_attribute_test(
-       ppnum_t pn,
+       ppnum_t         pn,
        int             bits)
 {
-       pv_entry_t              pv_h;
-       register pv_entry_t     pv_e;
+       pv_rooted_entry_t               pv_h;
+       register pv_hashed_entry_t      pv_e;
        register pt_entry_t     *pte;
        int                     pai;
        register pmap_t         pmap;
-       spl_t                   spl;
-       pmap_paddr_t            phys;
+       int                     attributes = 0;
 
+       pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
-       if (!valid_page(pn)) {
+       if (pn == vm_page_guard_addr)
+               return 0;
+
+       pai = ppn_to_pai(pn);
+
+       if (!managed_page(pai)) {
            /*
             *  Not a managed page.
             */
-           return (FALSE);
+           return (0);
        }
 
-       phys = i386_ptob(pn);
-       pai = pa_index(phys);
        /*
         * super fast check...  if bits already collected
         * no need to take any locks...
@@ -3140,23 +3642,18 @@ phys_attribute_test(
         * the lock in case they got pulled in while
         * we were waiting for the lock
         */
-       if (pmap_phys_attributes[pai] & bits)
-           return (TRUE);
+       if ( (pmap_phys_attributes[pai] & bits) == bits)
+           return (bits);
+
        pv_h = pai_to_pvh(pai);
 
-       /*
-        *      Lock the pmap system first, since we will be checking
-        *      several pmaps.
-        */
-       PMAP_WRITE_LOCK(spl);
+       LOCK_PVH(pai);
 
-       if (pmap_phys_attributes[pai] & bits) {
-           PMAP_WRITE_UNLOCK(spl);
-           return (TRUE);
-       }
+       attributes = pmap_phys_attributes[pai] & bits;
 
        /*
-        * Walk down PV list, checking all mappings.
+        * Walk down PV list, checking the mappings until we
+        * reach the end or we've found the attributes we've asked for
         * We do not have to lock the pv_list because we have
         * the entire pmap system locked.
         */
@@ -3164,44 +3661,37 @@ phys_attribute_test(
            /*
             * There are some mappings.
             */
-           for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
+         pv_e = (pv_hashed_entry_t)pv_h;
+         if (attributes != bits) do {
 
-               pmap = pv_e->pmap;
-               /*
-                * Lock the pmap to block pmap_extract and similar routines.
-                */
-               simple_lock(&pmap->lock);
+               pmap = pv_e->pmap;
 
                {
-                   register vm_map_offset_t va;
+                   vm_map_offset_t va;
 
                    va = pv_e->va;
-                   pte = pmap_pte(pmap, va);
+                   /*
+                    * first make sure any processor actively
+                    * using this pmap flushes its TLB state
+                    */
+                   PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
 
-#if    0
                    /*
-                    * Consistency checks.
+                    * pick up modify and/or reference bits from this mapping
                     */
-                   assert(*pte & INTEL_PTE_VALID);
-                   /* assert(pte_to_phys(*pte) == phys); */
-#endif
-               }
 
-               /*
-                * Check modify or reference bits.
-                */
-               {
-                       if (*pte++ & bits) {
-                           simple_unlock(&pmap->lock);
-                           PMAP_WRITE_UNLOCK(spl);
-                           return (TRUE);
-                       }
+                   pte = pmap_pte(pmap, va);
+                   attributes |= *pte & bits;
+
                }
-               simple_unlock(&pmap->lock);
-           }
+
+               pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
+
+           } while ((attributes != bits) && (pv_e != (pv_hashed_entry_t)pv_h));
        }
-       PMAP_WRITE_UNLOCK(spl);
-       return (FALSE);
+
+       UNLOCK_PVH(pai);
+       return (attributes);
 }
 
 /*
@@ -3209,29 +3699,30 @@ phys_attribute_test(
  */
 void
 phys_attribute_set(
-       ppnum_t pn,
+       ppnum_t         pn,
        int             bits)
 {
-       int                     spl;
-       pmap_paddr_t   phys;
+       int             pai;
 
+       pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
-       if (!valid_page(pn)) {
+       if (pn == vm_page_guard_addr)
+               return;
+
+       pai = ppn_to_pai(pn);
+
+       if (!managed_page(pai)) {
            /*
             *  Not a managed page.
             */
            return;
        }
 
-       /*
-        *      Lock the pmap system and set the requested bits in
-        *      the phys attributes array.  Don't need to bother with
-        *      ptes because the test routine looks here first.
-        */
-       phys = i386_ptob(pn);
-       PMAP_WRITE_LOCK(spl);
-       pmap_phys_attributes[pa_index(phys)] |= bits;
-       PMAP_WRITE_UNLOCK(spl);
+       LOCK_PVH(pai);
+
+       pmap_phys_attributes[pai] |= bits;
+
+       UNLOCK_PVH(pai);
 }
 
 /*
@@ -3266,7 +3757,10 @@ boolean_t
 pmap_is_modified(
                 ppnum_t pn)
 {
-       return (phys_attribute_test(pn, PHYS_MODIFIED));
+        if (phys_attribute_test(pn, PHYS_MODIFIED))
+               return TRUE;
+
+       return FALSE;
 }
 
 /*
@@ -3299,7 +3793,10 @@ boolean_t
 pmap_is_referenced(
                   ppnum_t pn)
 {
-       return (phys_attribute_test(pn, PHYS_REFERENCED));
+        if (phys_attribute_test(pn, PHYS_REFERENCED))
+               return TRUE;
+
+       return FALSE;
 }
 
 /*
@@ -3310,8 +3807,17 @@ pmap_is_referenced(
 unsigned int
 pmap_get_refmod(ppnum_t pa)
 {
-       return (   ((phys_attribute_test(pa,   PHYS_MODIFIED))?   VM_MEM_MODIFIED : 0)
-                        | ((phys_attribute_test(pa, PHYS_REFERENCED))? VM_MEM_REFERENCED : 0));
+        int    refmod;
+       unsigned int retval = 0;
+
+       refmod = phys_attribute_test(pa, PHYS_MODIFIED | PHYS_REFERENCED);
+
+       if (refmod & PHYS_MODIFIED)
+               retval |= VM_MEM_MODIFIED;
+       if (refmod & PHYS_REFERENCED)
+               retval |= VM_MEM_REFERENCED;
+
+       return (retval);
 }
 
 /*
@@ -3329,63 +3835,6 @@ pmap_clear_refmod(ppnum_t pa, unsigned int mask)
        phys_attribute_clear(pa, x86Mask);
 }
 
-/*
- *     Set the modify bit on the specified range
- *     of this map as requested.
- *
- *     This optimization stands only if each time the dirty bit
- *     in vm_page_t is tested, it is also tested in the pmap.
- */
-void
-pmap_modify_pages(
-       pmap_t          map,
-       vm_map_offset_t sva,
-       vm_map_offset_t eva)
-{
-       spl_t                   spl;
-       register pt_entry_t     *pde;
-       register pt_entry_t     *spte, *epte;
-       vm_map_offset_t         lva;
-       vm_map_offset_t         orig_sva;
-
-       if (map == PMAP_NULL)
-               return;
-
-       PMAP_READ_LOCK(map, spl);
-
-       orig_sva = sva;
-       while (sva && sva < eva) {
-           lva = (sva + pde_mapped_size) & ~(pde_mapped_size-1);
-           if (lva > eva)
-               lva = eva;
-           pde = pmap_pde(map, sva);
-           if (pde && (*pde & INTEL_PTE_VALID)) {
-             spte = (pt_entry_t *)pmap_pte(map, (sva & ~(pde_mapped_size-1)));
-               if (lva) {
-                  spte = &spte[ptenum(sva)];
-                  epte = &spte[intel_btop(lva-sva)];
-               } else {
-                  epte = &spte[intel_btop(pde_mapped_size)];
-                  spte = &spte[ptenum(sva)];
-               }
-               while (spte < epte) {
-                   if (*spte & INTEL_PTE_VALID) {
-                       pmap_store_pte(spte, *spte
-                                               | INTEL_PTE_MOD
-                                               | INTEL_PTE_WRITE);
-                   }
-                   spte++;
-               }
-           }
-           sva = lva;
-           pde++;
-       }
-       PMAP_UPDATE_TLBS(map, orig_sva, eva);
-
-       PMAP_READ_UNLOCK(map, spl);
-}
-
-
 void 
 invalidate_icache(__unused vm_offset_t addr,
                  __unused unsigned     cnt,
@@ -3401,12 +3850,38 @@ flush_dcache(__unused vm_offset_t       addr,
        return;
 }
 
+#if CONFIG_DTRACE
+/*
+ * Constrain DTrace copyin/copyout actions
+ */
+extern kern_return_t dtrace_copyio_preflight(addr64_t);
+extern kern_return_t dtrace_copyio_postflight(addr64_t);
+
+kern_return_t dtrace_copyio_preflight(__unused addr64_t va)
+{
+       thread_t thread = current_thread();
+
+       if (current_map() == kernel_map)
+               return KERN_FAILURE;
+       else if (thread->machine.specFlags & CopyIOActive)
+               return KERN_FAILURE;
+       else
+               return KERN_SUCCESS;
+}
+kern_return_t dtrace_copyio_postflight(__unused addr64_t va)
+{
+       return KERN_SUCCESS;
+}
+#endif /* CONFIG_DTRACE */
+
 #if    MACH_KDB
 
 /* show phys page mappings and attributes */
 
 extern void    db_show_page(pmap_paddr_t pa);
 
+#if 0
 void
 db_show_page(pmap_paddr_t pa)
 {
@@ -3418,7 +3893,7 @@ db_show_page(pmap_paddr_t pa)
        pv_h = pai_to_pvh(pai);
 
        attr = pmap_phys_attributes[pai];
-       printf("phys page %x ", pa);
+       printf("phys page %llx ", pa);
        if (attr & PHYS_MODIFIED)
                printf("modified, ");
        if (attr & PHYS_REFERENCED)
@@ -3429,12 +3904,14 @@ db_show_page(pmap_paddr_t pa)
                printf(" not mapped\n");
        for (; pv_h; pv_h = pv_h->next)
                if (pv_h->pmap)
-                       printf("%x in pmap %x\n", pv_h->va, pv_h->pmap);
+                       printf("%llx in pmap %p\n", pv_h->va, pv_h->pmap);
 }
+#endif
 
 #endif /* MACH_KDB */
 
 #if    MACH_KDB
+#if 0
 void db_kvtophys(vm_offset_t);
 void db_show_vaddrs(pt_entry_t  *);
 
@@ -3476,7 +3953,7 @@ db_show_vaddrs(
                        continue;
                }
                pdecnt++;
-               ptep = (pt_entry_t *) ((*pdep) & ~INTEL_OFFMASK);
+               ptep = (pt_entry_t *) ((unsigned long)(*pdep) & ~INTEL_OFFMASK);
                db_printf("dir[%4d]: 0x%x\n", y, *pdep);
                for (x = 0; x < NPTEPG; x++, ptep++) {
                        if (((tmp = *ptep) & INTEL_PTE_VALID) == 0) {
@@ -3494,6 +3971,7 @@ db_show_vaddrs(
        db_printf("total: %d tables, %d page table entries.\n", pdecnt, ptecnt);
 
 }
+#endif
 #endif /* MACH_KDB */
 
 #include <mach_vm_debug.h>
@@ -3533,37 +4011,67 @@ boolean_t
 phys_page_exists(
                 ppnum_t pn)
 {
-       pmap_paddr_t     phys;
-
        assert(pn != vm_page_fictitious_addr);
 
        if (!pmap_initialized)
                return (TRUE);
-       phys = (pmap_paddr_t) i386_ptob(pn);
-       if (!pmap_valid_page(pn))
+
+       if (pn == vm_page_guard_addr)
+               return FALSE;
+
+       if (!managed_page(ppn_to_pai(pn)))
                return (FALSE);
 
        return TRUE;
 }
 
 void
-mapping_free_prime()
+mapping_free_prime(void)
 {
        int             i;
-       pv_entry_t      pv_e;
+       pv_hashed_entry_t      pvh_e;
+       pv_hashed_entry_t      pvh_eh;
+       pv_hashed_entry_t      pvh_et;
+       int             pv_cnt;
+
+       pv_cnt = 0;
+       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
+       for (i = 0; i < (5 * PV_HASHED_ALLOC_CHUNK); i++) {
+               pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
+
+               pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+               pvh_eh = pvh_e;
+
+               if (pvh_et == PV_HASHED_ENTRY_NULL)
+                       pvh_et = pvh_e;
+               pv_cnt++;
+       }
+       PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
 
-       for (i = 0; i < (5 * PV_ALLOC_CHUNK); i++) {
-               pv_e = (pv_entry_t) zalloc(pv_list_zone);
-               PV_FREE(pv_e);
+       pv_cnt = 0;
+       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
+       for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK; i++) {
+               pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
+
+               pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+               pvh_eh = pvh_e;
+
+               if (pvh_et == PV_HASHED_ENTRY_NULL)
+                       pvh_et = pvh_e;
+               pv_cnt++;
        }
+       PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
+
 }
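
mapping_free_prime() (and mapping_adjust() below) never hands entries back one at a time; each allocation chunk is threaded onto a private chain while tracking its head, tail, and count, and the whole chain is spliced onto the shared free list in a single call. A rough user-space sketch of that chain-building pattern, with hypothetical names (entry, free_list_bulk_add) standing in for the PV_HASHED_* macros:

#include <stdio.h>
#include <stdlib.h>

struct entry {
        struct entry *next;
};

static void free_list_bulk_add(struct entry *head, struct entry *tail, int cnt)
{
        /* a real implementation would take the free-list lock once here */
        printf("added %d entries (head %p, tail %p)\n", cnt, (void *)head, (void *)tail);
}

int main(void)
{
        struct entry *e, *head = NULL, *tail = NULL;
        int cnt = 0, i;

        for (i = 0; i < 16; i++) {              /* one "allocation chunk" */
                e = malloc(sizeof(*e));
                if (e == NULL)
                        break;
                e->next = head;                 /* push onto the private chain */
                head = e;
                if (tail == NULL)               /* first entry is also the tail */
                        tail = e;
                cnt++;
        }
        free_list_bulk_add(head, tail, cnt);    /* one call, not sixteen */

        while (head != NULL) {                  /* clean up the sketch's chain */
                e = head;
                head = head->next;
                free(e);
        }
        return 0;
}
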
 
 void
-mapping_adjust()
+mapping_adjust(void)
 {
-       pv_entry_t      pv_e;
+       pv_hashed_entry_t      pvh_e;
+       pv_hashed_entry_t      pvh_eh;
+       pv_hashed_entry_t      pvh_et;
+       int             pv_cnt;
        int             i;
-       int             spl;
 
        if (mapping_adjust_call == NULL) {
                thread_call_setup(&mapping_adjust_call_data,
@@ -3571,14 +4079,37 @@ mapping_adjust()
                                  (thread_call_param_t) NULL);
                mapping_adjust_call = &mapping_adjust_call_data;
        }
-       /* XXX  rethink best way to do locking here */
-       if (pv_free_count < PV_LOW_WATER_MARK) {
-               for (i = 0; i < PV_ALLOC_CHUNK; i++) {
-                       pv_e = (pv_entry_t) zalloc(pv_list_zone);
-                       SPLVM(spl);
-                       PV_FREE(pv_e);
-                       SPLX(spl);
+
+       pv_cnt = 0;
+       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
+       if (pv_hashed_kern_free_count < PV_HASHED_KERN_LOW_WATER_MARK) {
+               for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK; i++) {
+                       pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
+
+                       pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+                       pvh_eh = pvh_e;
+
+                       if (pvh_et == PV_HASHED_ENTRY_NULL)
+                               pvh_et = pvh_e;
+                       pv_cnt++;
+               }
+               PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
+       }
+
+       pv_cnt = 0;
+       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
+       if (pv_hashed_free_count < PV_HASHED_LOW_WATER_MARK) {
+               for (i = 0; i < PV_HASHED_ALLOC_CHUNK; i++) {
+                       pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
+
+                       pvh_e->qlink.next = (queue_entry_t)pvh_eh;
+                       pvh_eh = pvh_e;
+
+                       if (pvh_et == PV_HASHED_ENTRY_NULL)
+                               pvh_et = pvh_e;
+                       pv_cnt++;
                }
+               PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
        }
        mappingrecurse = 0;
 }
@@ -3586,84 +4117,49 @@ mapping_adjust()
 void
 pmap_commpage32_init(vm_offset_t kernel_commpage, vm_offset_t user_commpage, int cnt)
 {
-  int i;
-  pt_entry_t *opte, *npte;
-  pt_entry_t pte;
-
-
-  for (i = 0; i < cnt; i++) {
-    opte = pmap_pte(kernel_pmap, (vm_map_offset_t)kernel_commpage);
-    if (0 == opte) panic("kernel_commpage");
-    pte = *opte | INTEL_PTE_USER|INTEL_PTE_GLOBAL;
-    pte &= ~INTEL_PTE_WRITE; // ensure read only
-    npte = pmap_pte(kernel_pmap, (vm_map_offset_t)user_commpage);
-    if (0 == npte) panic("user_commpage");
-    pmap_store_pte(npte, pte);
-    kernel_commpage += INTEL_PGBYTES;
-    user_commpage += INTEL_PGBYTES;
-  }
+       int i;
+       pt_entry_t *opte, *npte;
+       pt_entry_t pte;
+       spl_t s;
+
+       for (i = 0; i < cnt; i++) {
+               s = splhigh();
+               opte = pmap_pte(kernel_pmap, (vm_map_offset_t)kernel_commpage);
+               if (0 == opte)
+                       panic("kernel_commpage");
+               pte = *opte | INTEL_PTE_USER|INTEL_PTE_GLOBAL;
+               pte &= ~INTEL_PTE_WRITE; // ensure read only
+               npte = pmap_pte(kernel_pmap, (vm_map_offset_t)user_commpage);
+               if (0 == npte)
+                       panic("user_commpage");
+               pmap_store_pte(npte, pte);
+               splx(s);
+               kernel_commpage += INTEL_PGBYTES;
+               user_commpage += INTEL_PGBYTES;
+       }
 }
 
+
 #define PMAP_COMMPAGE64_CNT  (_COMM_PAGE64_AREA_USED/PAGE_SIZE)
 pt_entry_t pmap_commpage64_ptes[PMAP_COMMPAGE64_CNT];
 
 void
 pmap_commpage64_init(vm_offset_t kernel_commpage, __unused vm_map_offset_t user_commpage, int cnt)
 {
-  spl_t s;
-  int i;
-  pt_entry_t *kptep;
-
-  s = splhigh();
-  for (i = 0; i< cnt; i++) {
-    kptep = pmap_pte(kernel_pmap, (uint64_t)kernel_commpage + (i*PAGE_SIZE));
-    if ((0 == kptep) || (0 == (*kptep & INTEL_PTE_VALID))) panic("pmap_commpage64_init pte");
-    pmap_commpage64_ptes[i] = ((*kptep & ~INTEL_PTE_WRITE) | INTEL_PTE_USER);
-  }
-  splx(s);
-
-}
-
-void
-pmap_map_sharedpage(__unused task_t task, pmap_t p)
-{
-  pt_entry_t *ptep;
-  spl_t s;
-  int i;
-
-  if (!p->pm_64bit) return;
-  /* setup high 64 bit commpage */
-  s = splhigh();  
-  while ((ptep = pmap_pte(p, (uint64_t)_COMM_PAGE64_BASE_ADDRESS)) == PD_ENTRY_NULL) {
-    splx(s);
-    pmap_expand(p, (uint64_t)_COMM_PAGE64_BASE_ADDRESS);
-    s = splhigh();
-  }
+    int i;
+    pt_entry_t *kptep;
 
-  for (i = 0; i< PMAP_COMMPAGE64_CNT; i++) {
-    ptep = pmap_pte(p, (uint64_t)_COMM_PAGE64_BASE_ADDRESS + (i*PAGE_SIZE));
-    if (0 == ptep) panic("pmap_map_sharedpage");
-    pmap_store_pte(ptep, pmap_commpage64_ptes[i]);
-  }
-  splx(s);
+    PMAP_LOCK(kernel_pmap);
 
+    for (i = 0; i < cnt; i++) {
+        kptep = pmap_pte(kernel_pmap, (uint64_t)kernel_commpage + (i*PAGE_SIZE));
+       if ((0 == kptep) || (0 == (*kptep & INTEL_PTE_VALID)))
+           panic("pmap_commpage64_init pte");
+       pmap_commpage64_ptes[i] = ((*kptep & ~INTEL_PTE_WRITE) | INTEL_PTE_USER);
+    }
+    PMAP_UNLOCK(kernel_pmap);
 }
 
-void
-pmap_unmap_sharedpage(pmap_t pmap)
-{
-  spl_t s;
-  pt_entry_t *ptep;
-  int i;
-
-  if (!pmap->pm_64bit) return;
-  s = splhigh();
-  for (i = 0; i< PMAP_COMMPAGE64_CNT; i++) {
-    ptep = pmap_pte(pmap, (uint64_t)_COMM_PAGE64_BASE_ADDRESS + (i*PAGE_SIZE));
-  if (ptep) pmap_store_pte(ptep, 0);
-  }
-  splx(s);
-}
 
 static cpu_pmap_t              cpu_pmap_master;
 
@@ -3706,11 +4202,14 @@ pmap_cpu_alloc(boolean_t is_boot_cpu)
                address = (vm_offset_t)mapaddr;
 
                for (i = 0; i < PMAP_NWINDOWS; i++, address += PAGE_SIZE) {
+                 spl_t s;
+                       s = splhigh();
                        while ((pte = pmap_pte(kernel_pmap, (vm_map_offset_t)address)) == 0)
                                pmap_expand(kernel_pmap, (vm_map_offset_t)address);
                        * (int *) pte = 0; 
                        cp->mapwindow[i].prv_CADDR = (caddr_t) address;
                        cp->mapwindow[i].prv_CMAP = pte;
+                       splx(s);
                }
                vm_map_unlock(kernel_map);
        }
@@ -3736,13 +4235,8 @@ pmap_get_mapwindow(pt_entry_t pentry)
 {
     mapwindow_t *mp;
     int i;
-    boolean_t  istate;
 
-    /*
-     * can be called from hardware interrupt context
-     * so we need to protect the lookup process
-     */
-    istate = ml_set_interrupts_enabled(FALSE);
+    assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
 
     /*
      * Note: 0th map reserved for pmap_pte()
@@ -3751,18 +4245,33 @@ pmap_get_mapwindow(pt_entry_t pentry)
             mp = &current_cpu_datap()->cpu_pmap->mapwindow[i];
 
            if (*mp->prv_CMAP == 0) {
-                   *mp->prv_CMAP = pentry;
-                   break;
+                   pmap_store_pte(mp->prv_CMAP, pentry);
+
+                   invlpg((uintptr_t)mp->prv_CADDR);
+
+                   return (mp);
            }
     }
-    if (i >= PMAP_NWINDOWS)
-            mp = NULL;
-    (void) ml_set_interrupts_enabled(istate);
-    
-    return (mp);
+    panic("pmap_get_mapwindow: no windows available");
+
+    return NULL;
+}
+
+
+void
+pmap_put_mapwindow(mapwindow_t *mp)
+{
+    pmap_store_pte(mp->prv_CMAP, 0);
 }
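
pmap_get_mapwindow()/pmap_put_mapwindow() manage a small per-CPU pool of mapping slots: get claims the first slot whose PTE is zero and installs the new entry, put simply zeroes it again. A hypothetical user-space analogue of that claim/release pattern (the real code also reserves window 0 for pmap_pte() and panics when no window is free):

#include <stdio.h>

#define NWINDOWS 4

struct window {
        unsigned long pte;      /* 0 means the slot is free */
};

static struct window windows[NWINDOWS];

static struct window *get_window(unsigned long pte)
{
        int i;

        for (i = 0; i < NWINDOWS; i++) {
                if (windows[i].pte == 0) {      /* first free slot wins */
                        windows[i].pte = pte;
                        return &windows[i];
                }
        }
        return NULL;                            /* the kernel panics instead */
}

static void put_window(struct window *w)
{
        w->pte = 0;                             /* mark the slot free again */
}

int main(void)
{
        struct window *w = get_window(0x1000);

        if (w != NULL) {
                printf("claimed slot %ld\n", (long)(w - windows));
                put_window(w);
        }
        return 0;
}
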
 
 
+/*
+ * The Intel platform can nest at the PDE level, so NBPDE (i.e. 2MB) at a time,
+ * on a NBPDE boundary.
+ */
+uint64_t pmap_nesting_size_min = NBPDE;
+uint64_t pmap_nesting_size_max = 0 - (uint64_t)NBPDE; /* no limit, really... */
+
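
The validity masks here and the rounding done later in pmap_unnest() are plain power-of-two arithmetic on NBPDE. The sketch below (NBPDE defined locally for illustration; not kernel code) shows the alignment test pmap_nest() applies and the round-down/round-up pmap_unnest() uses to cover whole PDEs:

#include <stdio.h>
#include <stdint.h>

#define NBPDE   (1ULL << 21)    /* bytes mapped by one PDE: 2 MB */

int main(void)
{
        uint64_t vaddr = 0x00000001003ff000ULL;  /* arbitrary, unaligned */
        uint64_t size  = 0x0000000000600000ULL;  /* 6 MB */
        uint64_t vstart, vend;

        /* validity check used by pmap_nest(): reject unaligned requests */
        if (vaddr & (NBPDE - 1))
                printf("0x%llx is not NBPDE aligned\n",
                       (unsigned long long)vaddr);

        /* rounding used by pmap_unnest(): expand to whole PDEs */
        vstart = vaddr & ~(NBPDE - 1);                      /* round down */
        vend   = (vaddr + size + NBPDE - 1) & ~(NBPDE - 1); /* round up   */

        printf("unnest [0x%llx, 0x%llx), %llu PDEs\n",
               (unsigned long long)vstart, (unsigned long long)vend,
               (unsigned long long)((vend - vstart) / NBPDE));
        return 0;
}
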
 /*
  *     kern_return_t pmap_nest(grand, subord, vstart, size)
  *
@@ -3786,85 +4295,76 @@ kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t n
                
         vm_map_offset_t        vaddr, nvaddr;
        pd_entry_t      *pde,*npde;
-       unsigned int    i, need_flush;
-       unsigned int    num_pde;
-       spl_t           s;
+       unsigned int    i;
+       uint64_t        num_pde;
 
        // do validity tests
-
-       if(size & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;     /* We can only do this for multiples of 256MB */
+       if (size & (pmap_nesting_size_min-1)) return KERN_INVALID_VALUE;
+       if(vstart & (pmap_nesting_size_min-1)) return KERN_INVALID_VALUE;
+       if(nstart & (pmap_nesting_size_min-1)) return KERN_INVALID_VALUE;
        if((size >> 28) > 65536)  return KERN_INVALID_VALUE;    /* Max size we can nest is 16TB */
-       if(vstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;   /* We can only do this aligned to 256MB */
-       if(nstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE;   /* We can only do this aligned to 256MB */
-       if(size == 0) {    
+       if(size == 0) {
                panic("pmap_nest: size is invalid - %016llX\n", size);
        }
-       if ((size >> 28) != 1) panic("pmap_nest: size 0x%llx must be 0x%x", size, NBPDE);
-
-       subord->pm_shared = TRUE;
 
-       // prepopulate subord pmap pde's if necessary
+       PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_START,
+                  (int) grand, (int) subord,
+                  (int) (vstart>>32), (int) vstart, 0);
 
-       if (cpu_64bit) {
-         s = splhigh();
-         while (PD_ENTRY_NULL == (npde = pmap_pde(subord, nstart))) {
-           splx(s);
-           pmap_expand(subord, nstart);
-           s = splhigh();
-         }
-         splx(s);
-       }
-
-       PMAP_READ_LOCK(subord,s);
+       subord->pm_shared = TRUE;
        nvaddr = (vm_map_offset_t)nstart;
-       need_flush = 0;
        num_pde = size >> PDESHIFT;
 
-       for (i=0;i<num_pde;i++) {
+       PMAP_LOCK(subord);
+       for (i = 0; i < num_pde; i++) {
          npde = pmap_pde(subord, nvaddr);
-         if ((0 == npde) || (*npde++ & INTEL_PTE_VALID) == 0) {
-           PMAP_READ_UNLOCK(subord,s);
+         while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) {
+           PMAP_UNLOCK(subord);
            pmap_expand(subord, nvaddr); // pmap_expand handles races
-           PMAP_READ_LOCK(subord,s);
-           need_flush++;
+           PMAP_LOCK(subord);
+           npde = pmap_pde(subord, nvaddr);
          }
          nvaddr += NBPDE;
        }
 
-       if (need_flush) {
-         nvaddr = (vm_map_offset_t)nstart;
-         PMAP_UPDATE_TLBS(subord, nvaddr, nvaddr + (1 << 28) -1 );
-       }
-       PMAP_READ_UNLOCK(subord,s);
+       PMAP_UNLOCK(subord);
 
-       // copy pde's from subord pmap into grand pmap
+       vaddr = (vm_map_offset_t)vstart;
 
-       if (cpu_64bit) {
-         s = splhigh();
-         while (PD_ENTRY_NULL == (pde = pmap_pde(grand, vstart))) {
-           splx(s);
-           pmap_expand(grand, vstart);
-           s = splhigh();
-         }
-         splx(s);
-       }
+       PMAP_LOCK(grand);
 
-       PMAP_READ_LOCK(grand,s);
-       vaddr = (vm_map_offset_t)vstart;
-       for (i=0;i<num_pde;i++,pde++) {
+       for (i = 0;i < num_pde; i++) {
          pd_entry_t tpde;
+
          npde = pmap_pde(subord, nstart);
-         if (npde == 0) panic("pmap_nest: no npde, subord 0x%x nstart 0x%llx", subord, nstart);
+         if (npde == 0)
+                 panic("pmap_nest: no npde, subord %p nstart 0x%llx", subord, nstart);
          tpde = *npde;
          nstart += NBPDE;
          pde = pmap_pde(grand, vaddr);
-         if (pde == 0) panic("pmap_nest: no pde, grand  0x%x vaddr 0x%llx", grand, vaddr);
+/* Legacy mode does not require expansion.
+ * DRK: consider a debug mode test to verify that no PTEs are extant within
+ * this range.
+ */
+         if ((0 == pde) && cpu_64bit) {
+           PMAP_UNLOCK(grand);
+           pmap_expand_pdpt(grand, vaddr);
+           PMAP_LOCK(grand);
+           pde = pmap_pde(grand, vaddr);
+         }
+
+         if (pde == 0)
+                 panic("pmap_nest: no pde, grand  %p vaddr 0x%llx", grand, vaddr);
          vaddr += NBPDE;
          pmap_store_pte(pde, tpde);
        }
-       PMAP_UPDATE_TLBS(grand, vaddr, vaddr + (1 << 28) -1 );
 
-       PMAP_READ_UNLOCK(grand,s);
+       /* XXX FBDP: why do we need to flush here? */
+       PMAP_UPDATE_TLBS(grand, vstart, vstart + size - 1);
+
+       PMAP_UNLOCK(grand);
+
+       PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_END, 0, 0, 0, 0, 0);
 
        return KERN_SUCCESS;
 }
@@ -3880,30 +4380,48 @@ kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t n
  *     segment sized ranges.
  */
 
-kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr) {
+kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) {
                        
-       spl_t s;
        pd_entry_t *pde;
        unsigned int i;
        unsigned int num_pde;
+       addr64_t vstart, vend;
+
+       PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_START,
+                  (int) grand, 
+                  (int) (vaddr>>32), (int) vaddr, 0, 0);
 
-       PMAP_READ_LOCK(grand,s);
+       if ((size & (pmap_nesting_size_min-1)) ||
+           (vaddr & (pmap_nesting_size_min-1))) {
+               panic("pmap_unnest(%p,0x%llx,0x%llx): unaligned...\n",
+                     grand, vaddr, size);
+       }
+
+       /* align everything to PDE boundaries */
+       vstart = vaddr & ~(NBPDE-1);
+       vend = (vaddr + size + NBPDE - 1) & ~(NBPDE-1);
+       size = vend - vstart;
+
+       PMAP_LOCK(grand);
 
        // invalidate all pdes for segment at vaddr in pmap grand
 
-       num_pde = (1<<28) >> PDESHIFT;
+       num_pde = size >> PDESHIFT;
 
+       vaddr = vstart;
        for (i=0;i<num_pde;i++,pde++) {
          pde = pmap_pde(grand, (vm_map_offset_t)vaddr);
-         if (pde == 0) panic("pmap_unnest: no pde, grand 0x%x vaddr 0x%llx\n", grand, vaddr);
+         if (pde == 0) panic("pmap_unnest: no pde, grand %p vaddr 0x%llx\n", grand, vaddr);
          pmap_store_pte(pde, (pd_entry_t)0);
          vaddr += NBPDE;
        }
-       PMAP_UPDATE_TLBS(grand, vaddr, vaddr + (1<<28) -1 );
+       PMAP_UPDATE_TLBS(grand, vstart, vend);
 
-       PMAP_READ_UNLOCK(grand,s);
+       PMAP_UNLOCK(grand);
                
-       return KERN_SUCCESS;                                                            /* Bye, bye, butterfly... */
+       PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+       return KERN_SUCCESS;
 }
 
 void
@@ -3963,7 +4481,7 @@ vm_offset_t pmap_high_map(pt_entry_t pte, enum high_cpu_types e)
 
   a = e + HIGH_CPU_END * cpu_number();
   vaddr = (vm_offset_t)pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a);
-  *(pte_unique_base + a) = pte;
+  pmap_store_pte(pte_unique_base + a, pte);
 
   /* TLB flush for this page for this  cpu */
   invlpg((uintptr_t)vaddr);
@@ -3992,7 +4510,8 @@ pmap_flush_tlbs(pmap_t    pmap)
        boolean_t       flush_self = FALSE;
        uint64_t        deadline;
 
-       assert(!ml_get_interrupts_enabled());
+       assert((processor_avail_count < 2) ||
+              (ml_get_interrupts_enabled() && get_preemption_level() != 0));
 
        /*
         * Scan other cpus for matching active or task CR3.
@@ -4004,8 +4523,8 @@ pmap_flush_tlbs(pmap_t    pmap)
        for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
                if (!cpu_datap(cpu)->cpu_running)
                        continue;
-               if ((cpu_datap(cpu)->cpu_task_cr3   == pmap_cr3) ||
-                   (CPU_GET_ACTIVE_CR3(cpu) == pmap_cr3) ||
+               if ((cpu_datap(cpu)->cpu_task_cr3 == pmap_cr3) ||
+                   (CPU_GET_ACTIVE_CR3(cpu)      == pmap_cr3) ||
                    (pmap->pm_shared) ||
                    ((pmap == kernel_pmap) &&
                     (!CPU_CR3_IS_ACTIVE(cpu) ||
@@ -4024,9 +4543,10 @@ pmap_flush_tlbs(pmap_t   pmap)
                }
        }
 
-       if (cpus_to_signal) {
-               KERNEL_DEBUG(0xef800024 | DBG_FUNC_START, cpus_to_signal, 0, 0, 0, 0);
+       PMAP_TRACE(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_START,
+                  (int) pmap, cpus_to_signal, flush_self, 0, 0);
 
+       if (cpus_to_signal) {
                deadline = mach_absolute_time() + LockTimeOut;
                /*
                 * Wait for those other cpus to acknowledge
@@ -4039,16 +4559,17 @@ pmap_flush_tlbs(pmap_t  pmap)
                                        cpus_to_signal &= ~cpu_bit;
                                        break;
                                }
-                               if (mach_absolute_time() > deadline)
-                                       panic("pmap_flush_tlbs() "
-                                             "timeout pmap=%p cpus_to_signal=%p",
-                                             pmap, cpus_to_signal);
+                               if (mach_absolute_time() > deadline) {
+                                       force_immediate_debugger_NMI = TRUE;
+                                       panic("pmap_flush_tlbs() timeout: "
+                                                               "cpu %d failing to respond to interrupts, pmap=%p cpus_to_signal=%lx",
+                                                               cpu, pmap, cpus_to_signal);
+                               }
                                cpu_pause();
                        }
                        if (cpus_to_signal == 0)
                                break;
                }
-               KERNEL_DEBUG(0xef800024 | DBG_FUNC_END, cpus_to_signal, 0, 0, 0, 0);
        }
 
        /*
@@ -4060,11 +4581,16 @@ pmap_flush_tlbs(pmap_t  pmap)
        if (flush_self)
                flush_tlb();
 
+
+       PMAP_TRACE(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_END,
+                  (int) pmap, cpus_to_signal, flush_self, 0, 0);
 }
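
pmap_flush_tlbs() above implements a simple shootdown protocol: mark the target CPUs, send the interrupt, then spin until every signalled CPU clears its bit, panicking if the deadline passes. A stand-alone illustration of that acknowledge-and-wait loop, with a pthread standing in for the remote CPU and a soft timeout where the kernel would panic (hypothetical names throughout):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static atomic_uint cpus_to_signal;

static void *remote_cpu(void *arg)
{
        unsigned int my_bit = *(unsigned int *)arg;

        usleep(1000);                                   /* "handle the TLB interrupt" */
        atomic_fetch_and(&cpus_to_signal, ~my_bit);     /* acknowledge */
        return NULL;
}

int main(void)
{
        unsigned int bit = 1u << 1;                     /* pretend CPU 1 was signalled */
        pthread_t t;
        struct timespec start, now;

        atomic_store(&cpus_to_signal, bit);
        pthread_create(&t, NULL, remote_cpu, &bit);

        clock_gettime(CLOCK_MONOTONIC, &start);
        while (atomic_load(&cpus_to_signal) != 0) {     /* wait for acknowledgements */
                clock_gettime(CLOCK_MONOTONIC, &now);
                if (now.tv_sec - start.tv_sec > 1) {    /* ~1 s deadline */
                        fprintf(stderr, "timeout waiting for acknowledgement\n");
                        break;                          /* the kernel would panic here */
                }
        }
        pthread_join(t, NULL);
        printf("all signalled cpus acknowledged: %s\n",
               atomic_load(&cpus_to_signal) == 0 ? "yes" : "no");
        return 0;
}
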
 
 void
 process_pmap_updates(void)
 {
+       assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
        flush_tlb();
 
        current_cpu_datap()->cpu_tlb_invalid = FALSE;
@@ -4074,19 +4600,19 @@ process_pmap_updates(void)
 void
 pmap_update_interrupt(void)
 {
-        KERNEL_DEBUG(0xef800028 | DBG_FUNC_START, 0, 0, 0, 0, 0);
-
-       assert(!ml_get_interrupts_enabled());
+        PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START,
+                  0, 0, 0, 0, 0);
 
        process_pmap_updates();
 
-        KERNEL_DEBUG(0xef800028 | DBG_FUNC_END, 0, 0, 0, 0, 0);
+        PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END,
+                  0, 0, 0, 0, 0);
 }
 
 
 unsigned int pmap_cache_attributes(ppnum_t pn) {
 
-       if (!pmap_valid_page(pn))
+       if (!managed_page(ppn_to_pai(pn)))
                return (VM_WIMG_IO);
 
        return (VM_WIMG_COPYBACK);