[apple/xnu.git] / osfmk / x86_64 / pmap.c
index 13c439a96e85d25404bc5f27619658b2a879a15e..45be582574e381f16b027ad6b772d5462cbb51d8 100644 (file)
@@ -1,6 +1,5 @@
-
 /*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -90,8 +89,6 @@
  */
 
 #include <string.h>
-#include <norma_vm.h>
-#include <mach_kdb.h>
 #include <mach_ldebug.h>
 
 #include <libkern/OSAtomic.h>
 #include <kern/thread.h>
 #include <kern/zalloc.h>
 #include <kern/queue.h>
+#include <kern/ledger.h>
+#include <kern/mach_param.h>
 
-#include <kern/lock.h>
 #include <kern/kalloc.h>
 #include <kern/spl.h>
 
 
 #include <kern/misc_protos.h>                  /* prototyping */
 #include <i386/misc_protos.h>
+#include <i386/i386_lowmem.h>
 #include <x86_64/lowglobals.h>
 
 #include <i386/cpuid.h>
 #include <i386/proc_reg.h>
 #include <i386/tsc.h>
 #include <i386/pmap_internal.h>
-
-#if    MACH_KDB
-#include <ddb/db_command.h>
-#include <ddb/db_output.h>
-#include <ddb/db_sym.h>
-#include <ddb/db_print.h>
-#endif /* MACH_KDB */
+#include <i386/pmap_pcid.h>
+#if CONFIG_VMX
+#include <i386/vmx/vmx_cpu.h>
+#endif
 
 #include <vm/vm_protos.h>
 
 #include <i386/mp.h>
 #include <i386/mp_desc.h>
+#include <libkern/kernel_mach_header.h>
 
+#include <pexpert/i386/efi.h>
 
-/* #define DEBUGINTERRUPTS 1  uncomment to ensure pmap callers have interrupts enabled */
-#ifdef DEBUGINTERRUPTS
-#define pmap_intr_assert() {                                                   \
-       if (processor_avail_count > 1 && !ml_get_interrupts_enabled())          \
-               panic("pmap interrupt assert %s, %d",__FILE__, __LINE__);       \
-}
-#else
-#define pmap_intr_assert()
-#endif
 
 #ifdef IWANTTODEBUG
 #undef DEBUG
 #include <i386/postcode.h>
 #endif /* IWANTTODEBUG */
 
-boolean_t pmap_trace = FALSE;
-
-#if PMAP_DBG
-#define DBG(x...)       kprintf("DBG: " x)
+#ifdef PMAP_DEBUG
+#define DBG(x...)      kprintf("DBG: " x)
 #else
 #define DBG(x...)
 #endif
-
-boolean_t      no_shared_cr3 = DEBUG;          /* TRUE for DEBUG by default */
-
-/*
- * Forward declarations for internal functions.
+/* Compile time assert to ensure adjacency/alignment of per-CPU data fields used
+ * in the trampolines for kernel/user boundary TLB coherency.
  */
+char pmap_cpu_data_assert[(((offsetof(cpu_data_t, cpu_tlb_invalid) - offsetof(cpu_data_t, cpu_active_cr3)) == 8) && (offsetof(cpu_data_t, cpu_active_cr3) % 64 == 0)) ? 1 : -1];
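The line above relies on the negative-array-size idiom: if the bracketed condition is false the array size evaluates to -1, which is a compile error, so any change that separates cpu_active_cr3 and cpu_tlb_invalid or breaks the 64-byte alignment is caught at build time rather than in the trampoline code. A generic, self-contained form of the same trick (the STATIC_ASSERT name here is illustrative, not from this file):

        #include <stdint.h>

        /* compiles only when cond is true; otherwise the array size is -1 */
        #define STATIC_ASSERT(cond, tag) typedef char tag[(cond) ? 1 : -1]

        STATIC_ASSERT(sizeof(uint64_t) == 8, uint64_t_is_8_bytes);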
+boolean_t pmap_trace = FALSE;
 
-void           pmap_remove_range(
-                       pmap_t          pmap,
-                       vm_map_offset_t va,
-                       pt_entry_t      *spte,
-                       pt_entry_t      *epte);
-
-void           phys_attribute_clear(
-                       ppnum_t         phys,
-                       int             bits);
-
-int            phys_attribute_test(
-                       ppnum_t         phys,
-                       int             bits);
-
-void           phys_attribute_set(
-                       ppnum_t         phys,
-                       int             bits);
-
-void           pmap_set_reference(
-                       ppnum_t pn);
-
-boolean_t      phys_page_exists(
-                       ppnum_t pn);
-
+boolean_t      no_shared_cr3 = DEBUG;          /* TRUE for DEBUG by default */
 
 int nx_enabled = 1;                    /* enable no-execute protection */
 int allow_data_exec  = VM_ABI_32;      /* 32-bit apps may execute data by default, 64-bit apps may not */
@@ -209,165 +172,11 @@ int allow_stack_exec = 0;                /* No apps may execute from the stack by default */
 
 const boolean_t cpu_64bit  = TRUE; /* Mais oui! */
 
-/*
- * when spinning through pmap_remove
- * ensure that we don't spend too much
- * time with preemption disabled.
- * I'm setting the current threshold
- * to 20us
- */
-#define MAX_PREEMPTION_LATENCY_NS 20000
-
 uint64_t max_preemption_latency_tsc = 0;
 
-
-/*
- *     Private data structures.
- */
-
-/*
- *     For each vm_page_t, there is a list of all currently
- *     valid virtual mappings of that page.  An entry is
- *     a pv_rooted_entry_t; the list is the pv_table.
- *
- *      N.B.  with the new combo rooted/hashed scheme it is
- *      only possible to remove individual non-rooted entries
- *      if they are found via the hashed chains as there is no
- *      way to unlink the singly linked hashed entries if navigated to
- *      via the queue list off the rooted entries.  Think of it as
- *      hash/walk/pull, keeping track of the prev pointer while walking
- *      the singly linked hash list.  All of this is to save memory and
- *      keep both types of pv_entries as small as possible.
- */
-
-/*
-
-PV HASHING Changes - JK 1/2007
-
-Pve's establish physical to virtual mappings.  These are used for aliasing of a 
-physical page to (potentially many) virtual addresses within pmaps. In the
-previous implementation the structure of the pv_entries (each 16 bytes in size) was
-
-typedef struct pv_entry {
-    struct pv_entry_t    next;
-    pmap_t                    pmap;
-    vm_map_offset_t   va;
-} *pv_entry_t;
-
-An initial array of these is created at boot time, one per physical page of
-memory, indexed by the physical page number. Additionally, a pool of entries
-is created from a pv_zone to be used as needed by pmap_enter() when it is
-creating new mappings.  Originally, we kept this pool around because the code
-in pmap_enter() was unable to block if it needed an entry and none were
-available - we'd panic.  Some time ago I restructured the pmap_enter() code
-so that for user pmaps it can block while zalloc'ing a pv structure and restart,
-removing a panic from the code (in the case of the kernel pmap we cannot block
-and still panic, so, we keep a separate hot pool for use only on kernel pmaps).
-The pool has not been removed since there is a large performance gain keeping
-freed pv's around for reuse and not suffering the overhead of zalloc for every
-new pv we need.
-
-As pmap_enter() created new mappings it linked the new pve's for them off the
-fixed pv array for that ppn (off the next pointer).  These pve's are accessed
-for several operations, one of them being address space teardown. In that case,
-we basically do this
-
-       for (every page/pte in the space) {
-               calc pve_ptr from the ppn in the pte
-               for (every pv in the list for the ppn) {
-                       if (this pv is for this pmap/vaddr) {
-                               do housekeeping
-                               unlink/free the pv
-                       }
-               }
-       }
-
-The problem arose when we were running, say 8000 (or even 2000) apache or
-other processes and one or all terminate. The list hanging off each pv array
-entry could have thousands of entries.  We were continuously linearly searching
-each of these lists as we stepped through the address space we were tearing
-down.  Because of the locks we hold, likely taking a cache miss for each node,
-and interrupt disabling for MP issues the system became completely unresponsive
-for many seconds while we did this.
-
-Realizing that pve's are accessed in two distinct ways (linearly running the
-list by ppn for operations like pmap_page_protect and finding and
-modifying/removing a single pve as part of pmap_enter processing) has led to
-modifying the pve structures and databases.
-
-There are now two types of pve structures.  A "rooted" structure which is
-basically the original structure accessed in an array by ppn, and a ''hashed''
-structure accessed on a hash list via a hash of [pmap, vaddr]. These have been
-designed with the two goals of minimizing wired memory and making the lookup of
-a ppn faster.  Since a vast majority of pages in the system are not aliased
-and hence represented by a single pv entry I've kept the rooted entry size as
-small as possible because there is one of these dedicated for every physical
-page of memory.  The hashed pve's are larger due to the addition of the hash
-link and the ppn entry needed for matching while running the hash list to find
-the entry we are looking for.  This way, only systems that have lots of
-aliasing (like 2000+ httpd procs) will pay the extra memory price. Both
-structures have the same first three fields allowing some simplification in
-the code.
-
-They have these shapes
-
-typedef struct pv_rooted_entry {
-       queue_head_t            qlink;
-        vm_map_offset_t                va;
-       pmap_t                  pmap;
-} *pv_rooted_entry_t;
-
-
-typedef struct pv_hashed_entry {
-       queue_head_t            qlink;
-       vm_map_offset_t         va;
-       pmap_t                  pmap;
-       ppnum_t                 ppn;
-       struct pv_hashed_entry *nexth;
-} *pv_hashed_entry_t;
-
-The main flow difference is that the code is now aware of the rooted entry and
-the hashed entries.  Code that runs the pv list still starts with the rooted
-entry and then continues down the qlink onto the hashed entries.  Code that is
-looking up a specific pv entry first checks the rooted entry and then hashes
-and runs the hash list for the match. The hash list lengths are much smaller
-than the original pv lists that contained all aliases for the specific ppn.
-
-*/
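To make the lookup flow just described concrete, here is a minimal sketch written against the pv structures and helpers defined elsewhere in this file (pai_to_pvh, ppn_to_pai, pvhashidx, pvhash, PV_HASHED_ENTRY_NULL); the function name pv_lookup_sketch is hypothetical, and the real code additionally takes the pv-list and hash-bucket locks:

        static boolean_t
        pv_lookup_sketch(pmap_t pmap, vm_map_offset_t vaddr, ppnum_t ppn)
        {
                pv_rooted_entry_t       pv_h = pai_to_pvh(ppn_to_pai(ppn));
                pv_hashed_entry_t       pvh_e;

                if (pv_h->pmap == PMAP_NULL)
                        return FALSE;           /* page has no mappings at all */

                if (pv_h->pmap == pmap && pv_h->va == vaddr)
                        return TRUE;            /* hit in the rooted entry */

                /* miss on the root: hash [pmap, vaddr] and walk the chain */
                for (pvh_e = *pvhash(pvhashidx(pmap, vaddr));
                     pvh_e != PV_HASHED_ENTRY_NULL;
                     pvh_e = pvh_e->nexth) {
                        if (pvh_e->pmap == pmap && pvh_e->va == vaddr &&
                            pvh_e->ppn == ppn)
                                return TRUE;    /* hit on the hash chain */
                }
                return FALSE;
        }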
-
-typedef struct pv_rooted_entry {
-       /* first three entries must match pv_hashed_entry_t */
-        queue_head_t           qlink;
-       vm_map_offset_t         va;     /* virtual address for mapping */
-       pmap_t                  pmap;   /* pmap where mapping lies */
-} *pv_rooted_entry_t;
-
-#define PV_ROOTED_ENTRY_NULL   ((pv_rooted_entry_t) 0)
-
-pv_rooted_entry_t      pv_head_table;          /* array of entries, one per page */
-
-typedef struct pv_hashed_entry {
-       /* first three entries must match pv_rooted_entry_t */
-       queue_head_t            qlink;
-       vm_map_offset_t         va;
-       pmap_t                  pmap;
-       ppnum_t                 ppn;
-       struct pv_hashed_entry  *nexth;
-} *pv_hashed_entry_t;
-
-#define PV_HASHED_ENTRY_NULL ((pv_hashed_entry_t)0)
-
-#define NPVHASH 4095   /* MUST BE 2^N - 1 */
 pv_hashed_entry_t     *pv_hash_table;  /* hash lists */
 
-uint32_t npvhash = 0;
-
-//#define PV_DEBUG 1   /* uncomment to enable some PV debugging code */
-#ifdef PV_DEBUG
-#define CHK_NPVHASH() if(0 == npvhash) panic("npvhash uninitialized");
-#else
-#define CHK_NPVHASH(x)
-#endif
+uint32_t npvhashmask = 0, npvhashbuckets = 0;
 
 pv_hashed_entry_t      pv_hashed_free_list = PV_HASHED_ENTRY_NULL;
 pv_hashed_entry_t      pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL;
@@ -375,72 +184,10 @@ decl_simple_lock_data(,pv_hashed_free_list_lock)
 decl_simple_lock_data(,pv_hashed_kern_free_list_lock)
 decl_simple_lock_data(,pv_hash_table_lock)
 
-int                    pv_hashed_free_count = 0;
-int                    pv_hashed_kern_free_count = 0;
-#define PV_HASHED_LOW_WATER_MARK 5000
-#define PV_HASHED_KERN_LOW_WATER_MARK 100
-#define PV_HASHED_ALLOC_CHUNK 2000
-#define PV_HASHED_KERN_ALLOC_CHUNK 50
-thread_call_t          mapping_adjust_call;
-static thread_call_data_t mapping_adjust_call_data;
-uint32_t               mappingrecurse = 0;
-
-#define        PV_HASHED_ALLOC(pvh_e) {                                        \
-       simple_lock(&pv_hashed_free_list_lock);                         \
-       if ((pvh_e = pv_hashed_free_list) != 0) {                       \
-         pv_hashed_free_list = (pv_hashed_entry_t)pvh_e->qlink.next;   \
-          pv_hashed_free_count--;                                      \
-          if (pv_hashed_free_count < PV_HASHED_LOW_WATER_MARK)         \
-            if (hw_compare_and_store(0,1,(u_int *)&mappingrecurse))    \
-              thread_call_enter(mapping_adjust_call);                  \
-       }                                                               \
-       simple_unlock(&pv_hashed_free_list_lock);                       \
-}
-
-#define        PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt) {                   \
-       simple_lock(&pv_hashed_free_list_lock);                         \
-       pvh_et->qlink.next = (queue_entry_t)pv_hashed_free_list;        \
-       pv_hashed_free_list = pvh_eh;                                   \
-        pv_hashed_free_count += pv_cnt;                                        \
-       simple_unlock(&pv_hashed_free_list_lock);                       \
-}
-
-#define        PV_HASHED_KERN_ALLOC(pvh_e) {                                   \
-       simple_lock(&pv_hashed_kern_free_list_lock);                    \
-       if ((pvh_e = pv_hashed_kern_free_list) != 0) {                  \
-         pv_hashed_kern_free_list = (pv_hashed_entry_t)pvh_e->qlink.next; \
-          pv_hashed_kern_free_count--;                                 \
-          if (pv_hashed_kern_free_count < PV_HASHED_KERN_LOW_WATER_MARK)\
-            if (hw_compare_and_store(0,1,(u_int *)&mappingrecurse))    \
-              thread_call_enter(mapping_adjust_call);                  \
-       }                                                               \
-       simple_unlock(&pv_hashed_kern_free_list_lock);                  \
-}
-
-#define        PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt) {              \
-       simple_lock(&pv_hashed_kern_free_list_lock);                    \
-       pvh_et->qlink.next = (queue_entry_t)pv_hashed_kern_free_list;   \
-       pv_hashed_kern_free_list = pvh_eh;                              \
-        pv_hashed_kern_free_count += pv_cnt;                           \
-       simple_unlock(&pv_hashed_kern_free_list_lock);                  \
-}
+decl_simple_lock_data(,phys_backup_lock)
 
 zone_t         pv_hashed_list_zone;    /* zone of pv_hashed_entry structures */
 
-static zone_t pdpt_zone;
-
-/*
- *     Each entry in the pv_head_table is locked by a bit in the
- *     pv_lock_table.  The lock bits are accessed by the physical
- *     address of the page they lock.
- */
-
-char   *pv_lock_table;         /* pointer to array of bits */
-#define pv_lock_table_size(n)  (((n)+BYTE_SIZE-1)/BYTE_SIZE)
-
-char    *pv_hash_lock_table;
-#define pv_hash_lock_table_size(n)  (((n)+BYTE_SIZE-1)/BYTE_SIZE)
-
 /*
  *     First and last physical addresses that we maintain any information
  *     for.  Initialized to zero so that pmap operations done before
@@ -452,97 +199,19 @@ static struct vm_object kptobj_object_store;
 static struct vm_object kpml4obj_object_store;
 static struct vm_object kpdptobj_object_store;
 
-/*
- *     Index into pv_head table, its lock bits, and the modify/reference and managed bits
- */
-
-#define pa_index(pa)           (i386_btop(pa))
-#define ppn_to_pai(ppn)                ((int)ppn)
-
-#define pai_to_pvh(pai)                (&pv_head_table[pai])
-#define lock_pvh_pai(pai)      bit_lock(pai, (void *)pv_lock_table)
-#define unlock_pvh_pai(pai)    bit_unlock(pai, (void *)pv_lock_table)
-
-static inline uint32_t
-pvhashidx(pmap_t pmap, vm_offset_t va)
-{
-       return ((uint32_t)(uint64_t)pmap ^
-               ((uint32_t)((uint64_t)va >> PAGE_SHIFT) & 0xFFFFFFFF)) &
-              npvhash;
-}
-#define pvhash(idx)            (&pv_hash_table[idx])
-
-#define lock_hash_hash(hash)   bit_lock(hash, (void *)pv_hash_lock_table)
-#define unlock_hash_hash(hash) bit_unlock(hash, (void *)pv_hash_lock_table)
-
 /*
 *     Array of physical page attributes for managed pages.
  *     One byte per physical page.
  */
 char           *pmap_phys_attributes;
-unsigned int   last_managed_page = 0;
-#define IS_MANAGED_PAGE(x)                             \
-       ((unsigned int)(x) <= last_managed_page &&      \
-        (pmap_phys_attributes[x] & PHYS_MANAGED))
-
-/*
- *     Physical page attributes.  Copy bits from PTE definition.
- */
-#define        PHYS_MODIFIED   INTEL_PTE_MOD   /* page modified */
-#define        PHYS_REFERENCED INTEL_PTE_REF   /* page referenced */
-#define PHYS_MANAGED   INTEL_PTE_VALID /* page is managed */
+ppnum_t                last_managed_page = 0;
 
 /*
  *     Amount of virtual memory mapped by one
  *     page-directory entry.
  */
-#define        PDE_MAPPED_SIZE         (pdetova(1))
-uint64_t pde_mapped_size = PDE_MAPPED_SIZE;
-
-/*
- *     Locking and TLB invalidation
- */
-
-/*
- *     Locking Protocols: (changed 2/2007 JK)
- *
- *     There are two structures in the pmap module that need locking:
- *     the pmaps themselves, and the per-page pv_lists (which are locked
- *     by locking the pv_lock_table entry that corresponds to the pv_head
- *     for the list in question.)  Most routines want to lock a pmap and
- *     then do operations in it that require pv_list locking -- however
- *     pmap_remove_all and pmap_copy_on_write operate on a physical page
- *     basis and want to do the locking in the reverse order, i.e. lock
- *     a pv_list and then go through all the pmaps referenced by that list.
- *
- *      The system wide pmap lock has been removed. Now, paths take a lock
- *      on the pmap before changing its 'shape' and the reverse order lockers
- *      (coming in by phys ppn) take a lock on the corresponding pv and then
- *      retest to be sure nothing changed during the window before they locked
- *      and can then run up/down the pv lists holding the list lock. This also
- *      lets the pmap layer run (nearly completely) interrupt enabled, unlike
- *      previously.
- */
-
-/*
- * PV locking
- */
-
-#define LOCK_PVH(index)        {               \
-       mp_disable_preemption();        \
-       lock_pvh_pai(index);            \
-}
-
-#define UNLOCK_PVH(index) {            \
-       unlock_pvh_pai(index);          \
-       mp_enable_preemption();         \
-}
-/*
- * PV hash locking
- */
 
-#define LOCK_PV_HASH(hash)         lock_hash_hash(hash)
-#define UNLOCK_PV_HASH(hash)       unlock_hash_hash(hash)
+uint64_t pde_mapped_size = PDE_MAPPED_SIZE;
 
 unsigned pmap_memory_region_count;
 unsigned pmap_memory_region_current;
@@ -557,26 +226,20 @@ pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE];
 struct pmap    kernel_pmap_store;
 pmap_t         kernel_pmap;
 
-pd_entry_t     high_shared_pde;
-pd_entry_t     commpage64_pde;
-
 struct zone    *pmap_zone;             /* zone of pmap structures */
 
+struct zone    *pmap_anchor_zone;
 int            pmap_debug = 0;         /* flag for debugging prints */
 
 unsigned int   inuse_ptepages_count = 0;
+long long      alloc_ptepages_count __attribute__((aligned(8))) = 0; /* aligned for atomic access */
+unsigned int   bootstrap_wired_pages = 0;
+int            pt_fake_zone_index = -1;
 
-addr64_t       kernel64_cr3;
+extern         long    NMIPI_acks;
 
-/*
- *     Pmap cache.  Cache is threaded through ref_count field of pmap.
- *     Max will eventually be constant -- variable for experimentation.
- */
-int            pmap_cache_max = 32;
-int            pmap_alloc_chunk = 8;
-pmap_t         pmap_cache_list;
-int            pmap_cache_count;
-decl_simple_lock_data(,pmap_cache_lock)
+boolean_t      kernel_text_ps_4K = TRUE;
+boolean_t      wpkernel = TRUE;
 
 extern char    end;
 
@@ -586,279 +249,14 @@ pt_entry_t     *DMAP1, *DMAP2;
 caddr_t         DADDR1;
 caddr_t         DADDR2;
 
-/*
- * unlinks the pv_hashed_entry_t pvh from the singly linked hash chain.
- * properly deals with the anchor.
- * must be called with the hash locked, does not unlock it
- */
-
-static inline void 
-pmap_pvh_unlink(pv_hashed_entry_t pvh)
-{
-       pv_hashed_entry_t       curh;
-       pv_hashed_entry_t       *pprevh;
-       int                     pvhash_idx;
-
-       CHK_NPVHASH();
-       pvhash_idx = pvhashidx(pvh->pmap, pvh->va);
-
-       pprevh = pvhash(pvhash_idx);
-
-#if PV_DEBUG
-       if (NULL == *pprevh)
-               panic("pvh_unlink null anchor"); /* JK DEBUG */
-#endif
-       curh = *pprevh;
-
-       while (PV_HASHED_ENTRY_NULL != curh) {
-               if (pvh == curh)
-                       break;
-               pprevh = &curh->nexth;
-               curh = curh->nexth;
-       }
-       if (PV_HASHED_ENTRY_NULL == curh) panic("pmap_pvh_unlink no pvh");
-       *pprevh = pvh->nexth;
-       return;
-}
-
-static inline void
-pv_hash_add(pv_hashed_entry_t  pvh_e,
-           pv_rooted_entry_t   pv_h)
-{
-       pv_hashed_entry_t       *hashp;
-       int                     pvhash_idx;
-
-       CHK_NPVHASH();
-       pvhash_idx = pvhashidx(pvh_e->pmap, pvh_e->va);
-       LOCK_PV_HASH(pvhash_idx);
-       insque(&pvh_e->qlink, &pv_h->qlink);
-       hashp = pvhash(pvhash_idx);
-#if PV_DEBUG
-       if (NULL==hashp)
-               panic("pv_hash_add(%p) null hash bucket", pvh_e);
-#endif
-       pvh_e->nexth = *hashp;
-       *hashp = pvh_e;
-       UNLOCK_PV_HASH(pvhash_idx);
-}
-
-static inline void
-pv_hash_remove(pv_hashed_entry_t pvh_e)
-{
-       int                     pvhash_idx;
-
-       CHK_NPVHASH();
-       pvhash_idx = pvhashidx(pvh_e->pmap,pvh_e->va);
-       LOCK_PV_HASH(pvhash_idx);
-       remque(&pvh_e->qlink);
-       pmap_pvh_unlink(pvh_e);
-       UNLOCK_PV_HASH(pvhash_idx);
-} 
-
-/*
- * Remove pv list entry.
- * Called with pv_head_table entry locked.
- * Returns pv entry to be freed (or NULL).
- */
-static inline pv_hashed_entry_t
-pmap_pv_remove(pmap_t          pmap,
-              vm_map_offset_t  vaddr,
-              ppnum_t          ppn)
-{
-       pv_hashed_entry_t       pvh_e;
-       pv_rooted_entry_t       pv_h;
-       pv_hashed_entry_t       *pprevh;
-       int                     pvhash_idx;
-       uint32_t                pv_cnt;
-
-       pvh_e = PV_HASHED_ENTRY_NULL;
-       pv_h = pai_to_pvh(ppn_to_pai(ppn));
-       if (pv_h->pmap == PMAP_NULL)
-               panic("pmap_pv_remove(%p,%llu,%u): null pv_list!",
-                     pmap, vaddr, ppn);
-
-       if (pv_h->va == vaddr && pv_h->pmap == pmap) {
-               /*
-                * Header is the pv_rooted_entry.
-                * We can't free that. If there is a queued
-                * entry after this one we remove that
-                * from the ppn queue, we remove it from the hash chain
-                * and copy it to the rooted entry. Then free it instead.
-                */
-               pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink);
-               if (pv_h != (pv_rooted_entry_t) pvh_e) {
-                       /*
-                        * Entry queued to root, remove this from hash
-                        * and install as new root.
-                        */
-                       CHK_NPVHASH();
-                       pvhash_idx = pvhashidx(pvh_e->pmap, pvh_e->va);
-                       LOCK_PV_HASH(pvhash_idx);
-                       remque(&pvh_e->qlink);
-                       pprevh = pvhash(pvhash_idx);
-                       if (PV_HASHED_ENTRY_NULL == *pprevh) {
-                               panic("pmap_pv_remove(%p,%llu,%u): "
-                                     "empty hash, removing rooted",
-                                     pmap, vaddr, ppn);
-                       }
-                       pmap_pvh_unlink(pvh_e);
-                       UNLOCK_PV_HASH(pvhash_idx);
-                       pv_h->pmap = pvh_e->pmap;
-                       pv_h->va = pvh_e->va;   /* dispose of pvh_e */
-               } else {
-                       /* none queued after rooted */
-                       pv_h->pmap = PMAP_NULL;
-                       pvh_e = PV_HASHED_ENTRY_NULL;
-               }
-       } else {
-               /*
-                * not removing rooted pv. find it on hash chain, remove from
-                * ppn queue and hash chain and free it
-                */
-               CHK_NPVHASH();
-               pvhash_idx = pvhashidx(pmap, vaddr);
-               LOCK_PV_HASH(pvhash_idx);
-               pprevh = pvhash(pvhash_idx);
-               if (PV_HASHED_ENTRY_NULL == *pprevh) {
-                       panic("pmap_pv_remove(%p,%llu,%u): empty hash",
-                             pmap, vaddr, ppn);
-               }
-               pvh_e = *pprevh;
-               pmap_pv_hashlist_walks++;
-               pv_cnt = 0;
-               while (PV_HASHED_ENTRY_NULL != pvh_e) {
-                       pv_cnt++;
-                       if (pvh_e->pmap == pmap &&
-                           pvh_e->va == vaddr &&
-                           pvh_e->ppn == ppn)
-                               break;
-                       pprevh = &pvh_e->nexth;
-                       pvh_e = pvh_e->nexth;
-               }
-               if (PV_HASHED_ENTRY_NULL == pvh_e)
-                       panic("pmap_pv_remove(%p,%llu,%u): pv not on hash",
-                        pmap, vaddr, ppn);
-               pmap_pv_hashlist_cnts += pv_cnt;
-               if (pmap_pv_hashlist_max < pv_cnt)
-                       pmap_pv_hashlist_max = pv_cnt;
-               *pprevh = pvh_e->nexth;
-               remque(&pvh_e->qlink);
-               UNLOCK_PV_HASH(pvhash_idx);
-       }
-
-       return pvh_e;
-}
-
-/*
- * for legacy, returns the address of the pde entry.
- * for 64 bit, causes the pdpt page containing the pde entry to be mapped,
- * then returns the mapped address of the pde entry in that page
- */
-pd_entry_t     *
-pmap_pde(pmap_t m, vm_map_offset_t v)
-{
-       pd_entry_t     *pde;
-
-       assert(m);
-#if 0
-       if (m == kernel_pmap)
-               pde = (&((m)->dirbase[(vm_offset_t)(v) >> PDESHIFT]));
-       else
-#endif
-               pde = pmap64_pde(m, v);
-
-       return pde;
-}
-
-/*
- * the single pml4 page per pmap is allocated at pmap create time and exists
- * for the duration of the pmap. we allocate this page in kernel vm.
- * this returns the address of the requested pml4 entry in the top level page.
- */
-static inline
-pml4_entry_t *
-pmap64_pml4(pmap_t pmap, vm_map_offset_t vaddr)
-{
-       return &pmap->pm_pml4[(vaddr >> PML4SHIFT) & (NPML4PG-1)];
-}
-
-/*
- * maps in the pml4 page, if any, containing the pdpt entry requested
- * and returns the address of the pdpt entry in that mapped page
- */
-pdpt_entry_t *
-pmap64_pdpt(pmap_t pmap, vm_map_offset_t vaddr)
-{
-       pml4_entry_t    newpf;
-       pml4_entry_t    *pml4;
-
-       assert(pmap);
-       if ((vaddr > 0x00007FFFFFFFFFFFULL) &&
-           (vaddr < 0xFFFF800000000000ULL)) {
-               return (0);
-       }
-
-       pml4 = pmap64_pml4(pmap, vaddr);
-       if (pml4 && ((*pml4 & INTEL_PTE_VALID))) {
-               newpf = *pml4 & PG_FRAME;
-               return &((pdpt_entry_t *) PHYSMAP_PTOV(newpf))
-                       [(vaddr >> PDPTSHIFT) & (NPDPTPG-1)];
-       }
-       return (NULL);
-}
-/*
- * maps in the pdpt page, if any, containing the pde entry requested
- * and returns the address of the pde entry in that mapped page
- */
-pd_entry_t *
-pmap64_pde(pmap_t pmap, vm_map_offset_t vaddr)
-{
-       pdpt_entry_t    newpf;
-       pdpt_entry_t    *pdpt;
-
-       assert(pmap);
-       if ((vaddr > 0x00007FFFFFFFFFFFULL) &&
-           (vaddr < 0xFFFF800000000000ULL)) {
-               return (0);
-       }
-
-       pdpt = pmap64_pdpt(pmap, vaddr);
-
-       if (pdpt && ((*pdpt & INTEL_PTE_VALID))) {
-               newpf = *pdpt & PG_FRAME;
-               return &((pd_entry_t *) PHYSMAP_PTOV(newpf))
-                       [(vaddr >> PDSHIFT) & (NPDPG-1)];
-       }
-       return (NULL);
-}
+boolean_t      pmap_disable_kheap_nx = FALSE;
+boolean_t      pmap_disable_kstack_nx = FALSE;
+extern boolean_t doconstro_override;
 
-/*
- * return address of mapped pte for vaddr va in pmap pmap.
- *
- * physically maps the pde page, if any, containing the pte in and returns
- * the address of the pte in that mapped page
- *
- * In case the pde maps a superpage, return the pde, which, in this case
- * is the actual page table entry.
- */
-pt_entry_t *
-pmap_pte(pmap_t pmap, vm_map_offset_t vaddr)
-{
-       pd_entry_t      *pde;
-       pd_entry_t      newpf;
+extern long __stack_chk_guard[];
 
-       assert(pmap);
-       pde = pmap_pde(pmap, vaddr);
+boolean_t pmap_ept_support_ad = FALSE;
 
-       if (pde && ((*pde & INTEL_PTE_VALID))) {
-               if (*pde & INTEL_PTE_PS) 
-                       return pde;
-               newpf = *pde & PG_FRAME;
-               return &((pt_entry_t *)PHYSMAP_PTOV(newpf))
-                       [i386_btop(vaddr) & (ppnum_t)(NPTEPG-1)];
-       }
-       return (NULL);
-}
 
 /*
  *     Map memory at initialization.  The physical addresses being
@@ -880,64 +278,13 @@ pmap_map(
        ps = PAGE_SIZE;
        while (start_addr < end_addr) {
                pmap_enter(kernel_pmap, (vm_map_offset_t)virt,
-                          (ppnum_t) i386_btop(start_addr), prot, flags, FALSE);
+                          (ppnum_t) i386_btop(start_addr), prot, VM_PROT_NONE, flags, TRUE);
                virt += ps;
                start_addr += ps;
        }
        return(virt);
 }
 
-/*
- *     Back-door routine for mapping kernel VM at initialization.  
- *     Useful for mapping memory outside the range
- *      Sets no-cache, A, D.
- *     Otherwise like pmap_map.
- */
-vm_offset_t
-pmap_map_bd(
-       vm_offset_t     virt,
-       vm_map_offset_t start_addr,
-       vm_map_offset_t end_addr,
-       vm_prot_t       prot,
-       unsigned int    flags)
-{
-       pt_entry_t      template;
-       pt_entry_t      *pte;
-       spl_t           spl;
-
-       template = pa_to_pte(start_addr)
-               | INTEL_PTE_REF
-               | INTEL_PTE_MOD
-               | INTEL_PTE_WIRED
-               | INTEL_PTE_VALID;
-
-       if (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)) {
-               template |= INTEL_PTE_NCACHE;
-               if (!(flags & (VM_MEM_GUARDED | VM_WIMG_USE_DEFAULT)))
-                       template |= INTEL_PTE_PTA;
-       }
-       if (prot & VM_PROT_WRITE)
-               template |= INTEL_PTE_WRITE;
-
-
-       while (start_addr < end_addr) {
-               spl = splhigh();
-               pte = pmap_pte(kernel_pmap, (vm_map_offset_t)virt);
-               if (pte == PT_ENTRY_NULL) {
-                       panic("pmap_map_bd: Invalid kernel address\n");
-               }
-               pmap_store_pte(pte, template);
-               splx(spl);
-               pte_increment_pa(template);
-               virt += PAGE_SIZE;
-               start_addr += PAGE_SIZE;
-       }
-
-
-       flush_tlb();
-       return(virt);
-}
-
 extern char                    *first_avail;
 extern vm_offset_t             virtual_avail, virtual_end;
 extern pmap_paddr_t            avail_start, avail_end;
@@ -945,26 +292,66 @@ extern  vm_offset_t               sHIB;
 extern  vm_offset_t            eHIB;
 extern  vm_offset_t            stext;
 extern  vm_offset_t            etext;
-extern  vm_offset_t            sdata;
+extern  vm_offset_t            sdata, edata;
+extern  vm_offset_t            sconstdata, econstdata;
+
+extern void                    *KPTphys;
+
+boolean_t pmap_smep_enabled = FALSE;
+boolean_t pmap_smap_enabled = FALSE;
 
 void
 pmap_cpu_init(void)
 {
+       cpu_data_t      *cdp = current_cpu_datap();
        /*
         * Here early in the life of a processor (from cpu_mode_init()).
-        * Ensure global page feature is disabled.
+        * Ensure global page feature is disabled at this point.
         */
+
        set_cr4(get_cr4() &~ CR4_PGE);
 
        /*
         * Initialize the per-cpu, TLB-related fields.
         */
-       current_cpu_datap()->cpu_kernel_cr3 = kernel_pmap->pm_cr3;
-       current_cpu_datap()->cpu_active_cr3 = kernel_pmap->pm_cr3;
-       current_cpu_datap()->cpu_tlb_invalid = FALSE;
+       cdp->cpu_kernel_cr3 = kernel_pmap->pm_cr3;
+       cdp->cpu_active_cr3 = kernel_pmap->pm_cr3;
+       cdp->cpu_tlb_invalid = FALSE;
+       cdp->cpu_task_map = TASK_MAP_64BIT;
+       pmap_pcid_configure();
+       if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMEP) {
+               boolean_t nsmep;
+               if (!PE_parse_boot_argn("-pmap_smep_disable", &nsmep, sizeof(nsmep))) {
+                       set_cr4(get_cr4() | CR4_SMEP);
+                       pmap_smep_enabled = TRUE;
+               }
+       }
+       if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMAP) {
+               boolean_t nsmap;
+               if (!PE_parse_boot_argn("-pmap_smap_disable", &nsmap, sizeof(nsmap))) {
+                       set_cr4(get_cr4() | CR4_SMAP);
+                       pmap_smap_enabled = TRUE;
+               }
+       }
+
+       if (cdp->cpu_fixed_pmcs_enabled) {
+               boolean_t enable = TRUE;
+               cpu_pmc_control(&enable);
+       }
 }
 
+static uint32_t pmap_scale_shift(void) {
+       uint32_t scale = 0;
 
+       if (sane_size <= 8*GB) {
+               scale = (uint32_t)(sane_size / (2 * GB));
+       } else if (sane_size <= 32*GB) {
+               scale = 4 + (uint32_t)((sane_size - (8 * GB))/ (4 * GB)); 
+       } else {
+               scale = 10 + (uint32_t)MIN(4, ((sane_size - (32 * GB))/ (8 * GB))); 
+       }
+       return scale;
+}
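As a worked example of the sizing this drives: with sane_size = 16 GB the middle branch applies, so pmap_scale_shift() returns 4 + (16 - 8)/4 = 6 and the default computed in pmap_bootstrap() below becomes npvhashmask = (NPVHASHBUCKETS << 6) - 1, i.e. 64 times the base bucket count, minus one. The subsequent sanity check passes as long as NPVHASHBUCKETS is itself a power of two, so the hash table scales with installed memory while npvhashmask remains a valid (2^N)-1 mask.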
 
 /*
  *     Bootstrap the system enough to run with virtual memory.
@@ -981,7 +368,6 @@ pmap_bootstrap(
        vm_offset_t     va;
        int i;
 #endif
-
        assert(IA32e);
 
        vm_last_addr = VM_MAX_KERNEL_ADDRESS;   /* Set the highest address
@@ -994,19 +380,24 @@ pmap_bootstrap(
 
        kernel_pmap = &kernel_pmap_store;
        kernel_pmap->ref_count = 1;
-       kernel_pmap->nx_enabled = FALSE;
+       kernel_pmap->nx_enabled = TRUE;
        kernel_pmap->pm_task_map = TASK_MAP_64BIT;
        kernel_pmap->pm_obj = (vm_object_t) NULL;
        kernel_pmap->dirbase = (pd_entry_t *)((uintptr_t)IdlePTD);
        kernel_pmap->pm_pdpt = (pd_entry_t *) ((uintptr_t)IdlePDPT);
        kernel_pmap->pm_pml4 = IdlePML4;
        kernel_pmap->pm_cr3 = (uintptr_t)ID_MAP_VTOP(IdlePML4);
+       kernel_pmap->pm_eptp = 0;
+       pmap_pcid_initialize_kernel(kernel_pmap);
 
+       
 
        current_cpu_datap()->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3;
 
        nkpt = NKPT;
        OSAddAtomic(NKPT,  &inuse_ptepages_count);
+       OSAddAtomic64(NKPT,  &alloc_ptepages_count);
+       bootstrap_wired_pages = NKPT;
 
        virtual_avail = (vm_offset_t)(VM_MIN_KERNEL_ADDRESS) + (vm_offset_t)first_avail;
        virtual_end = (vm_offset_t)(VM_MAX_KERNEL_ADDRESS);
@@ -1046,26 +437,60 @@ pmap_bootstrap(
 
        virtual_avail = va;
 #endif
+       if (!PE_parse_boot_argn("npvhash", &npvhashmask, sizeof (npvhashmask))) {
+               npvhashmask = ((NPVHASHBUCKETS) << pmap_scale_shift()) - 1;
 
-       if (PE_parse_boot_argn("npvhash", &npvhash, sizeof (npvhash))) {
-               if (0 != ((npvhash + 1) & npvhash)) {
-                       kprintf("invalid hash %d, must be ((2^N)-1), "
-                               "using default %d\n", npvhash, NPVHASH);
-                       npvhash = NPVHASH;
-               }
-       } else {
-               npvhash = NPVHASH;
        }
 
-       printf("npvhash=%d\n", npvhash);
+       npvhashbuckets = npvhashmask + 1;
+
+       if (0 != ((npvhashbuckets) & npvhashmask)) {
+               panic("invalid hash %d, must be ((2^N)-1), "
+                   "using default %d\n", npvhashmask, NPVHASHMASK);
+       }
 
        simple_lock_init(&kernel_pmap->lock, 0);
        simple_lock_init(&pv_hashed_free_list_lock, 0);
        simple_lock_init(&pv_hashed_kern_free_list_lock, 0);
        simple_lock_init(&pv_hash_table_lock,0);
+       simple_lock_init(&phys_backup_lock, 0);
 
        pmap_cpu_init();
 
+       if (pmap_pcid_ncpus)
+               printf("PMAP: PCID enabled\n");
+
+       if (pmap_smep_enabled)
+               printf("PMAP: Supervisor Mode Execute Protection enabled\n");
+       if (pmap_smap_enabled)
+               printf("PMAP: Supervisor Mode Access Protection enabled\n");
+
+#if    DEBUG
+       printf("Stack canary: 0x%lx\n", __stack_chk_guard[0]);
+       printf("early_random(): 0x%qx\n", early_random());
+#endif
+       boolean_t ptmp;
+       /* Check if the user has requested disabling stack or heap no-execute
+        * enforcement. These are "const" variables; that qualifier is cast away
+        * when altering them. The TEXT/DATA const sections are marked
+        * write protected later in the kernel startup sequence, so altering
+        * them is possible at this point, in pmap_bootstrap().
+        */
+       if (PE_parse_boot_argn("-pmap_disable_kheap_nx", &ptmp, sizeof(ptmp))) {
+               boolean_t *pdknxp = (boolean_t *) &pmap_disable_kheap_nx;
+               *pdknxp = TRUE;
+       }
+
+       if (PE_parse_boot_argn("-pmap_disable_kstack_nx", &ptmp, sizeof(ptmp))) {
+               boolean_t *pdknhp = (boolean_t *) &pmap_disable_kstack_nx;
+               *pdknhp = TRUE;
+       }
+
+       boot_args *args = (boot_args *)PE_state.bootArgs;
+       if (args->efiMode == kBootArgsEfiMode32) {
+               printf("EFI32: kernel virtual space limited to 4GB\n");
+               virtual_end = VM_MAX_KERNEL_ADDRESS_EFI32;
+       }
        kprintf("Kernel virtual space from 0x%lx to 0x%lx.\n",
                        (long)KERNEL_BASE, (long)virtual_end);
        kprintf("Available physical space from 0x%llx to 0x%llx\n",
@@ -1099,73 +524,203 @@ pmap_virtual_space(
        *endp = virtual_end;
 }
 
-/*
- *     Initialize the pmap module.
- *     Called by vm_init, to initialize any structures that the pmap
- *     system needs to map virtual memory.
- */
-void
-pmap_init(void)
-{
-       long                    npages;
-       vm_offset_t             addr;
-       vm_size_t               s;
-       vm_map_offset_t         vaddr;
-       ppnum_t ppn;
 
 
-       kernel_pmap->pm_obj_pml4 = &kpml4obj_object_store;
-       _vm_object_allocate((vm_object_size_t)NPML4PGS, &kpml4obj_object_store);
 
-       kernel_pmap->pm_obj_pdpt = &kpdptobj_object_store;
-       _vm_object_allocate((vm_object_size_t)NPDPTPGS, &kpdptobj_object_store);
+#if HIBERNATION
 
-       kernel_pmap->pm_obj = &kptobj_object_store;
-       _vm_object_allocate((vm_object_size_t)NPDEPGS, &kptobj_object_store);
+#include <IOKit/IOHibernatePrivate.h>
 
-       /*
-        *      Allocate memory for the pv_head_table and its lock bits,
-        *      the modify bit array, and the pte_page table.
-        */
+int32_t                pmap_npages;
+int32_t                pmap_teardown_last_valid_compact_indx = -1;
 
-       /*
-        * zero bias all these arrays now instead of off avail_start
-        * so we cover all memory
-        */
 
-       npages = i386_btop(avail_end);
-       s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages
-                        + (sizeof (struct pv_hashed_entry_t *) * (npvhash+1))
-                        + pv_lock_table_size(npages)
-                        + pv_hash_lock_table_size((npvhash+1))
-                               + npages);
+void   hibernate_rebuild_pmap_structs(void);
+void   hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
+void   pmap_pack_index(uint32_t);
+int32_t        pmap_unpack_index(pv_rooted_entry_t);
 
-       s = round_page(s);
-       if (kernel_memory_allocate(kernel_map, &addr, s, 0,
-                                  KMA_KOBJECT | KMA_PERMANENT)
-           != KERN_SUCCESS)
-               panic("pmap_init");
 
-       memset((char *)addr, 0, s);
+int32_t
+pmap_unpack_index(pv_rooted_entry_t pv_h)
+{
+       int32_t indx = 0;
 
-#if PV_DEBUG
-       if (0 == npvhash) panic("npvhash not initialized");
-#endif
+       indx = (int32_t)(*((uint64_t *)(&pv_h->qlink.next)) >> 48);
+       indx = indx << 16;
+       indx |= (int32_t)(*((uint64_t *)(&pv_h->qlink.prev)) >> 48);
+       
+       *((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)0xffff << 48);
+       *((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)0xffff << 48);
 
-       /*
-        *      Allocate the structures first to preserve word-alignment.
-        */
+       return (indx);
+}
+
+
+void
+pmap_pack_index(uint32_t indx)
+{
+       pv_rooted_entry_t       pv_h;
+
+       pv_h = &pv_head_table[indx];
+
+       *((uint64_t *)(&pv_h->qlink.next)) &= ~((uint64_t)0xffff << 48);
+       *((uint64_t *)(&pv_h->qlink.prev)) &= ~((uint64_t)0xffff << 48);
+
+       *((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)(indx >> 16)) << 48;
+       *((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)(indx & 0xffff)) << 48;
+}
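Taken together, the two routines above stash a 32-bit table index in pointer bits that are otherwise redundant for kernel addresses: pmap_pack_index() writes the high 16 bits of the index into the top 16 bits of qlink.next and the low 16 bits into the top 16 bits of qlink.prev, while pmap_unpack_index() reassembles the index and then ORs 0xffff back into those bits, restoring the canonical all-ones top half of a kernel pointer. For example, packing indx = 0x00012345 leaves 0x0001 in the top of next and 0x2345 in the top of prev, and unpacking recovers (0x0001 << 16) | 0x2345 = 0x00012345.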
+
+
+void
+hibernate_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end)
+{
+       int32_t         i;
+       int32_t         compact_target_indx;
+
+       compact_target_indx = 0;
+
+       for (i = 0; i < pmap_npages; i++) {
+               if (pv_head_table[i].pmap == PMAP_NULL) {
+
+                       if (pv_head_table[compact_target_indx].pmap != PMAP_NULL)
+                               compact_target_indx = i;
+               } else {
+                       pmap_pack_index((uint32_t)i);
+
+                       if (pv_head_table[compact_target_indx].pmap == PMAP_NULL) {
+                               /*
+                                 * we've got a hole to fill, so
+                                 * move this pv_rooted_entry_t to its new home
+                                 */
+                               pv_head_table[compact_target_indx] = pv_head_table[i];
+                               pv_head_table[i].pmap = PMAP_NULL;
+                               
+                               pmap_teardown_last_valid_compact_indx = compact_target_indx;
+                               compact_target_indx++;
+                       } else
+                               pmap_teardown_last_valid_compact_indx = i;
+               }
+       }
+       *unneeded_start = (addr64_t)&pv_head_table[pmap_teardown_last_valid_compact_indx+1];
+       *unneeded_end = (addr64_t)&pv_head_table[pmap_npages-1];
+       
+       HIBLOG("hibernate_teardown_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
+}
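A small example of the compaction, assuming an 8-entry pv_head_table with live entries at indices 0, 3 and 7: each live entry is stamped with its original index via pmap_pack_index() and slid into the lowest free slot, so the live entries end up in slots 0-2, pmap_teardown_last_valid_compact_indx becomes 2, and &pv_head_table[3] through &pv_head_table[7] are reported as unneeded for the hibernation image. hibernate_rebuild_pmap_structs() below reverses this by walking from that index back to 0, unpacking each stored index, moving the entry back to its original slot, and zero-filling the holes in between.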
+
+
+void
+hibernate_rebuild_pmap_structs(void)
+{
+       int32_t                 cindx, eindx, rindx;
+       pv_rooted_entry_t       pv_h;
+
+       eindx = (int32_t)pmap_npages;
+
+       for (cindx = pmap_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
+
+               pv_h = &pv_head_table[cindx];
+
+               rindx = pmap_unpack_index(pv_h);
+               assert(rindx < pmap_npages);
+
+               if (rindx != cindx) {
+                       /*
+                        * this pv_rooted_entry_t was moved by hibernate_teardown_pmap_structs,
+                        * so move it back to its real location
+                        */
+                       pv_head_table[rindx] = pv_head_table[cindx];
+               }
+               if (rindx+1 != eindx) {
+                       /*
+                        * the 'hole' between this pv_rooted_entry_t and the previous
+                        * pv_rooted_entry_t we moved needs to be initialized as
+                        * a range of zero'd pv_rooted_entry_t's
+                        */
+                       bzero((char *)&pv_head_table[rindx+1], (eindx - rindx - 1) * sizeof (struct pv_rooted_entry));
+               }
+               eindx = rindx;
+       }
+       if (rindx)
+               bzero ((char *)&pv_head_table[0], rindx * sizeof (struct pv_rooted_entry));
+
+       HIBLOG("hibernate_rebuild_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
+}
+
+#endif
+
+/*
+ *     Initialize the pmap module.
+ *     Called by vm_init, to initialize any structures that the pmap
+ *     system needs to map virtual memory.
+ */
+void
+pmap_init(void)
+{
+       long                    npages;
+       vm_offset_t             addr;
+       vm_size_t               s, vsize;
+       vm_map_offset_t         vaddr;
+       ppnum_t ppn;
+
+
+       kernel_pmap->pm_obj_pml4 = &kpml4obj_object_store;
+       _vm_object_allocate((vm_object_size_t)NPML4PGS * PAGE_SIZE, &kpml4obj_object_store);
+
+       kernel_pmap->pm_obj_pdpt = &kpdptobj_object_store;
+       _vm_object_allocate((vm_object_size_t)NPDPTPGS * PAGE_SIZE, &kpdptobj_object_store);
+
+       kernel_pmap->pm_obj = &kptobj_object_store;
+       _vm_object_allocate((vm_object_size_t)NPDEPGS * PAGE_SIZE, &kptobj_object_store);
+
+       /*
+        *      Allocate memory for the pv_head_table and its lock bits,
+        *      the modify bit array, and the pte_page table.
+        */
+
+       /*
+        * zero bias all these arrays now instead of off avail_start
+        * so we cover all memory
+        */
+
+       npages = i386_btop(avail_end);
+#if HIBERNATION
+       pmap_npages = (uint32_t)npages;
+#endif 
+       s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages
+                        + (sizeof (struct pv_hashed_entry_t *) * (npvhashbuckets))
+                        + pv_lock_table_size(npages)
+                        + pv_hash_lock_table_size((npvhashbuckets))
+                               + npages);
+       s = round_page(s);
+       if (kernel_memory_allocate(kernel_map, &addr, s, 0,
+                                  KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_PMAP)
+           != KERN_SUCCESS)
+               panic("pmap_init");
+
+       memset((char *)addr, 0, s);
+
+       vaddr = addr;
+       vsize = s;
+
+#if PV_DEBUG
+       if (0 == npvhashmask) panic("npvhashmask not initialized");
+#endif
+
+       /*
+        *      Allocate the structures first to preserve word-alignment.
+        */
        pv_head_table = (pv_rooted_entry_t) addr;
        addr = (vm_offset_t) (pv_head_table + npages);
 
        pv_hash_table = (pv_hashed_entry_t *)addr;
-       addr = (vm_offset_t) (pv_hash_table + (npvhash + 1));
+       addr = (vm_offset_t) (pv_hash_table + (npvhashbuckets));
 
        pv_lock_table = (char *) addr;
        addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
 
        pv_hash_lock_table = (char *) addr;
-       addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhash+1)));
+       addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhashbuckets)));
 
        pmap_phys_attributes = (char *) addr;
 
@@ -1175,34 +730,56 @@ pmap_init(void)
        for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
                if (pmptr->type != kEfiConventionalMemory)
                        continue;
-               unsigned int pn;
+               ppnum_t pn;
                for (pn = pmptr->base; pn <= pmptr->end; pn++) {
                        if (pn < last_pn) {
                                pmap_phys_attributes[pn] |= PHYS_MANAGED;
+
                                if (pn > last_managed_page)
                                        last_managed_page = pn;
+
+                               if (pn >= lowest_hi && pn <= highest_hi)
+                                       pmap_phys_attributes[pn] |= PHYS_NOENCRYPT;
                        }
                }
        }
+       while (vsize) {
+               ppn = pmap_find_phys(kernel_pmap, vaddr);
+
+               pmap_phys_attributes[ppn] |= PHYS_NOENCRYPT;
 
+               vaddr += PAGE_SIZE;
+               vsize -= PAGE_SIZE;
+       }
        /*
         *      Create the zone of physical maps,
         *      and of the physical-to-virtual entries.
         */
        s = (vm_size_t) sizeof(struct pmap);
        pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
-       s = (vm_size_t) sizeof(struct pv_hashed_entry);
-       pv_hashed_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */
-       s = 63;
-       pdpt_zone = zinit(s, 400*s, 4096, "pdpt"); /* XXX */
+        zone_change(pmap_zone, Z_NOENCRYPT, TRUE);
+
+       pmap_anchor_zone = zinit(PAGE_SIZE, task_max, PAGE_SIZE, "pagetable anchors");
+       zone_change(pmap_anchor_zone, Z_NOENCRYPT, TRUE);
 
+       /* The anchor is required to be page aligned. Zone debugging adds
+        * padding which may violate that requirement. Tell the zone
+        * subsystem that alignment is required.
+        */
+
+       zone_change(pmap_anchor_zone, Z_ALIGNMENT_REQUIRED, TRUE);
+
+       s = (vm_size_t) sizeof(struct pv_hashed_entry);
+       pv_hashed_list_zone = zinit(s, 10000*s /* Expandable zone */,
+           4096 * 3 /* LCM x86_64*/, "pv_list");
+       zone_change(pv_hashed_list_zone, Z_NOENCRYPT, TRUE);
 
        /* create pv entries for kernel pages mapped by low level
           startup code.  these have to exist so we can pmap_remove()
           e.g. kext pages from the middle of our addr space */
 
        vaddr = (vm_map_offset_t) VM_MIN_KERNEL_ADDRESS;
-       for (ppn = 0; ppn < i386_btop(avail_start); ppn++) {
+       for (ppn = VM_MIN_KERNEL_PAGE; ppn < i386_btop(avail_start); ppn++) {
                pv_rooted_entry_t pv_e;
 
                pv_e = pai_to_pvh(ppn);
@@ -1213,22 +790,365 @@ pmap_init(void)
        }
        pmap_initialized = TRUE;
 
-       /*
-        *      Initialize pmap cache.
-        */
-       pmap_cache_list = PMAP_NULL;
-       pmap_cache_count = 0;
-       simple_lock_init(&pmap_cache_lock, 0);
-
        max_preemption_latency_tsc = tmrCvt((uint64_t)MAX_PREEMPTION_LATENCY_NS, tscFCvtn2t);
 
        /*
         * Ensure the kernel's PML4 entry exists for the basement
         * before this is shared with any user.
         */
-       pmap_expand_pml4(kernel_pmap, KERNEL_BASEMENT);
+       pmap_expand_pml4(kernel_pmap, KERNEL_BASEMENT, PMAP_EXPAND_OPTIONS_NONE);
+
+#if CONFIG_VMX
+       pmap_ept_support_ad = vmx_hv_support()  && (VMX_CAP(MSR_IA32_VMX_EPT_VPID_CAP, MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT, 1) ? TRUE : FALSE);
+#else
+       pmap_ept_support_ad = FALSE;
+#endif /* CONFIG_VMX */
+}
+
+static
+void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, boolean_t ro) {
+       uint64_t ev = sv + nxrosz, cv = sv;
+       pd_entry_t *pdep;
+       pt_entry_t *ptep = NULL;
+
+       assert(!is_ept_pmap(npmap));
+
+       assert(((sv & 0xFFFULL) | (nxrosz & 0xFFFULL)) == 0);
+
+       for (pdep = pmap_pde(npmap, cv); pdep != NULL && (cv < ev);) {
+               uint64_t pdev = (cv & ~((uint64_t)PDEMASK));
+
+               if (*pdep & INTEL_PTE_PS) {
+                       if (NX)
+                               *pdep |= INTEL_PTE_NX;
+                       if (ro)
+                               *pdep &= ~INTEL_PTE_WRITE;
+                       cv += NBPD;
+                       cv &= ~((uint64_t) PDEMASK);
+                       pdep = pmap_pde(npmap, cv);
+                       continue;
+               }
+
+               for (ptep = pmap_pte(npmap, cv); ptep != NULL && (cv < (pdev + NBPD)) && (cv < ev);) {
+                       if (NX)
+                               *ptep |= INTEL_PTE_NX;
+                       if (ro)
+                               *ptep &= ~INTEL_PTE_WRITE;
+                       cv += NBPT;
+                       ptep = pmap_pte(npmap, cv);
+               }
+       }
+       DPRINTF("%s(0x%llx, 0x%llx, %u, %u): 0x%llx, 0x%llx\n", __FUNCTION__, sv, nxrosz, NX, ro, cv, ptep ? *ptep: 0);
 }
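pmap_mark_range() walks an already-established, page-aligned kernel mapping and sets the NX bit and/or clears the write bit on every PDE/PTE in the range, handling 2M (PS) leaf entries directly. A hypothetical call shape, assuming a page-aligned constant-data range (illustrative only; the actual call sites are not shown in this hunk):

        /* mark [sconstdata, econstdata) non-executable and read-only */
        pmap_mark_range(kernel_pmap, sconstdata,
                        econstdata - sconstdata, TRUE /* NX */, TRUE /* ro */);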
 
+/*
+ * Called once VM is fully initialized so that we can release unused
+ * sections of low memory to the general pool.
+ * Also complete the set-up of identity-mapped sections of the kernel:
+ *  1) write-protect kernel text
+ *  2) map kernel text using large pages if possible
+ *  3) read and write-protect page zero (for K32)
+ *  4) map the global page at the appropriate virtual address.
+ *
+ * Use of large pages
+ * ------------------
+ * To effectively map and write-protect all kernel text pages, the text
+ * must be 2M-aligned at the base, and the data section above must also be
+ * 2M-aligned. That is, there's padding below and above. This is achieved
+ * through linker directives. Large pages are used only if this alignment
+ * exists (and is not overridden by the -kernel_text_ps_4K boot-arg). The
+ * memory layout is:
+ * 
+ *                       :                :
+ *                       |     __DATA     |
+ *               sdata:  ==================  2Meg
+ *                       |                |
+ *                       |  zero-padding  |
+ *                       |                |
+ *               etext:  ------------------ 
+ *                       |                |
+ *                       :                :
+ *                       |                |
+ *                       |     __TEXT     |
+ *                       |                |
+ *                       :                :
+ *                       |                |
+ *               stext:  ==================  2Meg
+ *                       |                |
+ *                       |  zero-padding  |
+ *                       |                |
+ *               eHIB:   ------------------ 
+ *                       |     __HIB      |
+ *                       :                :
+ *
+ * Prior to changing the mapping from 4K to 2M, the zero-padding pages
+ * [eHIB,stext] and [etext,sdata] are ml_static_mfree()'d. Then all the
+ * 4K pages covering [stext,etext] are coalesced as 2M large pages.
+ * The now unused level-1 PTE pages are also freed.
+ */
+extern ppnum_t vm_kernel_base_page;
+void
+pmap_lowmem_finalize(void)
+{
+       spl_t           spl;
+       int             i;
+
+       /*
+        * Update wired memory statistics for early boot pages
+        */
+       PMAP_ZINFO_PALLOC(kernel_pmap, bootstrap_wired_pages * PAGE_SIZE);
+
+       /*
+        * Free pages in pmap regions below the base:
+        * rdar://6332712
+        *      We can't free all the pages to VM that EFI reports available.
+        *      Pages in the range 0xc0000-0xff000 aren't safe over sleep/wake.
+        *      There's also a size miscalculation here: pend is one page less
+        *      than it should be but this is not fixed to be backwards
+        *      compatible.
+        * This is important for KASLR because up to 256*2MB = 512MB of space
+        * needs to be released to VM.
+        */
+       for (i = 0;
+            pmap_memory_regions[i].end < vm_kernel_base_page;
+            i++) {
+               vm_offset_t     pbase = i386_ptob(pmap_memory_regions[i].base);
+               vm_offset_t     pend  = i386_ptob(pmap_memory_regions[i].end+1);
+
+               DBG("pmap region %d [%p..[%p\n",
+                   i, (void *) pbase, (void *) pend);
+
+               if (pmap_memory_regions[i].attribute & EFI_MEMORY_KERN_RESERVED)
+                       continue;
+               /*
+                * rdar://6332712
+                * Adjust limits not to free pages in range 0xc0000-0xff000.
+                */
+               if (pbase >= 0xc0000 && pend <= 0x100000)
+                       continue;
+               if (pbase < 0xc0000 && pend > 0x100000) {
+                       /* page range entirely within region, free lower part */
+                       DBG("- ml_static_mfree(%p,%p)\n",
+                           (void *) ml_static_ptovirt(pbase),
+                           (void *) (0xc0000-pbase));
+                       ml_static_mfree(ml_static_ptovirt(pbase),0xc0000-pbase);
+                       pbase = 0x100000;
+               }
+               if (pbase < 0xc0000)
+                       pend = MIN(pend, 0xc0000);
+               if (pend  > 0x100000)
+                       pbase = MAX(pbase, 0x100000);
+               DBG("- ml_static_mfree(%p,%p)\n",
+                   (void *) ml_static_ptovirt(pbase),
+                   (void *) (pend - pbase));
+               ml_static_mfree(ml_static_ptovirt(pbase), pend - pbase);
+       }
+
+       /* A final pass to get rid of all initial identity mappings to
+        * low pages.
+        */
+       DPRINTF("%s: Removing mappings from 0->0x%lx\n", __FUNCTION__, vm_kernel_base);
+
+       /*
+        * Remove all mappings past the boot-cpu descriptor aliases and low globals.
+        * Non-boot-cpu GDT aliases will be remapped later as needed. 
+        */
+       pmap_remove(kernel_pmap, LOWGLOBAL_ALIAS + PAGE_SIZE, vm_kernel_base);
+
+       /*
+        * If text and data are both 2MB-aligned,
+        * we can map text with large-pages,
+        * unless the -kernel_text_ps_4K boot-arg overrides.
+        */
+       if ((stext & I386_LPGMASK) == 0 && (sdata & I386_LPGMASK) == 0) {
+               kprintf("Kernel text is 2MB aligned");
+               kernel_text_ps_4K = FALSE;
+               if (PE_parse_boot_argn("-kernel_text_ps_4K",
+                                      &kernel_text_ps_4K,
+                                      sizeof (kernel_text_ps_4K)))
+                       kprintf(" but will be mapped with 4K pages\n");
+               else
+                       kprintf(" and will be mapped with 2M pages\n");
+       }
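+
+       /*
+        * (I386_LPGMASK is assumed here to be the 2MB-1 large-page mask,
+        *  so the alignment test above passes only when both stext and
+        *  sdata fall on 0x200000-byte boundaries.)
+        */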
+
+       (void) PE_parse_boot_argn("wpkernel", &wpkernel, sizeof (wpkernel));
+       if (wpkernel)
+               kprintf("Kernel text %p-%p to be write-protected\n",
+                       (void *) stext, (void *) etext);
+
+       spl = splhigh();
+
+       /*
+        * Scan over text if mappings are to be changed:
+        * - Remap kernel text read-only unless the "wpkernel" boot-arg is 0
+        * - Change to large-pages if possible and not overridden.
+        */
+       if (kernel_text_ps_4K && wpkernel) {
+               vm_offset_t     myva;
+               for (myva = stext; myva < etext; myva += PAGE_SIZE) {
+                       pt_entry_t     *ptep;
+
+                       ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
+                       if (ptep)
+                               pmap_store_pte(ptep, *ptep & ~INTEL_PTE_WRITE);
+               }
+       }
+
+       if (!kernel_text_ps_4K) {
+               vm_offset_t     myva;
+
+               /*
+                * Release zero-filled page padding used for 2M-alignment.
+                */
+               DBG("ml_static_mfree(%p,%p) for padding below text\n",
+                       (void *) eHIB, (void *) (stext - eHIB));
+               ml_static_mfree(eHIB, stext - eHIB);
+               DBG("ml_static_mfree(%p,%p) for padding above text\n",
+                       (void *) etext, (void *) (sdata - etext));
+               ml_static_mfree(etext, sdata - etext);
+
+               /*
+                * Coalesce text pages into large pages.
+                */
+               for (myva = stext; myva < sdata; myva += I386_LPGBYTES) {
+                       pt_entry_t      *ptep;
+                       vm_offset_t     pte_phys;
+                       pt_entry_t      *pdep;
+                       pt_entry_t      pde;
+
+                       pdep = pmap_pde(kernel_pmap, (vm_map_offset_t)myva);
+                       ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
+                       DBG("myva: %p pdep: %p ptep: %p\n",
+                               (void *) myva, (void *) pdep, (void *) ptep);
+                       if ((*ptep & INTEL_PTE_VALID) == 0)
+                               continue;
+                       pte_phys = (vm_offset_t)(*ptep & PG_FRAME);
+                       pde = *pdep & PTMASK;   /* page attributes from pde */
+                       pde |= INTEL_PTE_PS;    /* make it a 2M entry */
+                       pde |= pte_phys;        /* take page frame from pte */
+
+                       if (wpkernel)
+                               pde &= ~INTEL_PTE_WRITE;
+                       DBG("pmap_store_pte(%p,0x%llx)\n",
+                               (void *)pdep, pde);
+                       pmap_store_pte(pdep, pde);
+
+                       /*
+                        * Free the now-unused level-1 pte.
+                        * Note: ptep is a virtual address to the pte in the
+                        *   recursive map. We can't use this address to free
+                        *   the page. Instead we need to compute its address
+                        *   in the Idle PTEs in "low memory".
+                        */
+                       vm_offset_t vm_ptep = (vm_offset_t) KPTphys
+                                               + (pte_phys >> PTPGSHIFT);
+                       DBG("ml_static_mfree(%p,0x%x) for pte\n",
+                               (void *) vm_ptep, PAGE_SIZE);
+                       ml_static_mfree(vm_ptep, PAGE_SIZE);
+               }
+
+               /* Change variable read by sysctl machdep.pmap */
+               pmap_kernel_text_ps = I386_LPGBYTES;
+       }
+
+       boolean_t doconstro = TRUE;
+
+       (void) PE_parse_boot_argn("dataconstro", &doconstro, sizeof(doconstro));
+
+       if ((sconstdata | econstdata) & PAGE_MASK) {
+               kprintf("Const DATA misaligned 0x%lx 0x%lx\n", sconstdata, econstdata);
+               if ((sconstdata & PAGE_MASK) || (doconstro_override == FALSE))
+                       doconstro = FALSE;
+       }
+
+       if ((sconstdata > edata) || (sconstdata < sdata) || ((econstdata - sconstdata) >= (edata - sdata))) {
+               kprintf("Const DATA incorrect size 0x%lx 0x%lx 0x%lx 0x%lx\n", sconstdata, econstdata, sdata, edata);
+               doconstro = FALSE;
+       }
+
+       if (doconstro)
+               kprintf("Marking const DATA read-only\n");
+
+       vm_offset_t dva;
+
+       for (dva = sdata; dva < edata; dva += I386_PGBYTES) {
+               assert(((sdata | edata) & PAGE_MASK) == 0);
+               if ( (sdata | edata) & PAGE_MASK) {
+                       kprintf("DATA misaligned, 0x%lx, 0x%lx\n", sdata, edata);
+                       break;
+               }
+
+               pt_entry_t dpte, *dptep = pmap_pte(kernel_pmap, dva);
+
+               dpte = *dptep;
+
+               assert((dpte & INTEL_PTE_VALID));
+               if ((dpte & INTEL_PTE_VALID) == 0) {
+                       kprintf("Missing data mapping 0x%lx 0x%lx 0x%lx\n", dva, sdata, edata);
+                       continue;
+               }
+
+               dpte |= INTEL_PTE_NX;
+               if (doconstro && (dva >= sconstdata) && (dva < econstdata)) {
+                       dpte &= ~INTEL_PTE_WRITE;
+               }
+               pmap_store_pte(dptep, dpte);
+       }
+       kernel_segment_command_t * seg;
+       kernel_section_t         * sec;
+
+       for (seg = firstseg(); seg != NULL; seg = nextsegfromheader(&_mh_execute_header, seg)) {
+               if (!strcmp(seg->segname, "__TEXT") ||
+                   !strcmp(seg->segname, "__DATA")) {
+                       continue;
+               }
+               //XXX
+               if (!strcmp(seg->segname, "__KLD")) {
+                       continue;
+               }
+               if (!strcmp(seg->segname, "__HIB")) {
+                       for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) {
+                               if (sec->addr & PAGE_MASK)
+                                       panic("__HIB segment's sections misaligned");
+                               if (!strcmp(sec->sectname, "__text")) {
+                                       pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), FALSE, TRUE);
+                               } else {
+                                       pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), TRUE, FALSE);
+                               }
+                       }
+               } else {
+                       pmap_mark_range(kernel_pmap, seg->vmaddr, round_page_64(seg->vmsize), TRUE, FALSE);
+               }
+       }
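+
+       /*
+        * (In the pmap_mark_range() calls above the trailing booleans are
+        *  taken to be NX and read-only, in that order: __HIB __text is
+        *  left executable but write-protected, while the other sections
+        *  are marked non-executable and remain writable.)
+        */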
+
+       /*
+        * If we're debugging, map the low global vector page at the fixed
+        * virtual address.  Otherwise, remove the mapping for this page.
+        */
+       if (debug_boot_arg) {
+               pt_entry_t *pte = NULL;
+               if (0 == (pte = pmap_pte(kernel_pmap, LOWGLOBAL_ALIAS)))
+                       panic("lowmem pte");
+               /* make sure it is defined on page boundary */
+               assert(0 == ((vm_offset_t) &lowGlo & PAGE_MASK));
+               pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)
+                                       | INTEL_PTE_REF
+                                       | INTEL_PTE_MOD
+                                       | INTEL_PTE_WIRED
+                                       | INTEL_PTE_VALID
+                                       | INTEL_PTE_WRITE
+                                       | INTEL_PTE_NX);
+       } else {
+               pmap_remove(kernel_pmap,
+                           LOWGLOBAL_ALIAS, LOWGLOBAL_ALIAS + PAGE_SIZE);
+       }
+       
+       splx(spl);
+       if (pmap_pcid_ncpus)
+               tlb_flush_global();
+       else
+               flush_tlb_raw();
+}
 
 /*
  * This function is only used for debugging from the VM layer
@@ -1296,6 +1216,28 @@ pmap_is_empty(
        return TRUE;
 }
 
+void
+hv_ept_pmap_create(void **ept_pmap, void **eptp)
+{
+       pmap_t p;
+
+       if ((ept_pmap == NULL) || (eptp == NULL)) {
+               return;
+       }
+
+       p = pmap_create_options(get_task_ledger(current_task()), 0, (PMAP_CREATE_64BIT | PMAP_CREATE_EPT));
+       if (p == PMAP_NULL) {
+               *ept_pmap = NULL;
+               *eptp = NULL;
+               return;
+       }
+
+       assert(is_ept_pmap(p));
+
+       *ept_pmap = (void*)p;
+       *eptp = (void*)(p->pm_eptp);
+       return;
+}
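+
+/*
+ * A minimal usage sketch for the hook above (the caller is hypothetical):
+ *
+ *     void *hvpmap, *eptp;
+ *
+ *     hv_ept_pmap_create(&hvpmap, &eptp);
+ *     if (hvpmap == NULL)
+ *             return;
+ *
+ * A NULL result means pmap_create_options() failed; on success eptp holds
+ * the EPT root (pm_eptp) of the new pmap.
+ */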
 
 /*
  *     Create and return a physical map.
@@ -1310,9 +1252,10 @@ pmap_is_empty(
  *     is bounded by that size.
  */
 pmap_t
-pmap_create(
-           vm_map_size_t       sz,
-           boolean_t           is_64bit)
+pmap_create_options(
+       ledger_t        ledger,
+       vm_map_size_t   sz,
+       int             flags)
 {
        pmap_t          p;
        vm_size_t       size;
@@ -1320,7 +1263,7 @@ pmap_create(
        pml4_entry_t    *kpml4;
 
        PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START,
-                  (uint32_t) (sz>>32), (uint32_t) sz, is_64bit, 0, 0);
+                  (uint32_t) (sz>>32), (uint32_t) sz, flags, 0, 0);
 
        size = (vm_size_t) sz;
 
@@ -1332,68 +1275,98 @@ pmap_create(
                return(PMAP_NULL);
        }
 
+       /*
+        *      Return error when unrecognized flags are passed.
+        */
+       if ((flags & ~(PMAP_CREATE_KNOWN_FLAGS)) != 0) {
+               return(PMAP_NULL);
+       }
+
        p = (pmap_t) zalloc(pmap_zone);
        if (PMAP_NULL == p)
                panic("pmap_create zalloc");
-
+       /* Zero all fields */
+       bzero(p, sizeof(*p));
        /* init counts now since we'll be bumping some */
        simple_lock_init(&p->lock, 0);
+#if 00
        p->stats.resident_count = 0;
        p->stats.resident_max = 0;
        p->stats.wired_count = 0;
+#else
+       bzero(&p->stats, sizeof (p->stats));
+#endif
        p->ref_count = 1;
        p->nx_enabled = 1;
        p->pm_shared = FALSE;
+       ledger_reference(ledger);
+       p->ledger = ledger;
 
-       p->pm_task_map = is_64bit ? TASK_MAP_64BIT : TASK_MAP_32BIT;;
+       p->pm_task_map = ((flags & PMAP_CREATE_64BIT) ? TASK_MAP_64BIT : TASK_MAP_32BIT);
+       if (pmap_pcid_ncpus)
+               pmap_pcid_initialize(p);
 
-        /* alloc the pml4 page in kernel vm */
-        if (KERN_SUCCESS != kmem_alloc_kobject(kernel_map, (vm_offset_t *)(&p->pm_pml4), PAGE_SIZE))
-               panic("pmap_create kmem_alloc_kobject pml4");
+       p->pm_pml4 = zalloc(pmap_anchor_zone);
 
-        memset((char *)p->pm_pml4, 0, PAGE_SIZE);
-       p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4);
+       pmap_assert((((uintptr_t)p->pm_pml4) & PAGE_MASK) == 0);
 
-       OSAddAtomic(1,  &inuse_ptepages_count);
+       memset((char *)p->pm_pml4, 0, PAGE_SIZE);
+
+       if (flags & PMAP_CREATE_EPT) {
+               p->pm_eptp = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4);
+               p->pm_cr3 = 0;
+       } else {
+               p->pm_eptp = 0;
+               p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4);
+       }
 
        /* allocate the vm_objs to hold the pdpt, pde and pte pages */
 
-       p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS));
+       p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS) * PAGE_SIZE);
        if (NULL == p->pm_obj_pml4)
                panic("pmap_create pdpt obj");
 
-       p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS));
+       p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS) * PAGE_SIZE);
        if (NULL == p->pm_obj_pdpt)
                panic("pmap_create pdpt obj");
 
-       p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS));
+       p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS) * PAGE_SIZE);
        if (NULL == p->pm_obj)
                panic("pmap_create pte obj");
 
-       /* All pmaps share the kennel's pml4 */
+       /* All pmaps share the kernel's pml4 */
        pml4 = pmap64_pml4(p, 0ULL);
        kpml4 = kernel_pmap->pm_pml4;
        pml4[KERNEL_PML4_INDEX]    = kpml4[KERNEL_PML4_INDEX];
        pml4[KERNEL_KEXTS_INDEX]   = kpml4[KERNEL_KEXTS_INDEX];
-       pml4[KERNEL_PHYSMAP_INDEX] = kpml4[KERNEL_PHYSMAP_INDEX];
+       pml4[KERNEL_PHYSMAP_PML4_INDEX] = kpml4[KERNEL_PHYSMAP_PML4_INDEX];
 
        PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START,
-                  p, is_64bit, 0, 0, 0);
+                  p, flags, 0, 0, 0);
 
        return(p);
 }
 
+pmap_t
+pmap_create(
+       ledger_t        ledger,
+       vm_map_size_t   sz,
+       boolean_t       is_64bit)
+{
+       return pmap_create_options(ledger, sz, ((is_64bit) ? PMAP_CREATE_64BIT : 0));
+}
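+
+/*
+ * Sketch of typical use (callers are hypothetical): a 64-bit task map
+ * would be backed by
+ *
+ *     pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
+ *
+ * while hv_ept_pmap_create() above additionally passes PMAP_CREATE_EPT to
+ * obtain an EPT-formatted hierarchy with pm_eptp set and pm_cr3 left zero.
+ */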
+
 /*
  *     Retire the given physical map from service.
  *     Should only be called if the map contains
  *     no valid mappings.
  */
+extern int vm_wired_objects_page_count;
 
 void
-pmap_destroy(
-       register pmap_t p)
+pmap_destroy(pmap_t    p)
 {
-       register int            c;
+       int             c;
 
        if (p == PMAP_NULL)
                return;
@@ -1405,6 +1378,8 @@ pmap_destroy(
 
        c = --p->ref_count;
 
+       pmap_assert((current_thread() && (current_thread()->map)) ? (current_thread()->map->pmap != p) : TRUE);
+
        if (c == 0) {
                /* 
                 * If some cpu is not using the physical pmap pointer that it
@@ -1413,6 +1388,8 @@ pmap_destroy(
                 * physically on the right pmap:
                 */
                PMAP_UPDATE_TLBS(p, 0x0ULL, 0xFFFFFFFFFFFFF000ULL);
+               if (pmap_pcid_ncpus)
+                       pmap_destroy_pcid_sync(p);
        }
 
        PMAP_UNLOCK(p);
@@ -1420,6 +1397,7 @@ pmap_destroy(
        if (c != 0) {
                PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
                           p, 1, 0, 0, 0);
+               pmap_assert(p == kernel_pmap);
                return; /* still in use */
        }
 
@@ -1429,8 +1407,7 @@ pmap_destroy(
         */
        int inuse_ptepages = 0;
 
-       inuse_ptepages++;
-       kmem_free(kernel_map, (vm_offset_t)p->pm_pml4, PAGE_SIZE);
+       zfree(pmap_anchor_zone, p->pm_pml4);
 
        inuse_ptepages += p->pm_obj_pml4->resident_page_count;
        vm_object_deallocate(p->pm_obj_pml4);
@@ -1442,7 +1419,8 @@ pmap_destroy(
        vm_object_deallocate(p->pm_obj);
 
        OSAddAtomic(-inuse_ptepages,  &inuse_ptepages_count);
-
+       PMAP_ZINFO_PFREE(p, inuse_ptepages * PAGE_SIZE);
+       ledger_dereference(p->ledger);
        zfree(pmap_zone, p);
 
        PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
@@ -1464,444 +1442,30 @@ pmap_reference(pmap_t  p)
 }
 
 /*
- *     Remove a range of hardware page-table entries.
- *     The entries given are the first (inclusive)
- *     and last (exclusive) entries for the VM pages.
- *     The virtual address is the va for the first pte.
+ *     Remove phys addr if mapped in specified map
  *
- *     The pmap must be locked.
- *     If the pmap is not the kernel pmap, the range must lie
- *     entirely within one pte-page.  This is NOT checked.
- *     Assumes that the pte-page exists.
  */
-
 void
-pmap_remove_range(
-       pmap_t                  pmap,
-       vm_map_offset_t         start_vaddr,
-       pt_entry_t              *spte,
-       pt_entry_t              *epte)
+pmap_remove_some_phys(
+       __unused pmap_t         map,
+       __unused ppnum_t         pn)
 {
-       pt_entry_t              *cpte;
-       pv_hashed_entry_t       pvh_et = PV_HASHED_ENTRY_NULL;
-       pv_hashed_entry_t       pvh_eh = PV_HASHED_ENTRY_NULL;
-       pv_hashed_entry_t       pvh_e;
-       int                     pvh_cnt = 0;
-       int                     num_removed, num_unwired, num_found;
-       int                     pai;
-       pmap_paddr_t            pa;
-       vm_map_offset_t         vaddr;
 
-       num_removed = 0;
-       num_unwired = 0;
-       num_found   = 0;
+/* Implement to support working set code */
 
-       /* invalidate the PTEs first to "freeze" them */
-       for (cpte = spte, vaddr = start_vaddr;
-            cpte < epte;
-            cpte++, vaddr += PAGE_SIZE_64) {
+}
 
-               pa = pte_to_pa(*cpte);
-               if (pa == 0)
-                       continue;
-               num_found++;
 
-               if (iswired(*cpte))
-                       num_unwired++;
+void
+pmap_protect(
+       pmap_t          map,
+       vm_map_offset_t sva,
+       vm_map_offset_t eva,
+       vm_prot_t       prot)
+{
+       pmap_protect_options(map, sva, eva, prot, 0, NULL);
+}
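+
+/*
+ * Callers that want to batch TLB shootdowns can use the options variant
+ * directly; a hypothetical sketch:
+ *
+ *     pmap_flush_context pfc;
+ *
+ *     pmap_flush_context_init(&pfc);
+ *     pmap_protect_options(map, sva, eva, VM_PROT_READ,
+ *                          PMAP_OPTIONS_NOFLUSH, (void *)&pfc);
+ *     pmap_flush(&pfc);
+ *
+ * where pmap_flush() then issues the deferred invalidations in one pass.
+ */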
 
-               pai = pa_index(pa);
-
-               if (!IS_MANAGED_PAGE(pai)) {
-                       /*
-                        *      Outside range of managed physical memory.
-                        *      Just remove the mappings.
-                        */
-                       pmap_store_pte(cpte, 0);
-                       continue;
-               }
-
-               /* invalidate the PTE */ 
-               pmap_update_pte(cpte, *cpte, (*cpte & ~INTEL_PTE_VALID));
-       }
-
-       if (num_found == 0) {
-               /* nothing was changed: we're done */
-               goto update_counts;
-       }
-
-       /* propagate the invalidates to other CPUs */
-
-       PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr);
-
-       for (cpte = spte, vaddr = start_vaddr;
-            cpte < epte;
-            cpte++, vaddr += PAGE_SIZE_64) {
-
-               pa = pte_to_pa(*cpte);
-               if (pa == 0)
-                       continue;
-
-               pai = pa_index(pa);
-
-               LOCK_PVH(pai);
-
-               pa = pte_to_pa(*cpte);
-               if (pa == 0) {
-                       UNLOCK_PVH(pai);
-                       continue;
-               }
-               num_removed++;
-
-               /*
-                * Get the modify and reference bits, then
-                * nuke the entry in the page table
-                */
-               /* remember reference and change */
-               pmap_phys_attributes[pai] |=
-                       (char) (*cpte & (PHYS_MODIFIED | PHYS_REFERENCED));
-               /* completely invalidate the PTE */
-               pmap_store_pte(cpte, 0);
-
-               /*
-                * Remove the mapping from the pvlist for this physical page.
-                */
-               pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t) pai);
-
-               UNLOCK_PVH(pai);
-
-               if (pvh_e != PV_HASHED_ENTRY_NULL) {
-                       pvh_e->qlink.next = (queue_entry_t) pvh_eh;
-                       pvh_eh = pvh_e;
-
-                       if (pvh_et == PV_HASHED_ENTRY_NULL) {
-                               pvh_et = pvh_e;
-                       }
-                       pvh_cnt++;
-               }
-       } /* for loop */
-
-       if (pvh_eh != PV_HASHED_ENTRY_NULL) {
-               PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);
-       }
-update_counts:
-       /*
-        *      Update the counts
-        */
-#if TESTING
-       if (pmap->stats.resident_count < num_removed)
-               panic("pmap_remove_range: resident_count");
-#endif
-       assert(pmap->stats.resident_count >= num_removed);
-       OSAddAtomic(-num_removed,  &pmap->stats.resident_count);
-
-#if TESTING
-       if (pmap->stats.wired_count < num_unwired)
-               panic("pmap_remove_range: wired_count");
-#endif
-       assert(pmap->stats.wired_count >= num_unwired);
-       OSAddAtomic(-num_unwired,  &pmap->stats.wired_count);
-
-       return;
-}
-
-/*
- *     Remove phys addr if mapped in specified map
- *
- */
-void
-pmap_remove_some_phys(
-       __unused pmap_t         map,
-       __unused ppnum_t         pn)
-{
-
-/* Implement to support working set code */
-
-}
-
-/*
- *     Remove the given range of addresses
- *     from the specified map.
- *
- *     It is assumed that the start and end are properly
- *     rounded to the hardware page size.
- */
-void
-pmap_remove(
-       pmap_t          map,
-       addr64_t        s64,
-       addr64_t        e64)
-{
-       pt_entry_t     *pde;
-       pt_entry_t     *spte, *epte;
-       addr64_t        l64;
-       uint64_t        deadline;
-
-       pmap_intr_assert();
-
-       if (map == PMAP_NULL || s64 == e64)
-               return;
-
-       PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_START,
-                  map,
-                  (uint32_t) (s64 >> 32), s64,
-                  (uint32_t) (e64 >> 32), e64);
-
-
-       PMAP_LOCK(map);
-
-#if 0
-       /*
-        * Check that address range in the kernel does not overlap the stacks.
-        * We initialize local static min/max variables once to avoid making
-        * 2 function calls for every remove. Note also that these functions
-        * both return 0 before kernel stacks have been initialized, and hence
-        * the panic is not triggered in this case.
-        */
-       if (map == kernel_pmap) {
-               static vm_offset_t kernel_stack_min = 0;
-               static vm_offset_t kernel_stack_max = 0;
-
-               if (kernel_stack_min == 0) {
-                       kernel_stack_min = min_valid_stack_address();
-                       kernel_stack_max = max_valid_stack_address();
-               }
-               if ((kernel_stack_min <= s64 && s64 < kernel_stack_max) ||
-                   (kernel_stack_min < e64 && e64 <= kernel_stack_max))
-                       panic("pmap_remove() attempted in kernel stack");
-       }
-#else
-
-       /*
-        * The values of kernel_stack_min and kernel_stack_max are no longer
-        * relevant now that we allocate kernel stacks in the kernel map,
-        * so the old code above no longer applies.  If we wanted to check that
-        * we weren't removing a mapping of a page in a kernel stack we'd 
-        * mark the PTE with an unused bit and check that here.
-        */
-
-#endif
-
-       deadline = rdtsc64() + max_preemption_latency_tsc;
-
-       while (s64 < e64) {
-               l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size - 1);
-               if (l64 > e64)
-                       l64 = e64;
-               pde = pmap_pde(map, s64);
-
-               if (pde && (*pde & INTEL_PTE_VALID)) {
-                       if (*pde & INTEL_PTE_PS) {
-                               /*
-                                * If we're removing a superpage, pmap_remove_range()
-                                * must work on level 2 instead of level 1; and we're
-                                * only passing a single level 2 entry instead of a
-                                * level 1 range.
-                                */
-                               spte = pde;
-                               epte = spte+1; /* excluded */
-                       } else {
-                               spte = pmap_pte(map, (s64 & ~(pde_mapped_size - 1)));
-                               spte = &spte[ptenum(s64)];
-                               epte = &spte[intel_btop(l64 - s64)];
-                       }
-                       pmap_remove_range(map, s64, spte, epte);
-               }
-               s64 = l64;
-               pde++;
-
-               if (s64 < e64 && rdtsc64() >= deadline) {
-                       PMAP_UNLOCK(map)
-                       PMAP_LOCK(map)
-                       deadline = rdtsc64() + max_preemption_latency_tsc;
-               }
-       }
-
-       PMAP_UNLOCK(map);
-
-       PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_END,
-                  map, 0, 0, 0, 0);
-
-}
-
-/*
- *     Routine:        pmap_page_protect
- *
- *     Function:
- *             Lower the permission for all mappings to a given
- *             page.
- */
-void
-pmap_page_protect(
-        ppnum_t         pn,
-       vm_prot_t       prot)
-{
-       pv_hashed_entry_t       pvh_eh = PV_HASHED_ENTRY_NULL;
-       pv_hashed_entry_t       pvh_et = PV_HASHED_ENTRY_NULL;
-       pv_hashed_entry_t       nexth;
-       int                     pvh_cnt = 0;
-       pv_rooted_entry_t       pv_h;
-       pv_rooted_entry_t       pv_e;
-       pv_hashed_entry_t       pvh_e;
-       pt_entry_t              *pte;
-       int                     pai;
-       pmap_t                  pmap;
-       boolean_t               remove;
-
-       pmap_intr_assert();
-       assert(pn != vm_page_fictitious_addr);
-       if (pn == vm_page_guard_addr)
-               return;
-
-       pai = ppn_to_pai(pn);
-
-       if (!IS_MANAGED_PAGE(pai)) {
-               /*
-                *      Not a managed page.
-                */
-               return;
-       }
-       PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_START,
-                  pn, prot, 0, 0, 0);
-
-       /*
-        * Determine the new protection.
-        */
-       switch (prot) {
-       case VM_PROT_READ:
-       case VM_PROT_READ | VM_PROT_EXECUTE:
-               remove = FALSE;
-               break;
-       case VM_PROT_ALL:
-               return;         /* nothing to do */
-       default:
-               remove = TRUE;
-               break;
-       }
-
-       pv_h = pai_to_pvh(pai);
-
-       LOCK_PVH(pai);
-
-
-       /*
-        * Walk down PV list, if any, changing or removing all mappings.
-        */
-       if (pv_h->pmap == PMAP_NULL)
-               goto done;
-
-       pv_e = pv_h;
-       pvh_e = (pv_hashed_entry_t) pv_e;       /* cheat */
-
-       do {
-               vm_map_offset_t vaddr;
-
-               pmap = pv_e->pmap;
-               vaddr = pv_e->va;
-               pte = pmap_pte(pmap, vaddr);
-               if (0 == pte) {
-                       panic("pmap_page_protect() "
-                               "pmap=%p pn=0x%x vaddr=0x%llx\n",
-                               pmap, pn, vaddr);
-               }
-               nexth = (pv_hashed_entry_t) queue_next(&pvh_e->qlink);
-
-               /*
-                * Remove the mapping if new protection is NONE
-                * or if write-protecting a kernel mapping.
-                */
-               if (remove || pmap == kernel_pmap) {
-                       /*
-                        * Remove the mapping, collecting dirty bits.
-                        */
-                       pmap_update_pte(pte, *pte, *pte & ~INTEL_PTE_VALID);
-                       PMAP_UPDATE_TLBS(pmap, vaddr, vaddr+PAGE_SIZE);
-                       pmap_phys_attributes[pai] |=
-                               *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-                       pmap_store_pte(pte, 0);
-
-#if TESTING
-                       if (pmap->stats.resident_count < 1)
-                               panic("pmap_page_protect: resident_count");
-#endif
-                       assert(pmap->stats.resident_count >= 1);
-                       OSAddAtomic(-1,  &pmap->stats.resident_count);
-
-                       /*
-                        * Deal with the pv_rooted_entry.
-                        */
-
-                       if (pv_e == pv_h) {
-                               /*
-                                * Fix up head later.
-                                */
-                               pv_h->pmap = PMAP_NULL;
-                       } else {
-                               /*
-                                * Delete this entry.
-                                */
-                               pv_hash_remove(pvh_e);
-                               pvh_e->qlink.next = (queue_entry_t) pvh_eh;
-                               pvh_eh = pvh_e;
-
-                               if (pvh_et == PV_HASHED_ENTRY_NULL)
-                                       pvh_et = pvh_e;
-                               pvh_cnt++;
-                       }
-               } else {
-                       /*
-                        * Write-protect.
-                        */
-                       pmap_update_pte(pte, *pte, *pte & ~INTEL_PTE_WRITE);
-                       PMAP_UPDATE_TLBS(pmap, vaddr, vaddr+PAGE_SIZE);
-               }
-               pvh_e = nexth;
-       } while ((pv_e = (pv_rooted_entry_t) nexth) != pv_h);
-
-
-       /*
-         * If pv_head mapping was removed, fix it up.
-         */
-       if (pv_h->pmap == PMAP_NULL) {
-               pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink);
-
-               if (pvh_e != (pv_hashed_entry_t) pv_h) {
-                       pv_hash_remove(pvh_e);
-                       pv_h->pmap = pvh_e->pmap;
-                       pv_h->va = pvh_e->va;
-                       pvh_e->qlink.next = (queue_entry_t) pvh_eh;
-                       pvh_eh = pvh_e;
-
-                       if (pvh_et == PV_HASHED_ENTRY_NULL)
-                               pvh_et = pvh_e;
-                       pvh_cnt++;
-               }
-       }
-       if (pvh_eh != PV_HASHED_ENTRY_NULL) {
-               PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);
-       }
-done:
-       UNLOCK_PVH(pai);
-
-       PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_END,
-                  0, 0, 0, 0, 0);
-}
-
-
-/*
- *     Routine:
- *             pmap_disconnect
- *
- *     Function:
- *             Disconnect all mappings for this page and return reference and change status
- *             in generic format.
- *
- */
-unsigned int pmap_disconnect(
-       ppnum_t pa)
-{
-       pmap_page_protect(pa, 0);               /* disconnect the page */
-       return (pmap_get_refmod(pa));           /* return ref/chg status */
-}
 
 /*
  *     Set the physical protection on the
@@ -1909,11 +1473,13 @@ unsigned int pmap_disconnect(
  *     Will not increase permissions.
  */
 void
-pmap_protect(
+pmap_protect_options(
        pmap_t          map,
        vm_map_offset_t sva,
        vm_map_offset_t eva,
-       vm_prot_t       prot)
+       vm_prot_t       prot,
+       unsigned int    options,
+       void            *arg)
 {
        pt_entry_t      *pde;
        pt_entry_t      *spte, *epte;
@@ -1921,6 +1487,7 @@ pmap_protect(
        vm_map_offset_t orig_sva;
        boolean_t       set_NX;
        int             num_found = 0;
+       boolean_t       is_ept;
 
        pmap_intr_assert();
 
@@ -1928,7 +1495,7 @@ pmap_protect(
                return;
 
        if (prot == VM_PROT_NONE) {
-               pmap_remove(map, sva, eva);
+               pmap_remove_options(map, sva, eva, options);
                return;
        }
        PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START,
@@ -1941,6 +1508,9 @@ pmap_protect(
        else
                set_NX = TRUE;
 
+       is_ept = is_ept_pmap(map);
+
+
        PMAP_LOCK(map);
 
        orig_sva = sva;
@@ -1949,8 +1519,8 @@ pmap_protect(
                if (lva > eva)
                        lva = eva;
                pde = pmap_pde(map, sva);
-               if (pde && (*pde & INTEL_PTE_VALID)) {
-                       if (*pde & INTEL_PTE_PS) {
+               if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
+                       if (*pde & PTE_PS) {
                                /* superpage */
                                spte = pde;
                                epte = spte+1; /* excluded */
@@ -1961,31 +1531,42 @@ pmap_protect(
                        }
 
                        for (; spte < epte; spte++) {
-                               if (!(*spte & INTEL_PTE_VALID))
+                               if (!(*spte & PTE_VALID_MASK(is_ept)))
                                        continue;
 
+                               if (is_ept) {
+                                       if (prot & VM_PROT_READ)
+                                               pmap_update_pte(spte, 0, PTE_READ(is_ept));
+                                       else
+                                               pmap_update_pte(spte, PTE_READ(is_ept), 0);
+                               }
                                if (prot & VM_PROT_WRITE)
-                                       pmap_update_pte(spte, *spte,
-                                               *spte | INTEL_PTE_WRITE);
+                                       pmap_update_pte(spte, 0, PTE_WRITE(is_ept));
                                else
-                                       pmap_update_pte(spte, *spte,
-                                               *spte & ~INTEL_PTE_WRITE);
-
-                               if (set_NX)
-                                       pmap_update_pte(spte, *spte,
-                                               *spte | INTEL_PTE_NX);
-                               else
-                                       pmap_update_pte(spte, *spte,
-                                               *spte & ~INTEL_PTE_NX);
-
+                                       pmap_update_pte(spte, PTE_WRITE(is_ept), 0);
+
+                               if (set_NX) {
+                                       if (!is_ept)
+                                               pmap_update_pte(spte, 0, INTEL_PTE_NX);
+                                       else
+                                               pmap_update_pte(spte, INTEL_EPT_EX, 0);
+                               } else {
+                                       if (!is_ept)
+                                               pmap_update_pte(spte, INTEL_PTE_NX, 0);
+                                       else
+                                               pmap_update_pte(spte, 0, INTEL_EPT_EX);
+                               }
                                num_found++;
                        }
                }
                sva = lva;
        }
-       if (num_found)
-               PMAP_UPDATE_TLBS(map, orig_sva, eva);
-
+       if (num_found) {
+               if (options & PMAP_OPTIONS_NOFLUSH)
+                       PMAP_UPDATE_TLBS_DELAYED(map, orig_sva, eva, (pmap_flush_context *)arg);
+               else
+                       PMAP_UPDATE_TLBS(map, orig_sva, eva);
+       }
        PMAP_UNLOCK(map);
 
        PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END,
@@ -2013,462 +1594,35 @@ pmap_map_block(
                cur_page_size =  PAGE_SIZE;
 
        for (page = 0; page < size; page+=cur_page_size/PAGE_SIZE) {
-               pmap_enter(pmap, va, pa, prot, attr, TRUE);
+               pmap_enter(pmap, va, pa, prot, VM_PROT_NONE, attr, TRUE);
                va += cur_page_size;
                pa+=cur_page_size/PAGE_SIZE;
        }
 }
 
-
-/*
- *     Insert the given physical page (p) at
- *     the specified virtual address (v) in the
- *     target physical map with the protection requested.
- *
- *     If specified, the page will be wired down, meaning
- *     that the related pte cannot be reclaimed.
- *
- *     NB:  This is the only routine which MAY NOT lazy-evaluate
- *     or lose information.  That is, this routine must actually
- *     insert this page into the given map NOW.
- */
-void
-pmap_enter(
-       register pmap_t         pmap,
-       vm_map_offset_t         vaddr,
-       ppnum_t                 pn,
-       vm_prot_t               prot,
-       unsigned int            flags,
-       boolean_t               wired)
-{
-       pt_entry_t              *pte;
-       pv_rooted_entry_t       pv_h;
-       int                     pai;
-       pv_hashed_entry_t       pvh_e;
-       pv_hashed_entry_t       pvh_new;
-       pt_entry_t              template;
-       pmap_paddr_t            old_pa;
-       pmap_paddr_t            pa = (pmap_paddr_t) i386_ptob(pn);
-       boolean_t               need_tlbflush = FALSE;
-       boolean_t               set_NX;
-       char                    oattr;
-       boolean_t               old_pa_locked;
-       boolean_t               superpage = flags & VM_MEM_SUPERPAGE;
-       vm_object_t             delpage_pm_obj = NULL;
-       int                     delpage_pde_index = 0;
-
-
-       pmap_intr_assert();
-       assert(pn != vm_page_fictitious_addr);
-       if (pmap_debug)
-               kprintf("pmap_enter(%p,%llu,%u)\n", pmap, vaddr, pn);
-       if (pmap == PMAP_NULL)
-               return;
-       if (pn == vm_page_guard_addr)
-               return;
-
-       PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_START,
-                  pmap,
-                  (uint32_t) (vaddr >> 32), (uint32_t) vaddr,
-                  pn, prot);
-
-       if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled)
-               set_NX = FALSE;
-       else
-               set_NX = TRUE;
-
-       /*
-        *      Must allocate a new pvlist entry while we're unlocked;
-        *      zalloc may cause pageout (which will lock the pmap system).
-        *      If we determine we need a pvlist entry, we will unlock
-        *      and allocate one.  Then we will retry, throwing away
-        *      the allocated entry later (if we no longer need it).
-        */
-
-       pvh_new = PV_HASHED_ENTRY_NULL;
-Retry:
-       pvh_e = PV_HASHED_ENTRY_NULL;
-
-       PMAP_LOCK(pmap);
-
-       /*
-        *      Expand pmap to include this pte.  Assume that
-        *      pmap is always expanded to include enough hardware
-        *      pages to map one VM page.
-        */
-        if(superpage) {
-               while ((pte = pmap64_pde(pmap, vaddr)) == PD_ENTRY_NULL) {
-                       /* need room for another pde entry */
-                       PMAP_UNLOCK(pmap);
-                       pmap_expand_pdpt(pmap, vaddr);
-                       PMAP_LOCK(pmap);
-               }
-       } else {
-               while ((pte = pmap_pte(pmap, vaddr)) == PT_ENTRY_NULL) {
-                       /*
-                        * Must unlock to expand the pmap
-                        * going to grow pde level page(s)
-                        */
-                       PMAP_UNLOCK(pmap);
-                       pmap_expand(pmap, vaddr);
-                       PMAP_LOCK(pmap);
-               }
-       }
-
-       if (superpage && *pte && !(*pte & INTEL_PTE_PS)) {
-               /*
-                * There is still an empty page table mapped that
-                * was used for a previous base page mapping.
-                * Remember the PDE and the PDE index, so that we
-                * can free the page at the end of this function.
-                */
-               delpage_pde_index = (int)pdeidx(pmap, vaddr);
-               delpage_pm_obj = pmap->pm_obj;
-               *pte = 0;
-       }
-
-       old_pa = pte_to_pa(*pte);
-       pai = pa_index(old_pa);
-       old_pa_locked = FALSE;
-
-       /*
-        * if we have a previous managed page, lock the pv entry now. after
-        * we lock it, check to see if someone beat us to the lock and if so
-        * drop the lock
-        */
-       if ((0 != old_pa) && IS_MANAGED_PAGE(pai)) {
-               LOCK_PVH(pai);
-               old_pa_locked = TRUE;
-               old_pa = pte_to_pa(*pte);
-               if (0 == old_pa) {
-                       UNLOCK_PVH(pai);        /* another path beat us to it */
-                       old_pa_locked = FALSE;
-               }
-       }
-
-       /*
-        *      Special case if the incoming physical page is already mapped
-        *      at this address.
-        */
-       if (old_pa == pa) {
-
-               /*
-                *      May be changing its wired attribute or protection
-                */
-
-               template = pa_to_pte(pa) | INTEL_PTE_VALID;
-
-               if (VM_MEM_NOT_CACHEABLE ==
-                   (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT))) {
-                       if (!(flags & VM_MEM_GUARDED))
-                               template |= INTEL_PTE_PTA;
-                       template |= INTEL_PTE_NCACHE;
-               }
-               if (pmap != kernel_pmap)
-                       template |= INTEL_PTE_USER;
-               if (prot & VM_PROT_WRITE)
-                       template |= INTEL_PTE_WRITE;
-
-               if (set_NX)
-                       template |= INTEL_PTE_NX;
-
-               if (wired) {
-                       template |= INTEL_PTE_WIRED;
-                       if (!iswired(*pte))
-                               OSAddAtomic(+1,
-                                       &pmap->stats.wired_count);
-               } else {
-                       if (iswired(*pte)) {
-                               assert(pmap->stats.wired_count >= 1);
-                               OSAddAtomic(-1,
-                                       &pmap->stats.wired_count);
-                       }
-               }
-               if (superpage)          /* this path can not be used */
-                       template |= INTEL_PTE_PS;       /* to change the page size! */
-
-               /* store modified PTE and preserve RC bits */
-               pmap_update_pte(pte, *pte,
-                       template | (*pte & (INTEL_PTE_REF | INTEL_PTE_MOD)));
-               if (old_pa_locked) {
-                       UNLOCK_PVH(pai);
-                       old_pa_locked = FALSE;
-               }
-               need_tlbflush = TRUE;
-               goto Done;
-       }
-
-       /*
-        *      Outline of code from here:
-        *         1) If va was mapped, update TLBs, remove the mapping
-        *            and remove old pvlist entry.
-        *         2) Add pvlist entry for new mapping
-        *         3) Enter new mapping.
-        *
-        *      If the old physical page is not managed step 1) is skipped
-        *      (except for updating the TLBs), and the mapping is
-        *      overwritten at step 3).  If the new physical page is not
-        *      managed, step 2) is skipped.
-        */
-
-       if (old_pa != (pmap_paddr_t) 0) {
-
-               /*
-                *      Don't do anything to pages outside valid memory here.
-                *      Instead convince the code that enters a new mapping
-                *      to overwrite the old one.
-                */
-
-               /* invalidate the PTE */
-               pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
-               /* propagate invalidate everywhere */
-               PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
-               /* remember reference and change */
-               oattr = (char) (*pte & (PHYS_MODIFIED | PHYS_REFERENCED));
-               /* completely invalidate the PTE */
-               pmap_store_pte(pte, 0);
-
-               if (IS_MANAGED_PAGE(pai)) {
-#if TESTING
-                       if (pmap->stats.resident_count < 1)
-                               panic("pmap_enter: resident_count");
-#endif
-                       assert(pmap->stats.resident_count >= 1);
-                       OSAddAtomic(-1,
-                               &pmap->stats.resident_count);
-
-                       if (iswired(*pte)) {
-#if TESTING
-                               if (pmap->stats.wired_count < 1)
-                                       panic("pmap_enter: wired_count");
-#endif
-                               assert(pmap->stats.wired_count >= 1);
-                               OSAddAtomic(-1,
-                                       &pmap->stats.wired_count);
-                       }
-                       pmap_phys_attributes[pai] |= oattr;
-
-                       /*
-                        *      Remove the mapping from the pvlist for
-                        *      this physical page.
-                        *      We'll end up with either a rooted pv or a
-                        *      hashed pv
-                        */
-                       pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t) pai);
-
-               } else {
-
-                       /*
-                        *      old_pa is not managed.
-                        *      Do removal part of accounting.
-                        */
-
-                       if (iswired(*pte)) {
-                               assert(pmap->stats.wired_count >= 1);
-                               OSAddAtomic(-1,
-                                       &pmap->stats.wired_count);
-                       }
-               }
-       }
-
-       /*
-        * if we had a previously managed page locked, unlock it now
-        */
-       if (old_pa_locked) {
-               UNLOCK_PVH(pai);
-               old_pa_locked = FALSE;
-       }
-
-       pai = pa_index(pa);     /* now working with new incoming phys page */
-       if (IS_MANAGED_PAGE(pai)) {
-
-               /*
-                *      Step 2) Enter the mapping in the PV list for this
-                *      physical page.
-                */
-               pv_h = pai_to_pvh(pai);
-
-               LOCK_PVH(pai);
-
-               if (pv_h->pmap == PMAP_NULL) {
-                       /*
-                        *      No mappings yet, use rooted pv
-                        */
-                       pv_h->va = vaddr;
-                       pv_h->pmap = pmap;
-                       queue_init(&pv_h->qlink);
-               } else {
-                       /*
-                        *      Add new pv_hashed_entry after header.
-                        */
-                       if ((PV_HASHED_ENTRY_NULL == pvh_e) && pvh_new) {
-                               pvh_e = pvh_new;
-                               pvh_new = PV_HASHED_ENTRY_NULL;
-                       } else if (PV_HASHED_ENTRY_NULL == pvh_e) {
-                               PV_HASHED_ALLOC(pvh_e);
-                               if (PV_HASHED_ENTRY_NULL == pvh_e) {
-                                       /*
-                                        * the pv list is empty. if we are on
-                                        * the kernel pmap we'll use one of
-                                        * the special private kernel pv_e's,
-                                        * else, we need to unlock
-                                        * everything, zalloc a pv_e, and
-                                        * restart bringing in the pv_e with
-                                        * us.
-                                        */
-                                       if (kernel_pmap == pmap) {
-                                               PV_HASHED_KERN_ALLOC(pvh_e);
-                                       } else {
-                                               UNLOCK_PVH(pai);
-                                               PMAP_UNLOCK(pmap);
-                                               pvh_new = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
-                                               goto Retry;
-                                       }
-                               }
-                       }
-                       if (PV_HASHED_ENTRY_NULL == pvh_e)
-                               panic("pvh_e exhaustion");
-
-                       pvh_e->va = vaddr;
-                       pvh_e->pmap = pmap;
-                       pvh_e->ppn = pn;
-                       pv_hash_add(pvh_e, pv_h);
-
-                       /*
-                        *      Remember that we used the pvlist entry.
-                        */
-                       pvh_e = PV_HASHED_ENTRY_NULL;
-               }
-
-               /*
-                * only count the mapping
-                * for 'managed memory'
-                */
-               OSAddAtomic(+1,  & pmap->stats.resident_count);
-               if (pmap->stats.resident_count > pmap->stats.resident_max) {
-                       pmap->stats.resident_max = pmap->stats.resident_count;
-               }
-       }
-       /*
-        * Step 3) Enter the mapping.
-        *
-        *      Build a template to speed up entering -
-        *      only the pfn changes.
-        */
-       template = pa_to_pte(pa) | INTEL_PTE_VALID;
-
-       if (flags & VM_MEM_NOT_CACHEABLE) {
-               if (!(flags & VM_MEM_GUARDED))
-                       template |= INTEL_PTE_PTA;
-               template |= INTEL_PTE_NCACHE;
-       }
-       if (pmap != kernel_pmap)
-               template |= INTEL_PTE_USER;
-       if (prot & VM_PROT_WRITE)
-               template |= INTEL_PTE_WRITE;
-       if (set_NX)
-               template |= INTEL_PTE_NX;
-       if (wired) {
-               template |= INTEL_PTE_WIRED;
-               OSAddAtomic(+1,  & pmap->stats.wired_count);
-       }
-       if (superpage)
-               template |= INTEL_PTE_PS;
-       pmap_store_pte(pte, template);
-
-       /*
-        * if this was a managed page we delayed unlocking the pv until here
-        * to prevent pmap_page_protect et al from finding it until the pte
-        * has been stored
-        */
-       if (IS_MANAGED_PAGE(pai)) {
-               UNLOCK_PVH(pai);
-       }
-Done:
-       if (need_tlbflush == TRUE)
-               PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
-
-       if (pvh_e != PV_HASHED_ENTRY_NULL) {
-               PV_HASHED_FREE_LIST(pvh_e, pvh_e, 1);
-       }
-       if (pvh_new != PV_HASHED_ENTRY_NULL) {
-               PV_HASHED_KERN_FREE_LIST(pvh_new, pvh_new, 1);
-       }
-       PMAP_UNLOCK(pmap);
-
-       if (delpage_pm_obj) {
-               vm_page_t m;
-
-               vm_object_lock(delpage_pm_obj);
-               m = vm_page_lookup(delpage_pm_obj, delpage_pde_index);
-               if (m == VM_PAGE_NULL)
-                   panic("pmap_enter: pte page not in object");
-               VM_PAGE_FREE(m);
-               OSAddAtomic(-1,  &inuse_ptepages_count);
-               vm_object_unlock(delpage_pm_obj);
-       }
-
-       PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_END, 0, 0, 0, 0, 0);
-}
-
-/*
- *     Routine:        pmap_change_wiring
- *     Function:       Change the wiring attribute for a map/virtual-address
- *                     pair.
- *     In/out conditions:
- *                     The mapping must already exist in the pmap.
- */
-void
-pmap_change_wiring(
-       pmap_t          map,
-       vm_map_offset_t vaddr,
-       boolean_t       wired)
-{
-       pt_entry_t      *pte;
-
-       PMAP_LOCK(map);
-
-       if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL)
-               panic("pmap_change_wiring: pte missing");
-
-       if (wired && !iswired(*pte)) {
-               /*
-                * wiring down mapping
-                */
-               OSAddAtomic(+1,  &map->stats.wired_count);
-               pmap_update_pte(pte, *pte, (*pte | INTEL_PTE_WIRED));
-       }
-       else if (!wired && iswired(*pte)) {
-               /*
-                * unwiring mapping
-                */
-               assert(map->stats.wired_count >= 1);
-               OSAddAtomic(-1,  &map->stats.wired_count);
-               pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WIRED));
-       }
-
-       PMAP_UNLOCK(map);
-}
-
-void
+kern_return_t
 pmap_expand_pml4(
        pmap_t          map,
-       vm_map_offset_t vaddr)
+       vm_map_offset_t vaddr,
+       unsigned int options)
 {
        vm_page_t       m;
        pmap_paddr_t    pa;
        uint64_t        i;
        ppnum_t         pn;
        pml4_entry_t    *pml4p;
+       boolean_t       is_ept = is_ept_pmap(map);
 
        DBG("pmap_expand_pml4(%p,%p)\n", map, (void *)vaddr);
 
        /*
         *      Allocate a VM page for the pml4 page
         */
-       while ((m = vm_page_grab()) == VM_PAGE_NULL)
+       while ((m = vm_page_grab()) == VM_PAGE_NULL) {
+               if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
+                       return KERN_RESOURCE_SHORTAGE;
                VM_PAGE_WAIT();
-
+       }
        /*
         *      put the page into the pmap's obj list so it
         *      can be found later.
@@ -2483,10 +1637,12 @@ pmap_expand_pml4(
        pmap_zero_page(pn);
 
        vm_page_lockspin_queues();
-       vm_page_wire(m);
+       vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
        vm_page_unlock_queues();
 
        OSAddAtomic(1,  &inuse_ptepages_count);
+       OSAddAtomic64(1,  &alloc_ptepages_count);
+       PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
 
        /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
        vm_object_lock(map->pm_obj_pml4);
@@ -2502,16 +1658,17 @@ pmap_expand_pml4(
                VM_PAGE_FREE(m);
 
                OSAddAtomic(-1,  &inuse_ptepages_count);
-               return;
+               PMAP_ZINFO_PFREE(map, PAGE_SIZE);
+               return KERN_SUCCESS;
        }
 
 #if 0 /* DEBUG */
-       if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i)) {
+       if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE)) {
               panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
                     map, map->pm_obj_pml4, vaddr, i);
        }
 #endif
-       vm_page_insert(m, map->pm_obj_pml4, (vm_object_offset_t)i);
+       vm_page_insert_wired(m, map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
        vm_object_unlock(map->pm_obj_pml4);
 
        /*
@@ -2520,37 +1677,41 @@ pmap_expand_pml4(
        pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */
 
        pmap_store_pte(pml4p, pa_to_pte(pa)
-                               | INTEL_PTE_VALID
-                               | INTEL_PTE_USER
-                               | INTEL_PTE_WRITE);
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
 
        PMAP_UNLOCK(map);
 
-       return;
+       return KERN_SUCCESS;
 }
 
-void
-pmap_expand_pdpt(
-                pmap_t map,
-                vm_map_offset_t vaddr)
+kern_return_t
+pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options)
 {
        vm_page_t       m;
        pmap_paddr_t    pa;
        uint64_t        i;
        ppnum_t         pn;
        pdpt_entry_t    *pdptp;
+       boolean_t       is_ept = is_ept_pmap(map);
 
        DBG("pmap_expand_pdpt(%p,%p)\n", map, (void *)vaddr);
 
        while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) {
-               pmap_expand_pml4(map, vaddr);
+               kern_return_t pep4kr = pmap_expand_pml4(map, vaddr, options);
+               if (pep4kr != KERN_SUCCESS)
+                       return pep4kr;
        }
 
        /*
         *      Allocate a VM page for the pdpt page
         */
-       while ((m = vm_page_grab()) == VM_PAGE_NULL)
+       while ((m = vm_page_grab()) == VM_PAGE_NULL) {
+               if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
+                       return KERN_RESOURCE_SHORTAGE;
                VM_PAGE_WAIT();
+       }
 
        /*
         *      put the page into the pmap's obj list so it
@@ -2566,10 +1727,12 @@ pmap_expand_pdpt(
        pmap_zero_page(pn);
 
        vm_page_lockspin_queues();
-       vm_page_wire(m);
+       vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
        vm_page_unlock_queues();
 
        OSAddAtomic(1,  &inuse_ptepages_count);
+       OSAddAtomic64(1,  &alloc_ptepages_count);
+       PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
 
        /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
        vm_object_lock(map->pm_obj_pdpt);
@@ -2585,16 +1748,17 @@ pmap_expand_pdpt(
                VM_PAGE_FREE(m);
 
                OSAddAtomic(-1,  &inuse_ptepages_count);
-               return;
+               PMAP_ZINFO_PFREE(map, PAGE_SIZE);
+               return KERN_SUCCESS;
        }
 
 #if 0 /* DEBUG */
-       if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i)) {
+       if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE)) {
               panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
                     map, map->pm_obj_pdpt, vaddr, i);
        }
 #endif
-       vm_page_insert(m, map->pm_obj_pdpt, (vm_object_offset_t)i);
+       vm_page_insert_wired(m, map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
        vm_object_unlock(map->pm_obj_pdpt);
 
        /*
@@ -2603,13 +1767,13 @@ pmap_expand_pdpt(
        pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */
 
        pmap_store_pte(pdptp, pa_to_pte(pa)
-                               | INTEL_PTE_VALID
-                               | INTEL_PTE_USER
-                               | INTEL_PTE_WRITE);
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
 
        PMAP_UNLOCK(map);
 
-       return;
+       return KERN_SUCCESS;
 
 }
 
@@ -2630,16 +1794,18 @@ pmap_expand_pdpt(
  *     has been expanded enough.
  *     (We won't loop forever, since page tables aren't shrunk.)
  */
-void
+kern_return_t
 pmap_expand(
        pmap_t          map,
-       vm_map_offset_t vaddr)
+       vm_map_offset_t vaddr,
+       unsigned int options)
 {
        pt_entry_t              *pdp;
        register vm_page_t      m;
        register pmap_paddr_t   pa;
        uint64_t                i;
        ppnum_t                 pn;
+       boolean_t               is_ept = is_ept_pmap(map);
 
 
        /*
@@ -2653,15 +1819,19 @@ pmap_expand(
 
 
        while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) {
-               /* need room for another pde entry */
-               pmap_expand_pdpt(map, vaddr);
+               kern_return_t pepkr = pmap_expand_pdpt(map, vaddr, options);
+               if (pepkr != KERN_SUCCESS)
+                       return pepkr;
        }
 
        /*
         *      Allocate a VM page for the pde entries.
         */
-       while ((m = vm_page_grab()) == VM_PAGE_NULL)
+       while ((m = vm_page_grab()) == VM_PAGE_NULL) {
+               if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
+                       return KERN_RESOURCE_SHORTAGE;
                VM_PAGE_WAIT();
+       }
 
        /*
         *      put the page into the pmap's obj list so it
@@ -2677,10 +1847,12 @@ pmap_expand(
        pmap_zero_page(pn);
 
        vm_page_lockspin_queues();
-       vm_page_wire(m);
+       vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
        vm_page_unlock_queues();
 
        OSAddAtomic(1,  &inuse_ptepages_count);
+       OSAddAtomic64(1,  &alloc_ptepages_count);
+       PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
 
 	/* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
        vm_object_lock(map->pm_obj);
@@ -2697,16 +1869,17 @@ pmap_expand(
                VM_PAGE_FREE(m);
 
                OSAddAtomic(-1,  &inuse_ptepages_count);
-               return;
+               PMAP_ZINFO_PFREE(map, PAGE_SIZE);
+               return KERN_SUCCESS;
        }
 
 #if 0 /* DEBUG */
-       if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i)) {
+       if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE)) {
               panic("pmap_expand: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
                     map, map->pm_obj, vaddr, i);
        }
 #endif
-       vm_page_insert(m, map->pm_obj, (vm_object_offset_t)i);
+       vm_page_insert_wired(m, map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
        vm_object_unlock(map->pm_obj);
 
        /*
@@ -2714,13 +1887,13 @@ pmap_expand(
         */
        pdp = pmap_pde(map, vaddr);
        pmap_store_pte(pdp, pa_to_pte(pa)
-                               | INTEL_PTE_VALID
-                               | INTEL_PTE_USER
-                               | INTEL_PTE_WRITE);
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
 
        PMAP_UNLOCK(map);
 
-       return;
+       return KERN_SUCCESS;
 }
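
With the reworked signatures above, expansion reports failure instead of sleeping when PMAP_EXPAND_OPTIONS_NOWAIT is passed. A caller sketch under that assumption (try_enter_mapping() and its arguments are hypothetical; only the pmap_expand() contract is taken from the patch):

static kern_return_t
try_enter_mapping(pmap_t map, vm_map_offset_t vaddr, boolean_t can_block)
{
	kern_return_t kr;

	/* Ask for the paging hierarchy covering vaddr; do not sleep on a
	 * free-page shortage unless the caller can tolerate blocking. */
	kr = pmap_expand(map, vaddr,
	                 can_block ? 0 : PMAP_EXPAND_OPTIONS_NOWAIT);
	if (kr == KERN_RESOURCE_SHORTAGE)
		return kr;	/* no page available and NOWAIT was requested */

	assert(kr == KERN_SUCCESS);
	/* ... install the PTE for vaddr here ... */
	return KERN_SUCCESS;
}
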
 
 /* On K64 machines with more than 32GB of memory, pmap_steal_memory
@@ -2729,14 +1902,16 @@ pmap_expand(
  * that pmap_steal_memory uses, rather than calling vm_page_grab (which
  * isn't available yet). */
 void
-pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr) {
+pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr)
+{
        ppnum_t pn;
        pt_entry_t              *pte;
+       boolean_t               is_ept = is_ept_pmap(pmap);
 
        PMAP_LOCK(pmap);
 
        if(pmap64_pdpt(pmap, vaddr) == PDPT_ENTRY_NULL) {
-               if (!pmap_next_page_k64(&pn))
+               if (!pmap_next_page_hi(&pn))
                        panic("pmap_pre_expand");
 
                pmap_zero_page(pn);
@@ -2744,13 +1919,13 @@ pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr) {
                pte = pmap64_pml4(pmap, vaddr);
 
                pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
-                               | INTEL_PTE_VALID
-                               | INTEL_PTE_USER
-                               | INTEL_PTE_WRITE);
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
        }
 
        if(pmap64_pde(pmap, vaddr) == PD_ENTRY_NULL) {
-               if (!pmap_next_page_k64(&pn))
+               if (!pmap_next_page_hi(&pn))
                        panic("pmap_pre_expand");
 
                pmap_zero_page(pn);
@@ -2758,13 +1933,13 @@ pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr) {
                pte = pmap64_pdpt(pmap, vaddr);
 
                pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
-                               | INTEL_PTE_VALID
-                               | INTEL_PTE_USER
-                               | INTEL_PTE_WRITE);
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
        }
 
        if(pmap_pte(pmap, vaddr) == PT_ENTRY_NULL) {
-               if (!pmap_next_page_k64(&pn))
+               if (!pmap_next_page_hi(&pn))
                        panic("pmap_pre_expand");
 
                pmap_zero_page(pn);
@@ -2772,9 +1947,9 @@ pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr) {
                pte = pmap64_pde(pmap, vaddr);
 
                pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
-                               | INTEL_PTE_VALID
-                               | INTEL_PTE_USER
-                               | INTEL_PTE_WRITE);
+                               | PTE_READ(is_ept)
+                               | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
+                               | PTE_WRITE(is_ept));
        }
 
        PMAP_UNLOCK(pmap);
@@ -2829,6 +2004,7 @@ pmap_collect(
        register pt_entry_t     *pdp, *ptp;
        pt_entry_t              *eptp;
        int                     wired;
+       boolean_t               is_ept;
 
        if (p == PMAP_NULL)
                return;
@@ -2836,6 +2012,8 @@ pmap_collect(
        if (p == kernel_pmap)
                return;
 
+       is_ept = is_ept_pmap(p);
+
        /*
         *      Garbage collect map.
         */
@@ -2845,407 +2023,111 @@ pmap_collect(
             pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)];
             pdp++)
        {
-          if (*pdp & INTEL_PTE_VALID) {
-             if(*pdp & INTEL_PTE_REF) {
-               pmap_store_pte(pdp, *pdp & ~INTEL_PTE_REF);
-               collect_ref++;
-             } else {
-               collect_unref++;
-               ptp = pmap_pte(p, pdetova(pdp - (pt_entry_t *)p->dirbase));
-               eptp = ptp + NPTEPG;
-
-               /*
-                * If the pte page has any wired mappings, we cannot
-                * free it.
-                */
-               wired = 0;
-               {
-                   register pt_entry_t *ptep;
-                   for (ptep = ptp; ptep < eptp; ptep++) {
-                       if (iswired(*ptep)) {
-                           wired = 1;
-                           break;
-                       }
-                   }
-               }
-               if (!wired) {
-                   /*
-                    * Remove the virtual addresses mapped by this pte page.
-                    */
-                   pmap_remove_range(p,
-                               pdetova(pdp - (pt_entry_t *)p->dirbase),
-                               ptp,
-                               eptp);
-
-                   /*
-                    * Invalidate the page directory pointer.
-                    */
-                   pmap_store_pte(pdp, 0x0);
-                
-                   PMAP_UNLOCK(p);
-
-                   /*
-                    * And free the pte page itself.
-                    */
-                   {
-                       register vm_page_t m;
-
-                       vm_object_lock(p->pm_obj);
-
-                       m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]));
-                       if (m == VM_PAGE_NULL)
-                           panic("pmap_collect: pte page not in object");
-
-                       VM_PAGE_FREE(m);
-
-                       OSAddAtomic(-1,  &inuse_ptepages_count);
-
-                       vm_object_unlock(p->pm_obj);
-                   }
-
-                   PMAP_LOCK(p);
-               }
-             }
-          }
-       }
-
-       PMAP_UPDATE_TLBS(p, 0x0, 0xFFFFFFFFFFFFF000ULL);
-       PMAP_UNLOCK(p);
-       return;
-
-}
-#endif
-
-
-void
-pmap_copy_page(ppnum_t src, ppnum_t dst)
-{
-       bcopy_phys((addr64_t)i386_ptob(src),
-                  (addr64_t)i386_ptob(dst),
-                  PAGE_SIZE);
-}
-
-
-/*
- *     Routine:        pmap_pageable
- *     Function:
- *             Make the specified pages (by pmap, offset)
- *             pageable (or not) as requested.
- *
- *             A page which is not pageable may not take
- *             a fault; therefore, its page table entry
- *             must remain valid for the duration.
- *
- *             This routine is merely advisory; pmap_enter
- *             will specify that these pages are to be wired
- *             down (or not) as appropriate.
- */
-void
-pmap_pageable(
-       __unused pmap_t                 pmap,
-       __unused vm_map_offset_t        start_addr,
-       __unused vm_map_offset_t        end_addr,
-       __unused boolean_t              pageable)
-{
-#ifdef lint
-       pmap++; start_addr++; end_addr++; pageable++;
-#endif /* lint */
-}
-
-/*
- *     Clear specified attribute bits.
- */
-void
-phys_attribute_clear(
-       ppnum_t         pn,
-       int             bits)
-{
-       pv_rooted_entry_t       pv_h;
-       pv_hashed_entry_t       pv_e;
-       pt_entry_t              *pte;
-       int                     pai;
-       pmap_t                  pmap;
-
-       pmap_intr_assert();
-       assert(pn != vm_page_fictitious_addr);
-       if (pn == vm_page_guard_addr)
-               return;
-
-       pai = ppn_to_pai(pn);
-
-       if (!IS_MANAGED_PAGE(pai)) {
-               /*
-                *      Not a managed page.
-                */
-               return;
-       }
-
-
-       PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_START,
-                  pn, bits, 0, 0, 0);
-
-       pv_h = pai_to_pvh(pai);
-
-       LOCK_PVH(pai);
-
-       /*
-        * Walk down PV list, clearing all modify or reference bits.
-        * We do not have to lock the pv_list because we have
-        * the entire pmap system locked.
-        */
-       if (pv_h->pmap != PMAP_NULL) {
-               /*
-                * There are some mappings.
-                */
-
-               pv_e = (pv_hashed_entry_t)pv_h;
-
-               do {
-                       vm_map_offset_t va;
-
-                       pmap = pv_e->pmap;
-                       va = pv_e->va;
-
-                        /*
-                         * Clear modify and/or reference bits.
-                         */
-                       pte = pmap_pte(pmap, va);
-                       pmap_update_pte(pte, *pte, (*pte & ~bits));
-                       /* Ensure all processors using this translation
-                        * invalidate this TLB entry. The invalidation *must*
-                        * follow the PTE update, to ensure that the TLB
-                        * shadow of the 'D' bit (in particular) is
-                        * synchronized with the updated PTE.
-                        */
-                       PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
-
-                       pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
-
-               } while (pv_e != (pv_hashed_entry_t)pv_h);
-       }
-       pmap_phys_attributes[pai] &= ~bits;
-
-       UNLOCK_PVH(pai);
-
-       PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_END,
-                  0, 0, 0, 0, 0);
-}
-
-/*
- *     Check specified attribute bits.
- */
-int
-phys_attribute_test(
-       ppnum_t         pn,
-       int             bits)
-{
-       pv_rooted_entry_t       pv_h;
-       pv_hashed_entry_t       pv_e;
-       pt_entry_t              *pte;
-       int                     pai;
-       pmap_t                  pmap;
-       int                     attributes = 0;
-
-       pmap_intr_assert();
-       assert(pn != vm_page_fictitious_addr);
-       if (pn == vm_page_guard_addr)
-               return 0;
-
-       pai = ppn_to_pai(pn);
-
-       if (!IS_MANAGED_PAGE(pai)) {
-               /*
-                *      Not a managed page.
-                */
-               return 0;
-       }
-
-       /*
-        * super fast check...  if bits already collected
-        * no need to take any locks...
-        * if not set, we need to recheck after taking
-        * the lock in case they got pulled in while
-        * we were waiting for the lock
-        */
-       if ((pmap_phys_attributes[pai] & bits) == bits)
-               return bits;
-
-       pv_h = pai_to_pvh(pai);
-
-       LOCK_PVH(pai);
-
-       attributes = pmap_phys_attributes[pai] & bits;
-
-
-       /*
-        * Walk down PV list, checking the mappings until we
-        * reach the end or we've found the attributes we've asked for
-        * We do not have to lock the pv_list because we have
-        * the entire pmap system locked.
-        */
-       if (attributes != bits &&
-           pv_h->pmap != PMAP_NULL) {
-               /*
-                * There are some mappings.
-                */
-               pv_e = (pv_hashed_entry_t)pv_h;
-               do {
-                       vm_map_offset_t va;
-
-                       pmap = pv_e->pmap;
-                       va = pv_e->va;
-                       /*
-                        * first make sure any processor actively
-                        * using this pmap, flushes its TLB state
-                        */
-                       PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
-
-                       /*
-                        * pick up modify and/or reference bits from mapping
-                        */
+               if (*pdp & PTE_VALID_MASK(is_ept)) {
+                       if (*pdp & PTE_REF(is_ept)) {
+                               pmap_store_pte(pdp, *pdp & ~PTE_REF(is_ept));
+                               collect_ref++;
+                       } else {
+                               collect_unref++;
+                               ptp = pmap_pte(p, pdetova(pdp - (pt_entry_t *)p->dirbase));
+                               eptp = ptp + NPTEPG;
 
-                       pte = pmap_pte(pmap, va);
-                       attributes |= (int)(*pte & bits);
+                               /*
+                                * If the pte page has any wired mappings, we cannot
+                                * free it.
+                                */
+                               wired = 0;
+                               {
+                                       register pt_entry_t *ptep;
+                                       for (ptep = ptp; ptep < eptp; ptep++) {
+                                               if (iswired(*ptep)) {
+                                                       wired = 1;
+                                                       break;
+                                               }
+                                       }
+                               }
+                               if (!wired) {
+                                       /*
+                                        * Remove the virtual addresses mapped by this pte page.
+                                        */
+                                       pmap_remove_range(p,
+                                               pdetova(pdp - (pt_entry_t *)p->dirbase),
+                                               ptp,
+                                               eptp);
 
-                       pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
+                                       /*
+                                        * Invalidate the page directory pointer.
+                                        */
+                                       pmap_store_pte(pdp, 0x0);
 
-               } while ((attributes != bits) &&
-                        (pv_e != (pv_hashed_entry_t)pv_h));
-       }
+                                       PMAP_UNLOCK(p);
 
-       UNLOCK_PVH(pai);
-       return (attributes);
-}
+                                       /*
+                                        * And free the pte page itself.
+                                        */
+                                       {
+                                               register vm_page_t m;
 
-/*
- *     Set specified attribute bits.
- */
-void
-phys_attribute_set(
-       ppnum_t         pn,
-       int             bits)
-{
-       int             pai;
+                                               vm_object_lock(p->pm_obj);
 
-       pmap_intr_assert();
-       assert(pn != vm_page_fictitious_addr);
-       if (pn == vm_page_guard_addr)
-               return;
+                                               m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]) * PAGE_SIZE);
+                                               if (m == VM_PAGE_NULL)
+                                                       panic("pmap_collect: pte page not in object");
 
-       pai = ppn_to_pai(pn);
+                                               vm_object_unlock(p->pm_obj);
 
-       if (!IS_MANAGED_PAGE(pai)) {
-               /* Not a managed page.  */
-               return;
-       }
+                                               VM_PAGE_FREE(m);
 
-       LOCK_PVH(pai);
-       pmap_phys_attributes[pai] |= bits;
-       UNLOCK_PVH(pai);
-}
+                                               OSAddAtomic(-1,  &inuse_ptepages_count);
+                                               PMAP_ZINFO_PFREE(p, PAGE_SIZE);
+                                       }
 
-/*
- *     Set the modify bit on the specified physical page.
- */
+                                       PMAP_LOCK(p);
+                               }
+                       }
+               }
+       }
 
-void
-pmap_set_modify(ppnum_t pn)
-{
-       phys_attribute_set(pn, PHYS_MODIFIED);
+       PMAP_UPDATE_TLBS(p, 0x0, 0xFFFFFFFFFFFFF000ULL);
+       PMAP_UNLOCK(p);
+       return;
 }
+#endif
 
-/*
- *     Clear the modify bits on the specified physical page.
- */
 
 void
-pmap_clear_modify(ppnum_t pn)
+pmap_copy_page(ppnum_t src, ppnum_t dst)
 {
-       phys_attribute_clear(pn, PHYS_MODIFIED);
+       bcopy_phys((addr64_t)i386_ptob(src),
+                  (addr64_t)i386_ptob(dst),
+                  PAGE_SIZE);
 }
 
-/*
- *     pmap_is_modified:
- *
- *     Return whether or not the specified physical page is modified
- *     by any physical maps.
- */
-
-boolean_t
-pmap_is_modified(ppnum_t pn)
-{
-       if (phys_attribute_test(pn, PHYS_MODIFIED))
-               return TRUE;
-       return FALSE;
-}
 
 /*
- *     pmap_clear_reference:
+ *     Routine:        pmap_pageable
+ *     Function:
+ *             Make the specified pages (by pmap, offset)
+ *             pageable (or not) as requested.
  *
- *     Clear the reference bit on the specified physical page.
- */
-
-void
-pmap_clear_reference(ppnum_t pn)
-{
-       phys_attribute_clear(pn, PHYS_REFERENCED);
-}
-
-void
-pmap_set_reference(ppnum_t pn)
-{
-       phys_attribute_set(pn, PHYS_REFERENCED);
-}
-
-/*
- *     pmap_is_referenced:
+ *             A page which is not pageable may not take
+ *             a fault; therefore, its page table entry
+ *             must remain valid for the duration.
  *
- *     Return whether or not the specified physical page is referenced
- *     by any physical maps.
- */
-
-boolean_t
-pmap_is_referenced(ppnum_t pn)
-{
-        if (phys_attribute_test(pn, PHYS_REFERENCED))
-               return TRUE;
-       return FALSE;
-}
-
-/*
- * pmap_get_refmod(phys)
- *  returns the referenced and modified bits of the specified
- *  physical page.
- */
-unsigned int
-pmap_get_refmod(ppnum_t pn)
-{
-        int            refmod;
-       unsigned int    retval = 0;
-
-       refmod = phys_attribute_test(pn, PHYS_MODIFIED | PHYS_REFERENCED);
-
-       if (refmod & PHYS_MODIFIED)
-               retval |= VM_MEM_MODIFIED;
-       if (refmod & PHYS_REFERENCED)
-               retval |= VM_MEM_REFERENCED;
-
-       return (retval);
-}
-
-/*
- * pmap_clear_refmod(phys, mask)
- *  clears the referenced and modified bits as specified by the mask
- *  of the specified physical page.
+ *             This routine is merely advisory; pmap_enter
+ *             will specify that these pages are to be wired
+ *             down (or not) as appropriate.
  */
 void
-pmap_clear_refmod(ppnum_t pn, unsigned int mask)
+pmap_pageable(
+       __unused pmap_t                 pmap,
+       __unused vm_map_offset_t        start_addr,
+       __unused vm_map_offset_t        end_addr,
+       __unused boolean_t              pageable)
 {
-       unsigned int  x86Mask;
-
-       x86Mask = (   ((mask &   VM_MEM_MODIFIED)?   PHYS_MODIFIED : 0)
-                   | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0));
-       phys_attribute_clear(pn, x86Mask);
+#ifdef lint
+       pmap++; start_addr++; end_addr++; pageable++;
+#endif /* lint */
 }
 
 void 
@@ -3274,12 +2156,12 @@ extern kern_return_t dtrace_copyio_postflight(addr64_t);
 kern_return_t dtrace_copyio_preflight(__unused addr64_t va)
 {
        thread_t thread = current_thread();
-
+       uint64_t ccr3;
        if (current_map() == kernel_map)
                return KERN_FAILURE;
-       else if (get_cr3() != thread->map->pmap->pm_cr3)
+       else if (((ccr3 = get_cr3_base()) != thread->map->pmap->pm_cr3) && (no_shared_cr3 == FALSE))
                return KERN_FAILURE;
-       else if (thread->machine.specFlags & CopyIOActive)
+       else if (no_shared_cr3 && (ccr3 != kernel_pmap->pm_cr3))
                return KERN_FAILURE;
        else
                return KERN_SUCCESS;
@@ -3341,94 +2223,6 @@ phys_page_exists(ppnum_t pn)
        return TRUE;
 }
 
-void
-mapping_free_prime(void)
-{
-       int                     i;
-       pv_hashed_entry_t       pvh_e;
-       pv_hashed_entry_t       pvh_eh;
-       pv_hashed_entry_t       pvh_et;
-       int                     pv_cnt;
-
-       pv_cnt = 0;
-       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
-       for (i = 0; i < (5 * PV_HASHED_ALLOC_CHUNK); i++) {
-               pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
-
-               pvh_e->qlink.next = (queue_entry_t)pvh_eh;
-               pvh_eh = pvh_e;
-
-               if (pvh_et == PV_HASHED_ENTRY_NULL)
-                       pvh_et = pvh_e;
-               pv_cnt++;
-       }
-       PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
-
-       pv_cnt = 0;
-       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
-       for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK; i++) {
-               pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
-
-               pvh_e->qlink.next = (queue_entry_t)pvh_eh;
-               pvh_eh = pvh_e;
-
-               if (pvh_et == PV_HASHED_ENTRY_NULL)
-                       pvh_et = pvh_e;
-               pv_cnt++;
-       }
-       PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
-
-}
-
-void
-mapping_adjust(void)
-{
-       pv_hashed_entry_t       pvh_e;
-       pv_hashed_entry_t       pvh_eh;
-       pv_hashed_entry_t       pvh_et;
-       int                     pv_cnt;
-       int                     i;
-
-       if (mapping_adjust_call == NULL) {
-               thread_call_setup(&mapping_adjust_call_data,
-                                 (thread_call_func_t) mapping_adjust,
-                                 (thread_call_param_t) NULL);
-               mapping_adjust_call = &mapping_adjust_call_data;
-       }
-
-       pv_cnt = 0;
-       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
-       if (pv_hashed_kern_free_count < PV_HASHED_KERN_LOW_WATER_MARK) {
-               for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK; i++) {
-                       pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
-
-                       pvh_e->qlink.next = (queue_entry_t)pvh_eh;
-                       pvh_eh = pvh_e;
-
-                       if (pvh_et == PV_HASHED_ENTRY_NULL)
-                               pvh_et = pvh_e;
-                       pv_cnt++;
-               }
-               PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
-       }
-
-       pv_cnt = 0;
-       pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
-       if (pv_hashed_free_count < PV_HASHED_LOW_WATER_MARK) {
-               for (i = 0; i < PV_HASHED_ALLOC_CHUNK; i++) {
-                       pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
-
-                       pvh_e->qlink.next = (queue_entry_t)pvh_eh;
-                       pvh_eh = pvh_e;
-
-                       if (pvh_et == PV_HASHED_ENTRY_NULL)
-                               pvh_et = pvh_e;
-                       pv_cnt++;
-               }
-               PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
-       }
-       mappingrecurse = 0;
-}
 
 
 void
@@ -3437,7 +2231,7 @@ pmap_switch(pmap_t tpmap)
         spl_t  s;
 
        s = splhigh();          /* Make sure interruptions are disabled */
-       set_dirbase(tpmap, current_thread());
+       set_dirbase(tpmap, current_thread(), cpu_number());
        splx(s);
 }
 
@@ -3452,6 +2246,12 @@ pmap_disable_NX(pmap_t pmap)
         pmap->nx_enabled = 0;
 }
 
+void 
+pt_fake_zone_init(int zone_index)
+{
+       pt_fake_zone_index = zone_index;
+}
+
 void
 pt_fake_zone_info(
        int             *count,
@@ -3459,8 +2259,10 @@ pt_fake_zone_info(
        vm_size_t       *max_size,
        vm_size_t       *elem_size,
        vm_size_t       *alloc_size,
+       uint64_t        *sum_size,
        int             *collectable,
-       int             *exhaustable)
+       int             *exhaustable,
+       int             *caller_acct)
 {
         *count      = inuse_ptepages_count;
        *cur_size   = PAGE_SIZE * inuse_ptepages_count;
@@ -3470,23 +2272,143 @@ pt_fake_zone_info(
                                   vm_page_free_count);
        *elem_size  = PAGE_SIZE;
        *alloc_size = PAGE_SIZE;
+       *sum_size = alloc_ptepages_count * PAGE_SIZE;
 
        *collectable = 1;
        *exhaustable = 0;
+       *caller_acct = 1;
 }
 
-static inline void
-pmap_cpuset_NMIPI(cpu_set cpu_mask) {
-       unsigned int cpu, cpu_bit;
-       uint64_t deadline;
 
-       for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
-               if (cpu_mask & cpu_bit)
-                       cpu_NMI_interrupt(cpu);
+void
+pmap_flush_context_init(pmap_flush_context *pfc)
+{
+       pfc->pfc_cpus = 0;
+       pfc->pfc_invalid_global = 0;
+}
+
+extern unsigned TLBTimeOut;
+void
+pmap_flush(
+       pmap_flush_context *pfc)
+{
+       unsigned int    my_cpu;
+       unsigned int    cpu;
+       unsigned int    cpu_bit;
+       cpumask_t       cpus_to_respond = 0;
+       cpumask_t       cpus_to_signal = 0;
+       cpumask_t       cpus_signaled = 0;
+       boolean_t       flush_self = FALSE;
+       uint64_t        deadline;
+
+       mp_disable_preemption();
+
+       my_cpu = cpu_number();
+       cpus_to_signal = pfc->pfc_cpus;
+
+       PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_START,
+                           NULL, cpus_to_signal, 0, 0, 0);
+
+       for (cpu = 0, cpu_bit = 1; cpu < real_ncpus && cpus_to_signal; cpu++, cpu_bit <<= 1) {
+
+               if (cpus_to_signal & cpu_bit) {
+
+                       cpus_to_signal &= ~cpu_bit;
+
+                       if (!cpu_datap(cpu)->cpu_running)
+                               continue;
+
+                       if (pfc->pfc_invalid_global & cpu_bit)
+                               cpu_datap(cpu)->cpu_tlb_invalid_global = TRUE;
+                       else
+                               cpu_datap(cpu)->cpu_tlb_invalid_local = TRUE;
+                       mfence();
+
+                       if (cpu == my_cpu) {
+                               flush_self = TRUE;
+                               continue;
+                       }
+                       if (CPU_CR3_IS_ACTIVE(cpu)) {
+                               cpus_to_respond |= cpu_bit;
+                               i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
+                       }
+               }
+       }
+       cpus_signaled = cpus_to_respond;
+
+       /*
+        * Flush local tlb if required.
+        * Do this now to overlap with other processors responding.
+        */
+       if (flush_self && cpu_datap(my_cpu)->cpu_tlb_invalid != FALSE)
+               process_pmap_updates();
+
+       if (cpus_to_respond) {
+
+               deadline = mach_absolute_time() +
+                               (TLBTimeOut ? TLBTimeOut : LockTimeOut);
+               boolean_t is_timeout_traced = FALSE;
+               
+               /*
+                * Wait for those other cpus to acknowledge
+                */
+               while (cpus_to_respond != 0) {
+                       long orig_acks = 0;
+
+                       for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
+                               /* Consider checking local/global invalidity
+                                * as appropriate in the PCID case.
+                                */
+                               if ((cpus_to_respond & cpu_bit) != 0) {
+                                       if (!cpu_datap(cpu)->cpu_running ||
+                                           cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
+                                           !CPU_CR3_IS_ACTIVE(cpu)) {
+                                               cpus_to_respond &= ~cpu_bit;
+                                       }
+                                       cpu_pause();
+                               }
+                               if (cpus_to_respond == 0)
+                                       break;
+                       }
+                       if (cpus_to_respond && (mach_absolute_time() > deadline)) {
+                               if (machine_timeout_suspended())
+                                       continue;
+                               if (TLBTimeOut == 0) {
+                                       if (is_timeout_traced)
+                                               continue;
+                                       PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS_TO),
+                                               NULL, cpus_to_signal, cpus_to_respond, 0, 0);
+                                       is_timeout_traced = TRUE;
+                                       continue;
+                               }
+                               pmap_tlb_flush_timeout = TRUE;
+                               orig_acks = NMIPI_acks;
+                               mp_cpus_NMIPI(cpus_to_respond);
+
+                               panic("TLB invalidation IPI timeout: "
+                                   "CPU(s) failed to respond to interrupts, unresponsive CPU bitmap: 0x%llx, NMIPI acks: orig: 0x%lx, now: 0x%lx",
+                                   cpus_to_respond, orig_acks, NMIPI_acks);
+                       }
+               }
        }
-       deadline = mach_absolute_time() + (LockTimeOut);
-       while (mach_absolute_time() < deadline)
-               cpu_pause();
+       PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_END,
+                           NULL, cpus_signaled, flush_self, 0, 0);
+
+       mp_enable_preemption();
+}
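
pmap_flush_context_init(), pmap_flush_tlbs(..., PMAP_DELAY_TLB_FLUSH, &pfc) and pmap_flush() form a batching protocol: the delayed calls only record target CPUs in the context, and pmap_flush() issues one round of IPIs for the whole batch. A usage sketch, assuming a hypothetical remove_ptes_only() helper that clears PTEs without flushing (real callers also hold the pmap lock, which supplies the disabled preemption that pmap_flush_tlbs() asserts):

static void
batched_unmap_sketch(pmap_t map, vm_map_offset_t start, vm_map_offset_t end)
{
	pmap_flush_context	pfc;
	vm_map_offset_t		va;

	pmap_flush_context_init(&pfc);

	for (va = start; va < end; va += PAGE_SIZE) {
		remove_ptes_only(map, va);	/* hypothetical PTE teardown */
		/* Record, but do not signal, the CPUs that need a flush. */
		pmap_flush_tlbs(map, va, va + PAGE_SIZE,
		                PMAP_DELAY_TLB_FLUSH, &pfc);
	}

	/* One IPI round (and at most one local flush) for the whole batch. */
	pmap_flush(&pfc);
}
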
+
+
+static void
+invept(void *eptp)
+{
+       struct {
+               uint64_t eptp;
+               uint64_t reserved;
+       } __attribute__((aligned(16), packed)) invept_descriptor = {(uint64_t)eptp, 0};
+
+       __asm__ volatile("invept (%%rax), %%rcx"
+               : : "c" (PMAP_INVEPT_SINGLE_CONTEXT), "a" (&invept_descriptor)
+               : "cc", "memory");
 }
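
The descriptor built here selects a single EPT context (PMAP_INVEPT_SINGLE_CONTEXT) for the INVEPT instruction. As the EPT branch of pmap_flush_tlbs() below shows, the helper is simply broadcast to every CPU rather than tracked per-CPU; a minimal sketch of that dispatch (flush_ept_everywhere() is hypothetical):

static void
flush_ept_everywhere(pmap_t ept_map)
{
	assert(is_ept_pmap(ept_map));

	/* Run invept() on all CPUs with this pmap's EPT pointer, as the
	 * is_ept path of pmap_flush_tlbs() does. */
	mp_cpus_call(CPUMASK_ALL, ASYNC, invept, (void *)ept_map->pm_eptp);
}
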
 
 /*
@@ -3498,26 +2420,61 @@ pmap_cpuset_NMIPI(cpu_set cpu_mask) {
  *  - flush the local tlb if active for this pmap
  *  - return ... the caller will unlock the pmap
  */
+
 void
-pmap_flush_tlbs(pmap_t pmap)
+pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv, int options, pmap_flush_context *pfc)
 {
        unsigned int    cpu;
        unsigned int    cpu_bit;
-       cpu_set         cpus_to_signal;
+       cpumask_t       cpus_to_signal;
        unsigned int    my_cpu = cpu_number();
        pmap_paddr_t    pmap_cr3 = pmap->pm_cr3;
        boolean_t       flush_self = FALSE;
        uint64_t        deadline;
+       boolean_t       pmap_is_shared = (pmap->pm_shared || (pmap == kernel_pmap));
+       boolean_t       need_global_flush = FALSE;
+       uint32_t        event_code;
+       vm_map_offset_t event_startv, event_endv;
+       boolean_t       is_ept = is_ept_pmap(pmap);
 
        assert((processor_avail_count < 2) ||
               (ml_get_interrupts_enabled() && get_preemption_level() != 0));
 
+       if (pmap == kernel_pmap) {
+               event_code = PMAP_CODE(PMAP__FLUSH_KERN_TLBS);
+               event_startv = VM_KERNEL_UNSLIDE_OR_PERM(startv);
+               event_endv = VM_KERNEL_UNSLIDE_OR_PERM(endv);
+       } else if (is_ept) {
+               event_code = PMAP_CODE(PMAP__FLUSH_EPT);
+               event_startv = startv;
+               event_endv = endv;
+       } else {
+               event_code = PMAP_CODE(PMAP__FLUSH_TLBS);
+               event_startv = startv;
+               event_endv = endv;
+       }
+
+       PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_START,
+                               VM_KERNEL_UNSLIDE_OR_PERM(pmap), options, event_startv, event_endv, 0);
+
+       if (is_ept) {
+               mp_cpus_call(CPUMASK_ALL, ASYNC, invept, (void*)pmap->pm_eptp);
+               goto out;
+       }
+
        /*
         * Scan other cpus for matching active or task CR3.
         * For idle cpus (with no active map) we mark them invalid but
         * don't signal -- they'll check as they go busy.
         */
        cpus_to_signal = 0;
+
+       if (pmap_pcid_ncpus) {
+               if (pmap_is_shared)
+                       need_global_flush = TRUE;
+               pmap_pcid_invalidate_all_cpus(pmap);
+               mfence();
+       }
        for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
                if (!cpu_datap(cpu)->cpu_running)
                        continue;
@@ -3526,14 +2483,24 @@ pmap_flush_tlbs(pmap_t  pmap)
 
                if ((pmap_cr3 == cpu_task_cr3) ||
                    (pmap_cr3 == cpu_active_cr3) ||
-                   (pmap->pm_shared) ||
-                   (pmap == kernel_pmap)) {
+                   (pmap_is_shared)) {
+
+                       if (options & PMAP_DELAY_TLB_FLUSH) {
+                               if (need_global_flush == TRUE)
+                                       pfc->pfc_invalid_global |= cpu_bit;
+                               pfc->pfc_cpus |= cpu_bit;
+
+                               continue;
+                       }
                        if (cpu == my_cpu) {
                                flush_self = TRUE;
                                continue;
                        }
-                       cpu_datap(cpu)->cpu_tlb_invalid = TRUE;
-                       __asm__ volatile("mfence");
+                       if (need_global_flush == TRUE)
+                               cpu_datap(cpu)->cpu_tlb_invalid_global = TRUE;
+                       else
+                               cpu_datap(cpu)->cpu_tlb_invalid_local = TRUE;
+                       mfence();
 
                        /*
                         * We don't need to signal processors which will flush
@@ -3551,45 +2518,49 @@ pmap_flush_tlbs(pmap_t  pmap)
                         */
                        if (CPU_CR3_IS_ACTIVE(cpu) &&
                            (pmap_cr3 == CPU_GET_ACTIVE_CR3(cpu) ||
-                           pmap->pm_shared ||
-                           (pmap_cr3 == CPU_GET_TASK_CR3(cpu)))) {
+                            pmap->pm_shared ||
+                            (pmap_cr3 == CPU_GET_TASK_CR3(cpu)))) {
                                cpus_to_signal |= cpu_bit;
                                i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
                        }
                }
        }
-
-       PMAP_TRACE(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_START,
-                  pmap, cpus_to_signal, flush_self, 0, 0);
+       if ((options & PMAP_DELAY_TLB_FLUSH))
+               goto out;
 
        /*
         * Flush local tlb if required.
         * Do this now to overlap with other processors responding.
         */
-       if (flush_self)
-               flush_tlb();
+       if (flush_self) {
+               if (pmap_pcid_ncpus) {
+                       pmap_pcid_validate_cpu(pmap, my_cpu);
+                       if (pmap_is_shared)
+                               tlb_flush_global();
+                       else
+                               flush_tlb_raw();
+               }
+               else
+                       flush_tlb_raw();
+       }
 
        if (cpus_to_signal) {
-               cpu_set cpus_to_respond = cpus_to_signal;
+               cpumask_t       cpus_to_respond = cpus_to_signal;
+
+               deadline = mach_absolute_time() +
+                               (TLBTimeOut ? TLBTimeOut : LockTimeOut);
+               boolean_t is_timeout_traced = FALSE;
 
-               deadline = mach_absolute_time() + LockTimeOut;
                /*
                 * Wait for those other cpus to acknowledge
                 */
                while (cpus_to_respond != 0) {
-                       if (mach_absolute_time() > deadline) {
-                               if (mp_recent_debugger_activity())
-                                       continue;
-                               if (!panic_active()) {
-                                       pmap_tlb_flush_timeout = TRUE;
-                                       pmap_cpuset_NMIPI(cpus_to_respond);
-                               }
-                               panic("pmap_flush_tlbs() timeout: "
-                                   "cpu(s) failing to respond to interrupts, pmap=%p cpus_to_respond=0x%lx",
-                                   pmap, cpus_to_respond);
-                       }
+                       long orig_acks = 0;
 
                        for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
+                               /* Consider checking local/global invalidity
+                                * as appropriate in the PCID case.
+                                */
                                if ((cpus_to_respond & cpu_bit) != 0) {
                                        if (!cpu_datap(cpu)->cpu_running ||
                                            cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
@@ -3601,22 +2572,62 @@ pmap_flush_tlbs(pmap_t  pmap)
                                if (cpus_to_respond == 0)
                                        break;
                        }
+                       if (cpus_to_respond && (mach_absolute_time() > deadline)) {
+                               if (machine_timeout_suspended())
+                                       continue;
+                               if (TLBTimeOut == 0) {
+                                       /* cut tracepoint but don't panic */
+                                       if (is_timeout_traced)
+                                               continue;
+                                       PMAP_TRACE_CONSTANT(
+                                               PMAP_CODE(PMAP__FLUSH_TLBS_TO),
+                                               VM_KERNEL_UNSLIDE_OR_PERM(pmap), cpus_to_signal, cpus_to_respond, 0, 0);
+                                       is_timeout_traced = TRUE;
+                                       continue;
+                               }
+                               pmap_tlb_flush_timeout = TRUE;
+                               orig_acks = NMIPI_acks;
+                               mp_cpus_NMIPI(cpus_to_respond);
+
+                               panic("TLB invalidation IPI timeout: "
+                                   "CPU(s) failed to respond to interrupts, unresponsive CPU bitmap: 0x%llx, NMIPI acks: orig: 0x%lx, now: 0x%lx",
+                                   cpus_to_respond, orig_acks, NMIPI_acks);
+                       }
                }
        }
 
-       PMAP_TRACE(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_END,
-                  pmap, cpus_to_signal, flush_self, 0, 0);
+       if (__improbable((pmap == kernel_pmap) && (flush_self != TRUE))) {
+               panic("pmap_flush_tlbs: pmap == kernel_pmap && flush_self != TRUE; kernel CR3: 0x%llX, pmap_cr3: 0x%llx, CPU active CR3: 0x%llX, CPU Task Map: %d", kernel_pmap->pm_cr3, pmap_cr3, current_cpu_datap()->cpu_active_cr3, current_cpu_datap()->cpu_task_map);
+       }
+
+out:
+       PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_END,
+                               VM_KERNEL_UNSLIDE_OR_PERM(pmap), cpus_to_signal, event_startv, event_endv, 0);
+
 }
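
The PMAP_UPDATE_TLBS() macro used by pmap_collect() above presumably maps onto the immediate (non-delayed) form of this routine; its real definition lives in i386/pmap_internal.h and may differ, but the shape is along these lines:

/* Sketch only -- not the actual pmap_internal.h definition. */
#define PMAP_UPDATE_TLBS_SKETCH(pmap, s, e)	\
	pmap_flush_tlbs((pmap), (s), (e), 0, NULL)
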
 
 void
 process_pmap_updates(void)
 {
-       assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
-
-       flush_tlb();
+       int ccpu = cpu_number();
+       pmap_assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+       if (pmap_pcid_ncpus) {
+               pmap_pcid_validate_current();
+               if (cpu_datap(ccpu)->cpu_tlb_invalid_global) {
+                       cpu_datap(ccpu)->cpu_tlb_invalid = FALSE;
+                       tlb_flush_global();
+               }
+               else {
+                       cpu_datap(ccpu)->cpu_tlb_invalid_local = FALSE;
+                       flush_tlb_raw();
+               }
+       }
+       else {
+               current_cpu_datap()->cpu_tlb_invalid = FALSE;
+               flush_tlb_raw();
+       }
 
-       current_cpu_datap()->cpu_tlb_invalid = FALSE;
-       __asm__ volatile("mfence");
+       mfence();
 }
 
 void
@@ -3625,18 +2636,122 @@ pmap_update_interrupt(void)
         PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START,
                   0, 0, 0, 0, 0);
 
-       process_pmap_updates();
+       if (current_cpu_datap()->cpu_tlb_invalid)
+               process_pmap_updates();
 
         PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END,
                   0, 0, 0, 0, 0);
 }
 
+#include <mach/mach_vm.h>      /* mach_vm_region_recurse() */
+/* Scan kernel pmap for W+X PTEs, scan kernel VM map for W+X map entries
+ * and identify ranges with mismatched VM permissions and PTE permissions
+ */
+kern_return_t
+pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset_t ev) {
+       vm_offset_t cv = sv;
+       kern_return_t rv = KERN_SUCCESS;
+       uint64_t skip4 = 0, skip2 = 0;
+
+       assert(!is_ept_pmap(ipmap));
+
+       sv &= ~PAGE_MASK_64;
+       ev &= ~PAGE_MASK_64;
+       while (cv < ev) {
+               if (__improbable((cv > 0x00007FFFFFFFFFFFULL) &&
+                       (cv < 0xFFFF800000000000ULL))) {
+                       cv = 0xFFFF800000000000ULL;
+               }
+               /* Potential inconsistencies from not holding pmap lock
+                * but harmless for the moment.
+                */
+               if (((cv & PML4MASK) == 0) && (pmap64_pml4(ipmap, cv) == 0)) {
+                       if ((cv + NBPML4) > cv)
+                               cv += NBPML4;
+                       else
+                               break;
+                       skip4++;
+                       continue;
+               }
+               if (((cv & PDMASK) == 0) && (pmap_pde(ipmap, cv) == 0)) {
+                       if ((cv + NBPD) > cv)
+                               cv += NBPD;
+                       else
+                               break;
+                       skip2++;
+                       continue;
+               }
+
+               pt_entry_t *ptep = pmap_pte(ipmap, cv);
+               if (ptep && (*ptep & INTEL_PTE_VALID)) {
+                       if (*ptep & INTEL_PTE_WRITE) {
+                               if (!(*ptep & INTEL_PTE_NX)) {
+                                       kprintf("W+X PTE at 0x%lx, P4: 0x%llx, P3: 0x%llx, P2: 0x%llx, PT: 0x%llx, VP: %u\n", cv, *pmap64_pml4(ipmap, cv), *pmap64_pdpt(ipmap, cv), *pmap64_pde(ipmap, cv), *ptep, pmap_valid_page((ppnum_t)(i386_btop(pte_to_pa(*ptep)))));
+                                       rv = KERN_FAILURE;
+                               }
+                       }
+               }
+               cv += PAGE_SIZE;
+       }
+       kprintf("Completed pmap scan\n");
+       cv = sv;
+
+       struct vm_region_submap_info_64 vbr;
+       mach_msg_type_number_t vbrcount = 0;
+       mach_vm_size_t  vmsize;
+       vm_prot_t       prot;
+       uint32_t nesting_depth = 0;
+       kern_return_t kret;
+       
+       while (cv < ev) {
+               
+               for (;;) {
+                       vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
+                       if((kret = mach_vm_region_recurse(ivmmap, 
+                                   (mach_vm_address_t *) &cv, &vmsize, &nesting_depth, 
+                                       (vm_region_recurse_info_t)&vbr,
+                                       &vbrcount)) != KERN_SUCCESS) {
+                               break;
+                       }
 
-unsigned int
-pmap_cache_attributes(ppnum_t pn)
-{
-       return IS_MANAGED_PAGE(ppn_to_pai(pn)) ? VM_WIMG_COPYBACK
-                                              : VM_WIMG_IO;
-}
+                       if(vbr.is_submap) {
+                               nesting_depth++;
+                               continue;
+                       } else {
+                               break;
+                       }
+               }
+
+               if(kret != KERN_SUCCESS)
+                       break;
+
+               prot = vbr.protection;
 
+               if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
+                       kprintf("W+X map entry at address 0x%lx\n", cv);
+                       rv = KERN_FAILURE;
+               }
+
+               if (prot) {
+                       vm_offset_t pcv;
+                       for (pcv = cv; pcv < cv + vmsize; pcv += PAGE_SIZE) {
+                               pt_entry_t *ptep = pmap_pte(ipmap, pcv);
+                               vm_prot_t tprot;
 
+                               if ((ptep == NULL) || !(*ptep & INTEL_PTE_VALID))
+                                       continue;
+                               tprot = VM_PROT_READ;
+                               if (*ptep & INTEL_PTE_WRITE)
+                                       tprot |= VM_PROT_WRITE;
+                               if ((*ptep & INTEL_PTE_NX) == 0)
+                                       tprot |= VM_PROT_EXECUTE;
+                               if (tprot != prot) {
+                                       kprintf("PTE/map entry permissions mismatch at address 0x%lx, pte: 0x%llx, protection: 0x%x\n", pcv, *ptep, prot);
+                                       rv = KERN_FAILURE;
+                               }
+                       }
+               }
+               cv += vmsize;
+       }
+       return rv;
+}
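
A sketch of how the verifier above might be driven (check_kernel_wx() is hypothetical; kernel_pmap, kernel_map and the return-code convention are as used in this file):

static void
check_kernel_wx(vm_offset_t start, vm_offset_t end)
{
	kern_return_t kr;

	/* Scan PTEs and VM map entries in [start, end) for W+X mappings
	 * and for PTE/map-entry permission mismatches. */
	kr = pmap_permissions_verify(kernel_pmap, kernel_map, start, end);
	if (kr != KERN_SUCCESS)
		kprintf("W+X or mismatched mappings detected in [0x%lx, 0x%lx)\n",
		        start, end);
}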