+ unsigned int vxsGsu; /* Guest mapping suspends */
+ unsigned int vxsGsuHit; /* Suspend hits entry (active only) */
+ unsigned int vxsGsuMiss; /* Suspend misses entry */
+
+ unsigned int vxsGtd; /* Guest test ref&chg */
+ unsigned int vxsGtdHit; /* Test r&c hits entry (active only) */
+ unsigned int vxsGtdMiss; /* Test r&c misses entry */
+};
+#pragma pack()
+typedef struct pmap_vmm_stats pmap_vmm_stats;
+
+/* Not wanting to tax all of our customers for the sins of those that use virtual operating
+   systems, we've built the hash table from its own primitive virtual memory. We first
+   allocate a pmap_vmm_ext with sufficient space following it to accommodate the hash table
+   index (one 64-bit physical address per 4k-byte page of hash table). The allocation
+   must not cross a 4k-byte page boundary (we'll be accessing the block with relocation
+   off), so we'll try a couple of times, then just burn a whole page. We stuff the effective
+   address of the cache-aligned index into vmxHashPgIdx; the physical-mode code locates the index
+   by adding the size of a pmap_vmm_ext to its translated physical address, then rounding
+   up to the next 128-byte boundary. Now we grab enough virtual pages to contain the hash table,
+   and fill in the index with the pages' physical addresses. For the final touch that's sure
+   to please, we initialize the hash table. Mmmmm, golden brown perfection.
+ */
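+
+/* A minimal sketch (illustrative only; the helper name is hypothetical) of the 4k-byte
+   boundary test described above: a candidate block is usable only if its first and last
+   bytes fall within the same 4k-byte page, since the block will be accessed with
+   relocation off. */
+static inline int vmxBlockFitsPage(addr64_t blockPhys, unsigned int blockSize) {
+	/* Usable iff both ends of the block land in the same 4k-byte page */
+	return ((blockPhys & ~0xFFFULL) == ((blockPhys + blockSize - 1) & ~0xFFFULL));
+}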
+
+#pragma pack(4)
+struct pmap_vmm_ext {
+ addr64_t vmxSalt; /* This block's virt<->real conversion salt */
+ addr64_t vmxHostPmapPhys; /* Host pmap physical address */
+ struct pmap *vmxHostPmap; /* Host pmap effective address */
+ addr64_t *vmxHashPgIdx; /* Hash table physical index base address */
+ vm_offset_t *vmxHashPgList; /* List of virtual pages comprising the hash table */
+ unsigned int *vmxActiveBitmap; /* Bitmap of active mappings in hash table */
+ pmap_vmm_stats vmxStats; /* Stats for VMM assists */
+#define VMX_HPIDX_OFFSET ((sizeof(pmap_vmm_ext) + 127) & ~127)
+ /* The hash table physical index begins at the first
+ 128-byte boundary after the pmap_vmm_ext struct */
+#define VMX_HPLIST_OFFSET (VMX_HPIDX_OFFSET + (GV_HPAGES * sizeof(addr64_t)))
+#define VMX_ACTMAP_OFFSET (VMX_HPLIST_OFFSET + (GV_HPAGES * sizeof(vm_offset_t)))
+};
+#pragma pack()
+typedef struct pmap_vmm_ext pmap_vmm_ext;
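+
+/* A minimal sketch (helper names are hypothetical; assumes GV_HPAGES is defined elsewhere
+   and that the block is allocated on at least a 128-byte boundary) of how physical-mode
+   code locates the trailing regions from the block's translated physical address, per the
+   layout macros above. */
+static inline addr64_t vmxHashIdxPhys(addr64_t extPhys) {
+	return (extPhys + VMX_HPIDX_OFFSET);	/* index: sizeof(pmap_vmm_ext) rounded up to 128 bytes */
+}
+static inline addr64_t vmxHashPgListPhys(addr64_t extPhys) {
+	return (extPhys + VMX_HPLIST_OFFSET);	/* page list follows the GV_HPAGES-entry index */
+}
+static inline addr64_t vmxActBitmapPhys(addr64_t extPhys) {
+	return (extPhys + VMX_ACTMAP_OFFSET);	/* active bitmap follows the page list */
+}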
+
+#pragma pack(4) /* Make sure the structure stays as we defined it */
+struct pmap {
+ queue_head_t pmap_link; /* MUST BE FIRST */
+ addr64_t pmapvr; /* Virtual to real conversion mask */
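+/* Editorial note (an assumption, not confirmed by this header): the conversion mask
+   is an XOR salt, so pmapPhys == ((addr64_t)(uintptr_t)pmap) ^ pmap->pmapvr. */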
+ shexlock pmapSXlk; /* Shared/Exclusive lock for mapping changes */
+ unsigned int space; /* space for this pmap */
+#define invalSpace 0x00000001 /* Predefined always invalid space */
+ int ref_count; /* reference count */
+ unsigned int pmapFlags; /* Flags */
+#define pmapKeys 0x00000007 /* Keys and no execute bit to use with this pmap */
+#define pmapKeyDef 0x00000006 /* Default keys - Sup = 1, user = 1, no ex = 0 */
+#define pmapVMhost 0x00000010 /* pmap with Virtual Machines attached to it */
+#define pmapVMgsaa 0x00000020 /* Guest shadow assist active */
+#define pmapNXdisabled 0x00000040 /* no-execute disabled for this pmap */
+ unsigned int spaceNum; /* Space number */
+ unsigned int pmapCCtl; /* Cache control */
+#define pmapCCtlVal 0xFFFF0000 /* Valid entries */
+#define pmapCCtlLck 0x00008000 /* Lock bit mask */
+#define pmapCCtlLckb 16 /* Lock bit number */
+#define pmapCCtlGen 0x00007FFF /* Generation number */
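+/* Editorial note: pmapCCtlLckb uses PowerPC big-endian bit numbering, in which bit n
+   of a 32-bit word is the mask (0x80000000 >> n); bit 16 is therefore 0x00008000,
+   matching pmapCCtlLck. */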
+
+#define pmapSegCacheCnt 16 /* Maximum number of cache entries */
+#define pmapSegCacheUse 16 /* Number of cache entries to use */
+
+ struct pmap *freepmap; /* Free pmaps */
+ pmap_vmm_ext *pmapVmmExt; /* VMM extension block, for VMM host and guest pmaps */
+ addr64_t pmapVmmExtPhys; /* VMM extension block physical address */
+/* 0x038 */
+ uint64_t pmapSCSubTag; /* Segment cache sub-tags: a 16-entry array of 4-bit sub-tags */
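+/* Sixteen 4-bit sub-tags exactly fill the 64-bit word. A sketch of extracting entry i
+   (the bit ordering here is an assumption, not confirmed by this header):
+   subTag_i = (unsigned int)((pmapSCSubTag >> (i << 2)) & 0xFULL); */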
+/* 0x040 */
+ sgc pmapSegCache[pmapSegCacheCnt]; /* SLD values cached for quick load */
+
+/* 0x140 */
+/* fanout == 2 << shift: if the fanout is 4, the shift is 1; if the fanout is 8, the shift is 2; etc. */
+#define kSkipListFanoutShift 1
+/* with n lists, we can handle (fanout**n) pages optimally */
+#define kSkipListMaxLists 12
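+/* Worked example: with kSkipListFanoutShift of 1 the fanout is (2 << 1) == 4, so the
+   kSkipListMaxLists == 12 lists optimally handle 4**12 == 16,777,216 pages, i.e. 64GB
+   of 4k-byte pages. */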
+ unsigned char pmapCurLists; /* 0x140 - max #lists any mapping in this pmap currently has */
+ unsigned char pmapRsv2[3];
+ uint32_t pmapRandNum; /* 0x144 - used by mapSetLists() as a random number generator */
+ addr64_t pmapSkipLists[kSkipListMaxLists]; /* 0x148 - the list headers */
+/* following statistics conditionally gathered */
+ uint64_t pmapSearchVisits; /* 0x1A8 - nodes visited searching pmaps */
+ uint32_t pmapSearchCnt; /* 0x1B0 - number of calls to mapSearch or mapSearchFull */
+
+ unsigned int pmapRsv3[3];
+
+/* 0x1C0 */
+
+ struct pmap_statistics stats; /* statistics */