+/*-----------------------------------------------------------------------
+** vmm_build_shadow_hash
+**
+** Allocate and initialize a shadow hash table.
+**
+** This function assumes that PAGE_SIZE is 4K bytes.
+**
+-----------------------------------------------------------------------*/
+static pmap_vmm_ext *vmm_build_shadow_hash(pmap_t pmap)
+{
+    pmap_vmm_ext  *ext;                         /* VMM pmap extension we're building */
+    ppnum_t        extPP;                       /* VMM pmap extension physical page number */
+    kern_return_t  ret;                         /* Return code from various calls */
+    uint32_t       pages = GV_HPAGES;           /* Number of pages in the hash table */
+    vm_offset_t    free = VMX_HPIDX_OFFSET;     /* Offset into extension page of free area (128-byte aligned) */
+    uint32_t       freeSize = PAGE_SIZE - free; /* Number of free bytes in the extension page */
+    uint32_t       idx;
+
+    if ((pages * sizeof(addr64_t)) + (pages * sizeof(vm_offset_t)) > freeSize) {
+        panic("vmm_build_shadow_hash: too little pmap_vmm_ext free space\n");
+    }
+
+    ret = kmem_alloc_wired(kernel_map, (vm_offset_t *)&ext, PAGE_SIZE);
+                                                /* Allocate a page-sized extension block */
+    if (ret != KERN_SUCCESS) return (NULL);     /* Return NULL for failed allocate */
+    bzero((char *)ext, PAGE_SIZE);              /* Zero the entire extension block page */
+
+    extPP = pmap_find_phys(kernel_pmap, (vm_offset_t)ext);
+                                                /* Get extension block's physical page number */
+    if (!extPP) {                               /* This should not fail, but then again... */
+        panic("vmm_build_shadow_hash: could not translate pmap_vmm_ext vaddr %p\n", ext);
+    }
+
+    ext->vmxSalt         = (addr64_t)(vm_offset_t)ext ^ ptoa_64(extPP);
+                                                /* Set effective<->physical conversion salt */
+    ext->vmxHostPmapPhys = (addr64_t)(vm_offset_t)pmap ^ pmap->pmapvr;
+                                                /* Set host pmap's physical address */
+    ext->vmxHostPmap     = pmap;                /* Set host pmap's effective address */
+    ext->vmxHashPgIdx    = (addr64_t *)((vm_offset_t)ext + VMX_HPIDX_OFFSET);
+                                                /* Allocate physical index */
+    ext->vmxHashPgList   = (vm_offset_t *)((vm_offset_t)ext + VMX_HPLIST_OFFSET);
+                                                /* Allocate page list */
+    ext->vmxActiveBitmap = (vm_offset_t *)((vm_offset_t)ext + VMX_ACTMAP_OFFSET);
+                                                /* Allocate active mapping bitmap */
+
+    /* The hash table is typically larger than a single page, but we don't require it to be in a
+       contiguous virtual or physical chunk. So, we allocate it page by page, noting the effective
+       and physical address of each page in vmxHashPgList and vmxHashPgIdx, respectively. */
+    for (idx = 0; idx < pages; idx++) {
+        mapping_t *map;
+        uint32_t   mapIdx;
+        ret = kmem_alloc_wired(kernel_map, &ext->vmxHashPgList[idx], PAGE_SIZE);
+                                                /* Allocate a hash-table page */
+        if (ret != KERN_SUCCESS) goto fail;     /* Allocation failed, exit through cleanup */
+        bzero((char *)ext->vmxHashPgList[idx], PAGE_SIZE);      /* Zero the page */
+        ext->vmxHashPgIdx[idx] = ptoa_64(pmap_find_phys(kernel_pmap, (addr64_t)ext->vmxHashPgList[idx]));
+                                                /* Put page's physical address into index */
+        if (!ext->vmxHashPgIdx[idx]) {          /* Hash-table page's LRA failed */
+            panic("vmm_build_shadow_hash: could not translate hash-table vaddr %p\n",
+                  (void *)ext->vmxHashPgList[idx]);
+        }
+        map = (mapping_t *)ext->vmxHashPgList[idx];
+        for (mapIdx = 0; mapIdx < GV_SLTS_PPG; mapIdx++) {      /* Iterate over mappings in this page */
+            map->mpFlags = (mpGuest | mpgFree); /* Mark guest type and free */
+            map = (mapping_t *)((char *)map + GV_SLOT_SZ);      /* Next slot-sized mapping */
+        }
+    }
+
+    return (ext);                               /* Return newly-minted VMM pmap extension */
+
+fail:
+    for (idx = 0; idx < pages; idx++) {         /* De-allocate any pages we managed to allocate */
+        if (ext->vmxHashPgList[idx]) {
+            kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
+        }
+    }
+    kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE);         /* Release the VMM pmap extension page */
+    return (NULL);                              /* Return NULL for failure */
+}
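+
+/*-----------------------------------------------------------------------
+** Illustrative sketch, not part of the original source: vmxSalt, set up
+** above, is the XOR of the extension block's effective address and its
+** physical address, so either address can be recovered from the other
+** with a single XOR. This is the same trick vmm_activate_gsa uses below
+** to derive pmapVmmExtPhys.
+-----------------------------------------------------------------------*/
+static inline addr64_t vmm_ext_phys(pmap_vmm_ext *ext)
+{
+    return ((addr64_t)(vm_offset_t)ext ^ ext->vmxSalt);         /* Effective ^ salt == physical */
+}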
+
+
+/*-----------------------------------------------------------------------
+** vmm_release_shadow_hash
+**
+** Release shadow hash table and VMM extension block
+**
+-----------------------------------------------------------------------*/
+static void vmm_release_shadow_hash(pmap_vmm_ext *ext)
+{
+    uint32_t idx;
+
+    for (idx = 0; idx < GV_HPAGES; idx++) {     /* Release the hash table page by page */
+        kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
+    }
+
+    kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE);         /* Release the VMM pmap extension page */
+}
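+
+/*-----------------------------------------------------------------------
+** Usage sketch (hypothetical, not in the original source): an extension
+** block returned by vmm_build_shadow_hash always has all GV_HPAGES hash
+** pages allocated, which is why vmm_release_shadow_hash may free them
+** unconditionally; partially built blocks are torn down by the build
+** function's own fail path instead.
+-----------------------------------------------------------------------*/
+static void vmm_shadow_hash_example(pmap_t hpmap)
+{
+    pmap_vmm_ext *ext = vmm_build_shadow_hash(hpmap);           /* Build hash table and extension */
+    if (ext) {                                  /* NULL means allocation failed */
+        vmm_release_shadow_hash(ext);           /* Release everything when done */
+    }
+}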
+
+/*-----------------------------------------------------------------------
+** vmm_activate_gsa
+**
+** Activate guest shadow assist
+**
+-----------------------------------------------------------------------*/
+static kern_return_t vmm_activate_gsa(
+    thread_t           act,
+    vmm_thread_index_t index)
+{
+    vmmCntrlTable *CTable = act->machine.vmmControl;            /* Get VMM control table */
+    vmmCntrlEntry *CEntry;
+    pmap_t         hpmap;
+    pmap_t         gpmap;
+
+    if (!CTable) {                              /* Caller guarantees that this will work */
+        panic("vmm_activate_gsa: VMM control table not present; act = %p, idx = %lu\n",
+              act, index);
+        return KERN_FAILURE;
+    }
+    CEntry = vmm_get_entry(act, index);         /* Get context from index */
+    if (!CEntry) {                              /* Caller guarantees that this will work */
+        panic("vmm_activate_gsa: Unexpected failure of vmm_get_entry; act = %p, idx = %lu\n",
+              act, index);
+        return KERN_FAILURE;
+    }
+
+    hpmap = act->map->pmap;                     /* Get host pmap */
+    gpmap = vmm_get_adsp(act, index);           /* Get guest pmap */
+    if (!gpmap) {                               /* Caller guarantees that this will work */
+        panic("vmm_activate_gsa: Unexpected failure of vmm_get_adsp; act = %p, idx = %lu\n",
+              act, index);
+        return KERN_FAILURE;
+    }
+
+    if (!hpmap->pmapVmmExt) {                   /* If there's no VMM extension for this host, create one */
+        hpmap->pmapVmmExt = vmm_build_shadow_hash(hpmap);       /* Build VMM extension plus shadow hash and attach */
+        if (hpmap->pmapVmmExt) {                /* See if we succeeded */
+            hpmap->pmapVmmExtPhys = (addr64_t)(vm_offset_t)hpmap->pmapVmmExt ^ hpmap->pmapVmmExt->vmxSalt;
+                                                /* Get VMM extension block's physical address */
+        } else {
+            return KERN_RESOURCE_SHORTAGE;      /* Not enough mojo to go */
+        }
+    }
+    gpmap->pmapVmmExt     = hpmap->pmapVmmExt;  /* Copy VMM extension block virtual address into guest */
+    gpmap->pmapVmmExtPhys = hpmap->pmapVmmExtPhys;              /* and its physical address, too */
+    gpmap->pmapFlags     |= pmapVMgsaa;         /* Enable GSA for this guest */
+    CEntry->vmmXAFlgs    |= vmmGSA;             /* Show GSA active here, too */
+
+    return KERN_SUCCESS;
+}
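+
+/*-----------------------------------------------------------------------
+** Sketch (an assumption for illustration, not in the original source):
+** because the extension block is created lazily on the host pmap and
+** merely copied into each guest, all guests of one host share a single
+** shadow hash. A check of that invariant could read:
+-----------------------------------------------------------------------*/
+static boolean_t vmm_gsa_shared(pmap_t hpmap, pmap_t gpmap)
+{
+    return (gpmap->pmapVmmExt == hpmap->pmapVmmExt);            /* Guest and host share one block */
+}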
+
+
+/*-----------------------------------------------------------------------
+** vmm_deactivate_gsa
+**
+** Deactivate guest shadow assist
+**
+-----------------------------------------------------------------------*/
+static void
+vmm_deactivate_gsa(
+    thread_t           act,
+    vmm_thread_index_t index)
+{
+    vmmCntrlEntry *CEntry = vmm_get_entry(act, index);          /* Get context from index */
+    pmap_t         gpmap;
+
+    if (!CEntry) {                              /* Caller guarantees that this will work */
+        panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_entry; act = %p, idx = %lu\n",
+              act, index);
+    }
+
+    gpmap = vmm_get_adsp(act, index);           /* Get guest pmap */
+    if (!gpmap) {                               /* Caller guarantees that this will work */
+        panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_adsp; act = %p, idx = %lu\n",
+              act, index);
+    }
+
+    gpmap->pmapFlags &= ~pmapVMgsaa;            /* Deactivate GSA for this guest */
+    CEntry->vmmXAFlgs &= ~vmmGSA;               /* Show GSA deactivated here, too */
+}
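+
+/*-----------------------------------------------------------------------
+** Sketch (hypothetical helper, not in the original source): note that
+** vmm_deactivate_gsa clears the per-guest flags but leaves the shared
+** extension block attached to the host pmap, so a later vmm_activate_gsa
+** can reuse it without rebuilding the hash table.
+-----------------------------------------------------------------------*/
+static boolean_t vmm_gsa_active(pmap_t gpmap)
+{
+    return ((gpmap->pmapFlags & pmapVMgsaa) != 0);              /* GSA currently enabled for this guest? */
+}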