+/*-----------------------------------------------------------------------
+** vmm_activate_gsa
+**
+** Activate guest shadow assist
+**
+-----------------------------------------------------------------------*/
+static kern_return_t vmm_activate_gsa(
+    thread_t act,
+    vmm_thread_index_t index)
+{
+    vmmCntrlTable *table;   /* Per-thread VMM control table */
+    vmmCntrlEntry *entry;   /* Control entry for this guest context */
+    pmap_t host_pmap;
+    pmap_t guest_pmap;
+
+    /* The caller is expected to have validated the thread/index pair,
+       so any lookup failure below indicates a kernel logic error. */
+    table = act->machine.vmmControl;
+    if (!table) {
+        panic("vmm_activate_gsa: VMM control table not present; act = %08X, idx = %d\n",
+            act, index);
+        return KERN_FAILURE;
+    }
+
+    entry = vmm_get_entry(act, index);
+    if (!entry) {
+        panic("vmm_activate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
+            act, index);
+        return KERN_FAILURE;
+    }
+
+    host_pmap = act->map->pmap;                 /* Host address space */
+    guest_pmap = vmm_get_adsp(act, index);      /* Guest address space */
+    if (!guest_pmap) {
+        panic("vmm_activate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
+            act, index);
+        return KERN_FAILURE;
+    }
+
+    /* Lazily build the host's VMM extension block (plus shadow hash) the
+       first time any guest on this host turns on shadow assist. */
+    if (!host_pmap->pmapVmmExt) {
+        host_pmap->pmapVmmExt = vmm_build_shadow_hash(host_pmap);
+        if (!host_pmap->pmapVmmExt) {
+            return KERN_RESOURCE_SHORTAGE;      /* Not enough mojo to go */
+        }
+        /* Stash the block's physical address, XOR-obscured with the salt */
+        host_pmap->pmapVmmExtPhys =
+            (addr64_t)(vm_offset_t)host_pmap->pmapVmmExt ^ host_pmap->pmapVmmExt->vmxSalt;
+    }
+
+    /* Share the host's extension block with the guest and flag GSA active
+       in both the guest pmap and the control entry. */
+    guest_pmap->pmapVmmExt = host_pmap->pmapVmmExt;
+    guest_pmap->pmapVmmExtPhys = host_pmap->pmapVmmExtPhys;
+    guest_pmap->pmapFlags |= pmapVMgsaa;
+    entry->vmmXAFlgs |= vmmGSA;
+
+    return KERN_SUCCESS;
+}
+
+
+/*-----------------------------------------------------------------------
+** vmm_deactivate_gsa
+**
+** Deactivate guest shadow assist
+**
+-----------------------------------------------------------------------*/
+/*
+ * Turn off guest shadow assist for the context at 'index': clears the GSA
+ * flag in both the guest pmap and the control entry. Returns nothing; the
+ * caller guarantees the index is valid, so lookup failures panic.
+ *
+ * Fix: the function is declared void, but the original error paths did
+ * 'return KERN_FAILURE;' — returning a value from a void function is a
+ * C constraint violation. They now return without a value (the panic
+ * already terminates; the return only satisfies the compiler).
+ */
+static void vmm_deactivate_gsa(
+ thread_t act,
+ vmm_thread_index_t index)
+{
+ vmmCntrlEntry *CEntry = vmm_get_entry(act, index); /* Get context from index */
+ if (!CEntry) { /* Caller guarantees that this will work */
+ panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
+ act, index);
+ return; /* void function: no status to return */
+ }
+
+ pmap_t gpmap = vmm_get_adsp(act, index); /* Get guest pmap */
+ if (!gpmap) { /* Caller guarantees that this will work */
+ panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
+ act, index);
+ return; /* void function: no status to return */
+ }
+
+ gpmap->pmapFlags &= ~pmapVMgsaa; /* Deactivate GSA for this guest */
+ CEntry->vmmXAFlgs &= ~vmmGSA; /* Show GSA deactivated here, too */
+}
+
+
+/*-----------------------------------------------------------------------
+** vmm_flush_context
+**
+** Flush specified guest context, purging all guest mappings and clearing
+** the context page.
+**
+-----------------------------------------------------------------------*/
+static void vmm_flush_context(
+    thread_t act,
+    vmm_thread_index_t index)
+{
+    vmmCntrlEntry *entry;
+    vmmCntrlTable *table;
+    vmm_state_page_t *ctx_page;
+    vmm_version_t saved_version;
+
+    entry = vmm_get_entry(act, index);          /* Convert index to entry */
+    if (!entry) {                               /* Caller guarantees that this will work */
+        panic("vmm_flush_context: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
+            act, index);
+        return;
+    }
+
+    /* Discard any live floating-point state for this context, then
+       release its save area back to the pool. */
+    if (entry->vmmFacCtx.FPUsave) {
+        toss_live_fpu(&entry->vmmFacCtx);
+        save_release((savearea *)entry->vmmFacCtx.FPUsave);
+    }
+
+    /* Likewise for any live vector state. */
+    if (entry->vmmFacCtx.VMXsave) {
+        toss_live_vec(&entry->vmmFacCtx);
+        save_release((savearea *)entry->vmmFacCtx.VMXsave);
+    }
+
+    vmm_unmap_all_pages(act, index);            /* Blow away all mappings for this context */
+
+    table = act->machine.vmmControl;            /* Get the control table address */
+    table->vmmGFlags &= ~vmmLastAdSp;           /* Make sure we don't try to automap into this */
+
+    entry->vmmFlags &= vmmInUse;                /* Keep only the in-use bit */
+    entry->vmmFacCtx.FPUsave = 0;               /* Reset all facility context control */
+    entry->vmmFacCtx.FPUlevel = 0;
+    entry->vmmFacCtx.FPUcpu = 0;
+    entry->vmmFacCtx.VMXsave = 0;
+    entry->vmmFacCtx.VMXlevel = 0;
+    entry->vmmFacCtx.VMXcpu = 0;
+
+    /* Wipe the user-visible context page, preserving only the interface
+       version and re-reporting the virtual machine's index. */
+    ctx_page = entry->vmmContextKern;
+    saved_version = ctx_page->interface_version;
+    bzero((char *)ctx_page, 4096);
+    ctx_page->interface_version = saved_version;
+    ctx_page->thread_index = index % vmmTInum;
+
+    return;                                     /* Context is now flushed */
+}