diff --git a/osfmk/ppc/vmachmon.c b/osfmk/ppc/vmachmon.c
index ae22aab36..ec7ab941b 100644
--- a/osfmk/ppc/vmachmon.c
+++ b/osfmk/ppc/vmachmon.c
@@ -1,51 +1,57 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*-----------------------------------------------------------------------
 ** vmachmon.c
 **
 ** C routines that we are adding to the MacOS X kernel.
 **
-** Weird Apple PSL stuff goes here...
-**
-** Until then, Copyright 2000, Connectix
 -----------------------------------------------------------------------*/
 
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
-#include
 #include
 #include
-#include
-#include
+#include
+#include
+#include
+#include
 #include
+#include
 #include
+#include
 
-extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */
 extern double FloatInit;
 extern unsigned long QNaNbarbarian[4];
 
@@ -66,17 +72,19 @@ extern unsigned long QNaNbarbarian[4];
 **		address of a vmmCntrlEntry or 0 if not found
 -----------------------------------------------------------------------*/
 
-vmmCntrlEntry *vmm_get_entry(
-	thread_act_t			act,
+static vmmCntrlEntry *vmm_get_entry(
+	thread_t			act,
 	vmm_thread_index_t		index)
 {
 	vmmCntrlTable *CTable;
 	vmmCntrlEntry *CEntry;
 
-	if (act->mact.vmmControl == 0) return NULL;	/* No control table means no vmm */
-	if ((index - 1) >= kVmmMaxContextsPerThread) return NULL;	/* Index not in range */
+	index = index & vmmTInum;			/* Clean up the index */
+
+	if (act->machine.vmmControl == 0) return NULL;	/* No control table means no vmm */
+	if ((index - 1) >= kVmmMaxContexts) return NULL;	/* Index not in range */
 
-	CTable = act->mact.vmmControl;			/* Make the address a bit more convienient */
+	CTable = act->machine.vmmControl;		/* Make the address a bit more convenient */
 	CEntry = &CTable->vmmc[index - 1];		/* Point to the entry */
 
 	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;	/* See if the slot is actually in use */
 
@@ -84,6 +92,265 @@ vmmCntrlEntry *vmm_get_entry(
 	return CEntry;
 }
 
+/*-----------------------------------------------------------------------
+** vmm_get_adsp
+**
+** This function verifies and returns the pmap for an address space.
+** If there is none and the request is valid, a pmap will be created.
+**
+** Inputs:
+**		act - pointer to current thread activation
+**		index - index into vmm control table (this is a "one based" value)
+**
+** Outputs:
+**		address of a pmap or 0 if not found or could not be created
+**		Note that if there is no pmap for the address space it will be created.
+-----------------------------------------------------------------------*/
+
+static pmap_t vmm_get_adsp(thread_t act, vmm_thread_index_t index)
+{
+	pmap_t pmap;
+
+	if (act->machine.vmmControl == 0) return NULL;	/* No control table means no vmm */
+	if ((index - 1) >= kVmmMaxContexts) return NULL;	/* Index not in range */
+
+	pmap = act->machine.vmmControl->vmmAdsp[index - 1];	/* Get the pmap */
+	return (pmap);					/* and return it. */
+}
+
+/*-----------------------------------------------------------------------
+** vmm_build_shadow_hash
+**
+** Allocate and initialize a shadow hash table.
+**
+** This function assumes that PAGE_SIZE is 4k-bytes.
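+**
+** A sketch of the sizing constraint enforced below (GV_HPAGES and
+** VMX_HPIDX_OFFSET are platform-defined constants): the physical page
+** index (GV_HPAGES * sizeof(addr64_t) bytes) and the page list
+** (GV_HPAGES * sizeof(vm_offset_t) bytes) must both fit in the free
+** space left in the extension page, i.e.
+**
+**		GV_HPAGES * (sizeof(addr64_t) + sizeof(vm_offset_t))
+**			<= PAGE_SIZE - VMX_HPIDX_OFFSET
+**
+** If that does not hold, the code panics rather than overrun the page.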
+** +-----------------------------------------------------------------------*/ +static pmap_vmm_ext *vmm_build_shadow_hash(pmap_t pmap) +{ + pmap_vmm_ext *ext; /* VMM pmap extension we're building */ + ppnum_t extPP; /* VMM pmap extension physical page number */ + kern_return_t ret; /* Return code from various calls */ + uint32_t pages = GV_HPAGES; /* Number of pages in the hash table */ + vm_offset_t free = VMX_HPIDX_OFFSET; /* Offset into extension page of free area (128-byte aligned) */ + uint32_t freeSize = PAGE_SIZE - free; /* Number of free bytes in the extension page */ + uint32_t idx; + + if ((pages * sizeof(addr64_t)) + (pages * sizeof(vm_offset_t)) > freeSize) { + panic("vmm_build_shadow_hash: too little pmap_vmm_ext free space\n"); + } + + ret = kmem_alloc_wired(kernel_map, (vm_offset_t *)&ext, PAGE_SIZE); + /* Allocate a page-sized extension block */ + if (ret != KERN_SUCCESS) return (NULL); /* Return NULL for failed allocate */ + bzero((char *)ext, PAGE_SIZE); /* Zero the entire extension block page */ + + extPP = pmap_find_phys(kernel_pmap, (vm_offset_t)ext); + /* Get extension block's physical page number */ + if (!extPP) { /* This should not fail, but then again... */ + panic("vmm_build_shadow_hash: could not translate pmap_vmm_ext vaddr %p\n", ext); + } + + ext->vmxSalt = (addr64_t)(vm_offset_t)ext ^ ptoa_64(extPP); + /* Set effective<->physical conversion salt */ + ext->vmxHostPmapPhys = (addr64_t)(vm_offset_t)pmap ^ pmap->pmapvr; + /* Set host pmap's physical address */ + ext->vmxHostPmap = pmap; /* Set host pmap's effective address */ + ext->vmxHashPgIdx = (addr64_t *)((vm_offset_t)ext + VMX_HPIDX_OFFSET); + /* Allocate physical index */ + ext->vmxHashPgList = (vm_offset_t *)((vm_offset_t)ext + VMX_HPLIST_OFFSET); + /* Allocate page list */ + ext->vmxActiveBitmap = (vm_offset_t *)((vm_offset_t)ext + VMX_ACTMAP_OFFSET); + /* Allocate active mapping bitmap */ + + /* The hash table is typically larger than a single page, but we don't require it to be in a + contiguous virtual or physical chunk. So, we allocate it page by page, noting the effective and + physical address of each page in vmxHashPgList and vmxHashPgIdx, respectively. 
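+	   Note that vmxSalt, set above to (effective ^ physical) for the
+	   extension page, converts between the two views of the block with a
+	   single XOR in either direction: eff ^ vmxSalt yields the physical
+	   address, and phys ^ vmxSalt yields the effective one.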
*/ + for (idx = 0; idx < pages; idx++) { + mapping_t *map; + uint32_t mapIdx; + ret = kmem_alloc_wired(kernel_map, &ext->vmxHashPgList[idx], PAGE_SIZE); + /* Allocate a hash-table page */ + if (ret != KERN_SUCCESS) goto fail; /* Allocation failed, exit through cleanup */ + bzero((char *)ext->vmxHashPgList[idx], PAGE_SIZE); /* Zero the page */ + ext->vmxHashPgIdx[idx] = ptoa_64(pmap_find_phys(kernel_pmap, (addr64_t)ext->vmxHashPgList[idx])); + /* Put page's physical address into index */ + if (!ext->vmxHashPgIdx[idx]) { /* Hash-table page's LRA failed */ + panic("vmm_build_shadow_hash: could not translate hash-table vaddr %08X\n", ext->vmxHashPgList[idx]); + } + map = (mapping_t *)ext->vmxHashPgList[idx]; + for (mapIdx = 0; mapIdx < GV_SLTS_PPG; mapIdx++) { /* Iterate over mappings in this page */ + map->mpFlags = (mpGuest | mpgFree); /* Mark guest type and free */ + map = (mapping_t *)((char *)map + GV_SLOT_SZ); /* Next slot-sized mapping */ + } + } + + return (ext); /* Return newly-minted VMM pmap extension */ + +fail: + for (idx = 0; idx < pages; idx++) { /* De-allocate any pages we managed to allocate */ + if (ext->vmxHashPgList[idx]) { + kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE); + } + } + kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE); /* Release the VMM pmap extension page */ + return (NULL); /* Return NULL for failure */ +} + + +/*----------------------------------------------------------------------- +** vmm_release_shadow_hash +** +** Release shadow hash table and VMM extension block +** +-----------------------------------------------------------------------*/ +static void vmm_release_shadow_hash(pmap_vmm_ext *ext) +{ + uint32_t idx; + + for (idx = 0; idx < GV_HPAGES; idx++) { /* Release the hash table page by page */ + kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE); + } + + kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE); /* Release the VMM pmap extension page */ +} + +/*----------------------------------------------------------------------- +** vmm_activate_gsa +** +** Activate guest shadow assist +** +-----------------------------------------------------------------------*/ +static kern_return_t vmm_activate_gsa( + thread_t act, + vmm_thread_index_t index) +{ + vmmCntrlTable *CTable = act->machine.vmmControl; /* Get VMM control table */ + vmmCntrlEntry *CEntry; + pmap_t hpmap; + pmap_t gpmap; + if (!CTable) { /* Caller guarantees that this will work */ + panic("vmm_activate_gsa: VMM control table not present; act = %p, idx = %lu\n", + act, index); + return KERN_FAILURE; + } + CEntry = vmm_get_entry(act, index); /* Get context from index */ + if (!CEntry) { /* Caller guarantees that this will work */ + panic("vmm_activate_gsa: Unexpected failure of vmm_get_entry; act = %p, idx = %lu\n", + act, index); + return KERN_FAILURE; + } + + hpmap = act->map->pmap; /* Get host pmap */ + gpmap = vmm_get_adsp(act, index); /* Get guest pmap */ + if (!gpmap) { /* Caller guarantees that this will work */ + panic("vmm_activate_gsa: Unexpected failure of vmm_get_adsp; act = %p, idx = %lu\n", + act, index); + return KERN_FAILURE; + } + + if (!hpmap->pmapVmmExt) { /* If there's no VMM extension for this host, create one */ + hpmap->pmapVmmExt = vmm_build_shadow_hash(hpmap); /* Build VMM extension plus shadow hash and attach */ + if (hpmap->pmapVmmExt) { /* See if we succeeded */ + hpmap->pmapVmmExtPhys = (addr64_t)(vm_offset_t)hpmap->pmapVmmExt ^ hpmap->pmapVmmExt->vmxSalt; + /* Get VMM extensions block physical address */ + } else { + return 
KERN_RESOURCE_SHORTAGE; /* Not enough mojo to go */ + } + } + gpmap->pmapVmmExt = hpmap->pmapVmmExt; /* Copy VMM extension block virtual address into guest */ + gpmap->pmapVmmExtPhys = hpmap->pmapVmmExtPhys; /* and its physical address, too */ + gpmap->pmapFlags |= pmapVMgsaa; /* Enable GSA for this guest */ + CEntry->vmmXAFlgs |= vmmGSA; /* Show GSA active here, too */ + + return KERN_SUCCESS; +} + + +/*----------------------------------------------------------------------- +** vmm_deactivate_gsa +** +** Deactivate guest shadow assist +** +-----------------------------------------------------------------------*/ +static void +vmm_deactivate_gsa( + thread_t act, + vmm_thread_index_t index) +{ + vmmCntrlEntry *CEntry = vmm_get_entry(act, index); /* Get context from index */ + pmap_t gpmap; + if (!CEntry) { /* Caller guarantees that this will work */ + panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_entry; act = %p, idx = %lu\n", + act, index); + } + + gpmap = vmm_get_adsp(act, index); /* Get guest pmap */ + if (!gpmap) { /* Caller guarantees that this will work */ + panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_adsp; act = %p, idx = %lu\n", + act, index); + } + + gpmap->pmapFlags &= ~pmapVMgsaa; /* Deactivate GSA for this guest */ + CEntry->vmmXAFlgs &= ~vmmGSA; /* Show GSA deactivated here, too */ +} + + +/*----------------------------------------------------------------------- +** vmm_flush_context +** +** Flush specified guest context, purging all guest mappings and clearing +** the context page. +** +-----------------------------------------------------------------------*/ +static void vmm_flush_context( + thread_t act, + vmm_thread_index_t index) +{ + vmmCntrlEntry *CEntry; + vmmCntrlTable *CTable; + vmm_state_page_t *vks; + vmm_version_t version; + + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (!CEntry) { /* Caller guarantees that this will work */ + panic("vmm_flush_context: Unexpected failure of vmm_get_entry; act = %p, idx = %lu\n", + act, index); + return; + } + + if(CEntry->vmmFacCtx.FPUsave) { /* Is there any floating point context? */ + toss_live_fpu(&CEntry->vmmFacCtx); /* Get rid of any live context here */ + save_release((struct savearea *)CEntry->vmmFacCtx.FPUsave); /* Release it */ + } + + if(CEntry->vmmFacCtx.VMXsave) { /* Is there any vector context? 
*/
+		toss_live_vec(&CEntry->vmmFacCtx);	/* Get rid of any live context here */
+		save_release((struct savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
+	}
+
+	vmm_unmap_all_pages(act, index);		/* Blow away all mappings for this context */
+
+	CTable = act->machine.vmmControl;		/* Get the control table address */
+	CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;	/* Make sure we don't try to automap into this */
+
+	CEntry->vmmFlags &= vmmInUse;			/* Clear out all of the flags for this entry except in use */
+	CEntry->vmmFacCtx.FPUsave = NULL;		/* Clear facility context control */
+	CEntry->vmmFacCtx.FPUlevel = NULL;		/* Clear facility context control */
+	CEntry->vmmFacCtx.FPUcpu = 0;			/* Clear facility context control */
+	CEntry->vmmFacCtx.VMXsave = NULL;		/* Clear facility context control */
+	CEntry->vmmFacCtx.VMXlevel = NULL;		/* Clear facility context control */
+	CEntry->vmmFacCtx.VMXcpu = 0;			/* Clear facility context control */
+
+	vks = CEntry->vmmContextKern;			/* Get address of the context page */
+	version = vks->interface_version;		/* Save the version code */
+	bzero((char *)vks, 4096);			/* Clear all */
+
+	vks->interface_version = version;		/* Set our version code */
+	vks->thread_index = index % vmmTInum;		/* Tell the user the index for this virtual machine */
+
+	/* Context is now flushed */
+}
 
 /*************************************************************************************
 
@@ -137,10 +404,61 @@ int vmm_get_version(struct savearea *save)
 
 int vmm_get_features(struct savearea *save)
 {
 	save->save_r3 = kVmmCurrentFeatures;		/* Return the features */
+	if(getPerProc()->pf.Available & pf64Bit) {
+		save->save_r3 &= ~kVmmFeature_LittleEndian;	/* No little endian here */
+		save->save_r3 |= kVmmFeature_SixtyFourBit;	/* Set that we can do 64-bit */
+	}
 	return 1;
 }
 
+/*-----------------------------------------------------------------------
+** vmm_max_addr
+**
+** This function returns the maximum addressable virtual address supported
+**
+** Outputs:
+**		Returns max address
+-----------------------------------------------------------------------*/
+
+addr64_t
+vmm_max_addr(__unused thread_t act)
+{
+	return vm_max_address;				/* Return the maximum address */
+}
+
+/*-----------------------------------------------------------------------
+** vmm_get_XA
+**
+** This function retrieves the eXtended Architecture flags for the specified VM.
+**
+** We need to return the result in the return code rather than in the return parameters
+** because we need an architecture independent format so the results are actually
+** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
+** 4 for 32-bit.
+**
+**
+** Inputs:
+**		act - pointer to current thread activation structure
+**		index - index returned by vmm_init_context
+**
+** Outputs:
+**		Return code is set to the XA flags. If the index is invalid or the
+**		context has not been created, we return 0.
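+**
+** A minimal usage sketch (hypothetical caller; index is a value
+** previously returned by vmm_init_context):
+**
+**		unsigned int xa = vmm_get_XA(current_thread(), index);
+**		if (xa & vmm64Bit) ...		the VM runs in 64-bit mode
+**		if (xa & vmmGSA) ...		guest shadow assist is active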
+-----------------------------------------------------------------------*/ + +unsigned int vmm_get_XA( + thread_t act, + vmm_thread_index_t index) +{ + vmmCntrlEntry *CEntry; + + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) return 0; /* Either this isn't a vmm or the index is bogus */ + + return CEntry->vmmXAFlgs; /* Return the flags */ +} + /*----------------------------------------------------------------------- ** vmm_init_context ** @@ -164,20 +482,21 @@ int vmm_get_features(struct savearea *save) int vmm_init_context(struct savearea *save) { - thread_act_t act; + thread_t act; vmm_version_t version; vmm_state_page_t * vmm_user_state; vmmCntrlTable *CTable; vm_offset_t conkern; vmm_state_page_t * vks; - vm_offset_t conphys; + ppnum_t conphys; kern_return_t ret; - pmap_t new_pmap; int cvi, i; task_t task; - thread_act_t fact, gact; + thread_t fact, gact; + pmap_t hpmap; + pmap_t gpmap; - vmm_user_state = (vmm_state_page_t *)save->save_r4; /* Get the user address of the comm area */ + vmm_user_state = CAST_DOWN(vmm_state_page_t *, save->save_r4); /* Get the user address of the comm area */ if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) { /* Make sure the comm area is page aligned */ save->save_r3 = KERN_FAILURE; /* Return failure */ return 1; @@ -195,7 +514,7 @@ int vmm_init_context(struct savearea *save) return 1; } - act = current_act(); /* Pick up our activation */ + act = current_thread(); /* Pick up our activation */ ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */ @@ -203,15 +522,15 @@ int vmm_init_context(struct savearea *save) task_lock(task); /* Lock our task */ - fact = (thread_act_t)task->thr_acts.next; /* Get the first activation on task */ - gact = 0; /* Pretend we didn't find it yet */ + fact = (thread_t)task->threads.next; /* Get the first activation on task */ + gact = NULL; /* Pretend we didn't find it yet */ - for(i = 0; i < task->thr_act_count; i++) { /* All of the activations */ - if(fact->mact.vmmControl) { /* Is this a virtual machine monitor? */ + for(i = 0; i < task->thread_count; i++) { /* All of the activations */ + if(fact->machine.vmmControl) { /* Is this a virtual machine monitor? */ gact = fact; /* Yeah... */ break; /* Bail the loop... */ } - fact = (thread_act_t)fact->thr_acts.next; /* Go to the next one */ + fact = (thread_t)fact->task_threads.next; /* Go to the next one */ } @@ -231,28 +550,28 @@ int vmm_init_context(struct savearea *save) return 1; } - if(!gact) act->mact.vmmControl = (vmmCntrlTable *)1; /* Temporarily mark that we are the vmm thread */ + if(!gact) act->machine.vmmControl = (vmmCntrlTable *)1; /* Temporarily mark that we are the vmm thread */ task_unlock(task); /* Safe to release now (because we've marked ourselves) */ - CTable = act->mact.vmmControl; /* Get the control table address */ + CTable = act->machine.vmmControl; /* Get the control table address */ if ((unsigned int)CTable == 1) { /* If we are marked, try to allocate a new table, otherwise we have one */ if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) { /* Get a fresh emulation control table */ - act->mact.vmmControl = 0; /* Unmark us as vmm 'cause we failed */ + act->machine.vmmControl = NULL; /* Unmark us as vmm 'cause we failed */ ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ save->save_r3 = KERN_RESOURCE_SHORTAGE; /* No storage... 
*/ return 1; } bzero((void *)CTable, sizeof(vmmCntrlTable)); /* Clean it up */ - act->mact.vmmControl = CTable; /* Initialize the table anchor */ + act->machine.vmmControl = CTable; /* Initialize the table anchor */ } - for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) { /* Search to find a free slot */ + for(cvi = 0; cvi < kVmmMaxContexts; cvi++) { /* Search to find a free slot */ if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break; /* Bail if we find an unused slot */ } - if(cvi >= kVmmMaxContextsPerThread) { /* Did we find one? */ + if(cvi >= kVmmMaxContexts) { /* Did we find one? */ ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ save->save_r3 = KERN_RESOURCE_SHORTAGE; /* No empty slots... */ return 1; @@ -269,7 +588,7 @@ int vmm_init_context(struct savearea *save) goto return_in_shame; /* Map the vmm state into the kernel's address space. */ - conphys = pmap_extract(act->map->pmap, (vm_offset_t)vmm_user_state); + conphys = pmap_find_phys(act->map->pmap, (addr64_t)((uintptr_t)vmm_user_state)); /* Find a virtual address to use. */ ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE); @@ -282,6 +601,7 @@ int vmm_init_context(struct savearea *save) } /* Map it into the kernel's address space. */ + pmap_enter(kernel_pmap, conkern, conphys, VM_PROT_READ | VM_PROT_WRITE, VM_WIMG_USE_DEFAULT, TRUE); @@ -290,17 +610,6 @@ int vmm_init_context(struct savearea *save) vks = (vmm_state_page_t *)conkern; bzero((char *)vks, PAGE_SIZE); - /* Allocate a new pmap for the new vmm context. */ - new_pmap = pmap_create(0); - if (new_pmap == PMAP_NULL) { - (void) vm_map_unwire(act->map, /* Couldn't get a pmap, unwire the user page */ - (vm_offset_t)vmm_user_state, - (vm_offset_t)vmm_user_state + PAGE_SIZE, - TRUE); - - kmem_free(kernel_map, conkern, PAGE_SIZE); /* Release the kernel address */ - goto return_in_shame; - } /* We're home free now. Simply fill in the necessary info and return. 
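	   (Note on the sequence above: pmap_find_phys recovered the physical
	   page backing the wired user comm area, and pmap_enter then aliased
	   that same physical page at the kernel address obtained from
	   kmem_alloc_pageable, so user and kernel now share a single page of
	   VM state without copying.)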
*/ @@ -308,27 +617,43 @@ int vmm_init_context(struct savearea *save) vks->thread_index = cvi + 1; /* Tell the user the index for this virtual machine */ CTable->vmmc[cvi].vmmFlags = vmmInUse; /* Mark the slot in use and make sure the rest are clear */ - CTable->vmmc[cvi].vmmPmap = new_pmap; /* Remember the pmap for this guy */ CTable->vmmc[cvi].vmmContextKern = vks; /* Remember the kernel address of comm area */ + CTable->vmmc[cvi].vmmContextPhys = conphys; /* Remember the state page physical addr */ CTable->vmmc[cvi].vmmContextUser = vmm_user_state; /* Remember user address of comm area */ - CTable->vmmc[cvi].vmmFacCtx.FPUsave = 0; /* Clear facility context control */ - CTable->vmmc[cvi].vmmFacCtx.FPUlevel = 0; /* Clear facility context control */ + CTable->vmmc[cvi].vmmFacCtx.FPUsave = NULL; /* Clear facility context control */ + CTable->vmmc[cvi].vmmFacCtx.FPUlevel = NULL; /* Clear facility context control */ CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0; /* Clear facility context control */ - CTable->vmmc[cvi].vmmFacCtx.VMXsave = 0; /* Clear facility context control */ - CTable->vmmc[cvi].vmmFacCtx.VMXlevel = 0; /* Clear facility context control */ + CTable->vmmc[cvi].vmmFacCtx.VMXsave = NULL; /* Clear facility context control */ + CTable->vmmc[cvi].vmmFacCtx.VMXlevel = NULL; /* Clear facility context control */ CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0; /* Clear facility context control */ CTable->vmmc[cvi].vmmFacCtx.facAct = act; /* Point back to the activation */ - hw_atomic_add((int *)&saveanchor.savetarget, 2); /* Account for the number of extra saveareas we think we might "need" */ + (void)hw_atomic_add(&saveanchor.savetarget, 2); /* Account for the number of extra saveareas we think we might "need" */ + + hpmap = act->map->pmap; /* Get host pmap */ + gpmap = pmap_create(0, FALSE); /* Make a fresh guest pmap */ + if (gpmap) { /* Did we succeed ? */ + CTable->vmmAdsp[cvi] = gpmap; /* Remember guest pmap for new context */ + if (lowGlo.lgVMMforcedFeats & vmmGSA) { /* Forcing on guest shadow assist ? */ + vmm_activate_gsa(act, cvi+1); /* Activate GSA */ + } + } else { + ret = KERN_RESOURCE_SHORTAGE; /* We've failed to allocate a guest pmap */ + goto return_in_shame; /* Shame on us. */ + } + + if (!(hpmap->pmapFlags & pmapVMhost)) { /* Do this stuff if this is our first time hosting */ + hpmap->pmapFlags |= pmapVMhost; /* We're now hosting */ + } ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ save->save_r3 = KERN_SUCCESS; /* Hip, hip, horay... */ return 1; return_in_shame: - if(!gact) kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable)); /* Toss the table if we just allocated it */ - act->mact.vmmControl = 0; /* Unmark us as vmm 'cause we failed */ + if(!gact) kfree(CTable, sizeof(vmmCntrlTable)); /* Toss the table if we just allocated it */ + act->machine.vmmControl = NULL; /* Unmark us as vmm 'cause we failed */ ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ save->save_r3 = ret; /* Pass back return code... */ return 1; @@ -348,38 +673,54 @@ return_in_shame: ** ** Outputs: ** kernel return code indicating success or failure +** +** Strangeness note: +** This call will also trash the address space with the same ID. While this +** is really not too cool, we have to do it because we need to make +** sure that old VMM users (not that we really have any) who depend upon +** the address space going away with the context still work the same. 
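+** That is, tearing down context N also destroys address space N
+** (vmmAdsp[N-1]), even though contexts and address spaces are otherwise
+** managed independently.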
-----------------------------------------------------------------------*/ kern_return_t vmm_tear_down_context( - thread_act_t act, + thread_t act, vmm_thread_index_t index) { vmmCntrlEntry *CEntry; vmmCntrlTable *CTable; int cvi; - register savearea *sv; + pmap_t gpmap; + pmap_t pmap; - CEntry = vmm_get_entry(act, index); /* Convert index to entry */ - if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ - ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */ + ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */ - hw_atomic_sub((int *)&saveanchor.savetarget, 2); /* We don't need these extra saveareas anymore */ + (void)hw_atomic_sub(&saveanchor.savetarget, 2); /* We don't need these extra saveareas anymore */ - if(CEntry->vmmFacCtx.FPUsave) { /* Is there any floating point context? */ - toss_live_fpu(&CEntry->vmmFacCtx); /* Get rid of any live context here */ - save_release((savearea *)CEntry->vmmFacCtx.FPUsave); /* Release it */ + if(CEntry->vmmFacCtx.FPUsave) { /* Is there any floating point context? */ + toss_live_fpu(&CEntry->vmmFacCtx); /* Get rid of any live context here */ + save_release((struct savearea *)CEntry->vmmFacCtx.FPUsave); /* Release it */ } - if(CEntry->vmmFacCtx.VMXsave) { /* Is there any vector context? */ - toss_live_vec(&CEntry->vmmFacCtx); /* Get rid of any live context here */ - save_release((savearea *)CEntry->vmmFacCtx.VMXsave); /* Release it */ + if(CEntry->vmmFacCtx.VMXsave) { /* Is there any vector context? */ + toss_live_vec(&CEntry->vmmFacCtx); /* Get rid of any live context here */ + save_release((struct savearea *)CEntry->vmmFacCtx.VMXsave); /* Release it */ + } + + CEntry->vmmPmap = NULL; /* Remove this trace */ + gpmap = act->machine.vmmControl->vmmAdsp[index - 1]; + /* Get context's guest pmap (if any) */ + if (gpmap) { /* Check if there is an address space assigned here */ + if (gpmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist case specially */ + hw_rem_all_gv(gpmap); /* Remove all guest mappings from shadow hash table */ + } else { + mapping_remove(gpmap, 0xFFFFFFFFFFFFF000LL);/* Remove final page explicitly because we might have mapped it */ + pmap_remove(gpmap, 0, 0xFFFFFFFFFFFFF000LL);/* Remove all entries from this map */ + } + pmap_destroy(gpmap); /* Toss the pmap for this context */ + act->machine.vmmControl->vmmAdsp[index - 1] = NULL; /* Clean it up */ } - - mapping_remove(CEntry->vmmPmap, 0xFFFFF000); /* Remove final page explicitly because we might have mapped it */ - pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000); /* Remove all entries from this map */ - pmap_destroy(CEntry->vmmPmap); /* Toss the pmap for this context */ - CEntry->vmmPmap = NULL; /* Clean it up */ (void) vm_map_unwire( /* Unwire the user comm page */ act->map, @@ -389,35 +730,148 @@ kern_return_t vmm_tear_down_context( kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE); /* Remove kernel's view of the comm page */ + CTable = act->machine.vmmControl; /* Get the control table address */ + CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp; /* Make sure we don't try to automap into this */ + CEntry->vmmFlags = 0; /* Clear out all of the flags for this entry including in use */ - CEntry->vmmPmap = 0; /* Clear pmap pointer */ - CEntry->vmmContextKern = 0; /* Clear the 
kernel address of comm area */
-	CEntry->vmmContextUser = 0;			/* Clear the user address of comm area */
+	CEntry->vmmContextKern = NULL;			/* Clear the kernel address of comm area */
+	CEntry->vmmContextUser = NULL;			/* Clear the user address of comm area */
 
-	CEntry->vmmFacCtx.FPUsave = 0;			/* Clear facility context control */
-	CEntry->vmmFacCtx.FPUlevel = 0;			/* Clear facility context control */
+	CEntry->vmmFacCtx.FPUsave = NULL;		/* Clear facility context control */
+	CEntry->vmmFacCtx.FPUlevel = NULL;		/* Clear facility context control */
 	CEntry->vmmFacCtx.FPUcpu = 0;			/* Clear facility context control */
-	CEntry->vmmFacCtx.VMXsave = 0;			/* Clear facility context control */
-	CEntry->vmmFacCtx.VMXlevel = 0;			/* Clear facility context control */
+	CEntry->vmmFacCtx.VMXsave = NULL;		/* Clear facility context control */
+	CEntry->vmmFacCtx.VMXlevel = NULL;		/* Clear facility context control */
 	CEntry->vmmFacCtx.VMXcpu = 0;			/* Clear facility context control */
-	CEntry->vmmFacCtx.facAct = 0;			/* Clear facility context control */
+	CEntry->vmmFacCtx.facAct = NULL;		/* Clear facility context control */
 
-	CTable = act->mact.vmmControl;			/* Get the control table address */
-	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search to find a free slot */
+	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search to find a free slot */
 		if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {	/* Return if there are still some in use */
 			ml_set_interrupts_enabled(FALSE);	/* No more interruptions */
 			return KERN_SUCCESS;			/* Leave... */
 		}
 	}
 
-	kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table because to tossed the last context */
-	act->mact.vmmControl = 0;			/* Unmark us as vmm */
+/*
+ * When we have tossed the last context, toss any address spaces left over before releasing
+ * the VMM control block
+ */
+
+	for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {	/* Look at all slots */
+		if(!act->machine.vmmControl->vmmAdsp[index - 1]) continue;	/* Nothing to remove here */
+		mapping_remove(act->machine.vmmControl->vmmAdsp[index - 1], 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
+		pmap_remove(act->machine.vmmControl->vmmAdsp[index - 1], 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
+		pmap_destroy(act->machine.vmmControl->vmmAdsp[index - 1]);	/* Toss the pmap for this context */
+		act->machine.vmmControl->vmmAdsp[index - 1] = NULL;	/* Clear just in case */
+	}
+
+	pmap = act->map->pmap;				/* Get our pmap */
+	if (pmap->pmapVmmExt) {				/* Release any VMM pmap extension block and shadow hash table */
+		vmm_release_shadow_hash(pmap->pmapVmmExt);	/* Release extension block and shadow hash table */
+		pmap->pmapVmmExt = NULL;		/* Forget extension block */
+		pmap->pmapVmmExtPhys = 0;		/* Forget extension block's physical address, too */
+	}
+	pmap->pmapFlags &= ~pmapVMhost;			/* We're no longer hosting */
+
+	kfree(CTable, sizeof(vmmCntrlTable));		/* Toss the table because we tossed the last context */
+	act->machine.vmmControl = NULL;			/* Unmark us as vmm */
 
 	ml_set_interrupts_enabled(FALSE);		/* No more interruptions */
 
 	return KERN_SUCCESS;
 }
 
+
+/*-----------------------------------------------------------------------
+** vmm_activate_XA
+**
+** This function activates the eXtended Architecture flags for the specified VM.
+**
+** We need to return the result in the return code rather than in the return parameters
+** because we need an architecture independent format so the results are actually
+** usable by the host.
For example, the return parameters for 64-bit are 8 bytes wide vs. +** 4 for 32-bit. +** +** Note that this function does a lot of the same stuff as vmm_tear_down_context +** and vmm_init_context. +** +** Inputs: +** act - pointer to current thread activation structure +** index - index returned by vmm_init_context +** flags - the extended architecture flags +** +** +** Outputs: +** KERN_SUCCESS if vm is valid and initialized. KERN_FAILURE if not. +** Also, the internal flags are set and, additionally, the VM is completely reset. +-----------------------------------------------------------------------*/ +kern_return_t vmm_activate_XA( + thread_t act, + vmm_thread_index_t index, + unsigned int xaflags) +{ + vmmCntrlEntry *CEntry; + kern_return_t result = KERN_SUCCESS; /* Assume success */ + + if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && (!getPerProc()->pf.Available & pf64Bit))) + return (KERN_FAILURE); /* Unknown or unsupported feature requested */ + + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't a vmm or the index is bogus */ + + ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */ + + vmm_flush_context(act, index); /* Flush the context */ + + if (xaflags & vmm64Bit) { /* Activating 64-bit mode ? */ + CEntry->vmmXAFlgs |= vmm64Bit; /* Activate 64-bit mode */ + } + + if (xaflags & vmmGSA) { /* Activating guest shadow assist ? */ + result = vmm_activate_gsa(act, index); /* Activate guest shadow assist */ + } + + ml_set_interrupts_enabled(FALSE); /* No more interruptions */ + + return result; /* Return activate result */ +} + +/*----------------------------------------------------------------------- +** vmm_deactivate_XA +** +-----------------------------------------------------------------------*/ +kern_return_t vmm_deactivate_XA( + thread_t act, + vmm_thread_index_t index, + unsigned int xaflags) +{ + vmmCntrlEntry *CEntry; + kern_return_t result = KERN_SUCCESS; /* Assume success */ + + if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && (getPerProc()->pf.Available & pf64Bit))) + return (KERN_FAILURE); /* Unknown or unsupported feature requested */ + + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't a vmm or the index is bogus */ + + ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */ + + vmm_flush_context(act, index); /* Flush the context */ + + if (xaflags & vmm64Bit) { /* Deactivating 64-bit mode ? */ + CEntry->vmmXAFlgs &= ~vmm64Bit; /* Deactivate 64-bit mode */ + } + + if (xaflags & vmmGSA) { /* Deactivating guest shadow assist ? */ + vmm_deactivate_gsa(act, index); /* Deactivate guest shadow assist */ + } + + ml_set_interrupts_enabled(FALSE); /* No more interruptions */ + + return result; /* Return deactivate result */ +} + + /*----------------------------------------------------------------------- ** vmm_tear_down_all ** @@ -435,15 +889,15 @@ kern_return_t vmm_tear_down_context( ** Outputs: ** All vmm contexts released and VMM shut down -----------------------------------------------------------------------*/ -void vmm_tear_down_all(thread_act_t act) { +void vmm_tear_down_all(thread_t act) { vmmCntrlTable *CTable; int cvi; kern_return_t ret; - savearea *save; + struct savearea *save; spl_t s; - if(act->mact.specFlags & runningVM) { /* Are we actually in a context right now? 
*/ + if(act->machine.specFlags & runningVM) { /* Are we actually in a context right now? */ save = find_user_regs(act); /* Find the user state context */ if(!save) { /* Did we find it? */ panic("vmm_tear_down_all: runningVM marked but no user state context\n"); @@ -456,31 +910,32 @@ void vmm_tear_down_all(thread_act_t act) { splx(s); /* Restore interrupts */ } - if(CTable = act->mact.vmmControl) { /* Do we have a vmm control block? */ - - for(cvi = 1; cvi <= kVmmMaxContextsPerThread; cvi++) { /* Look at all slots */ + if(act->machine.vmmControl) { /* Do we have a vmm control block? */ + CTable = act->machine.vmmControl; + for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) { /* Look at all slots */ if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) { /* Is this one in use */ ret = vmm_tear_down_context(act, cvi); /* Take down the found context */ if(ret != KERN_SUCCESS) { /* Did it go away? */ - panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n", + panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %p, cvi = %d\n", ret, act, cvi); } } } - if(act->mact.vmmControl) { /* Did we find one? */ + +/* + * Note that all address apces should be gone here. + */ + if(act->machine.vmmControl) { /* Did we find one? */ panic("vmm_tear_down_all: control table did not get deallocated\n"); /* Table did not go away */ } } - - return; } /*----------------------------------------------------------------------- ** vmm_map_page ** ** This function maps a page from within the client's logical -** address space into the alternate address space of the -** Virtual Machine Monitor context. +** address space into the alternate address space. ** ** The page need not be locked or resident. If not resident, it will be faulted ** in by this code, which may take some time. Also, if the page is not locked, @@ -495,7 +950,7 @@ void vmm_tear_down_all(thread_act_t act) { ** ** Inputs: ** act - pointer to current thread activation -** index - index of vmm state for this page +** index - index of address space to map into ** va - virtual address within the client's address ** space ** ava - virtual address within the alternate address @@ -514,74 +969,117 @@ void vmm_tear_down_all(thread_act_t act) { -----------------------------------------------------------------------*/ kern_return_t vmm_map_page( - thread_act_t act, - vmm_thread_index_t index, - vm_offset_t cva, - vm_offset_t ava, + thread_t act, + vmm_adsp_id_t index, + addr64_t cva, + addr64_t ava, vm_prot_t prot) { kern_return_t ret; - vmmCntrlEntry *CEntry; - vm_offset_t phys_addr; - register mapping *mpv, *mp, *nmpv, *nmp; - struct phys_entry *pp; - pmap_t mpmap; + register mapping_t *mp; vm_map_t map; + addr64_t ova, nextva; + pmap_t pmap; - CEntry = vmm_get_entry(act, index); /* Get and validate the index */ - if (CEntry == NULL)return KERN_FAILURE; /* No good, failure... */ - -/* - * Find out if we have already mapped the address and toss it out if so. - */ - mp = hw_lock_phys_vir(CEntry->vmmPmap->space, ava); /* See if there is already a mapping */ - if((unsigned int)mp & 1) { /* Did we timeout? */ - panic("vmm_map_page: timeout locking physical entry for alternate virtual address (%08X)\n", ava); /* Yeah, scream about it! */ - return KERN_FAILURE; /* Bad hair day, return FALSE... 
*/ - } - if(mp) { /* If it was there, toss it */ - mpv = hw_cpv(mp); /* Convert mapping block to virtual */ - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ - (void)mapping_remove(CEntry->vmmPmap, ava); /* Throw away the mapping. we're about to replace it */ - } - map = current_act()->map; /* Get the current map */ - - while(1) { /* Keep trying until we get it or until we fail */ - if(hw_cvp_blk(map->pmap, cva)) return KERN_FAILURE; /* Make sure that there is no block map at this address */ + pmap = vmm_get_adsp(act, index); /* Get the guest pmap for this address space */ + if(!pmap) return KERN_FAILURE; /* Bogus address space, no VMs, or we can't make a pmap, failure... */ - mp = hw_lock_phys_vir(map->pmap->space, cva); /* Lock the physical entry for emulator's page */ - if((unsigned int)mp&1) { /* Did we timeout? */ - panic("vmm_map_page: timeout locking physical entry for emulator virtual address (%08X)\n", cva); /* Yeah, scream about it! */ - return KERN_FAILURE; /* Bad hair day, return FALSE... */ - } - - if(mp) { /* We found it... */ - mpv = hw_cpv(mp); /* Convert mapping block to virtual */ + if(ava > vm_max_address) return kVmmInvalidAddress; /* Does the machine support an address of this size? */ + + map = current_thread()->map; /* Get the host's map */ + + if (pmap->pmapFlags & pmapVMgsaa) { /* Guest shadow assist active ? */ + ret = hw_res_map_gv(map->pmap, pmap, cva, ava, getProtPPC(prot, TRUE)); + /* Attempt to resume an existing gv->phys mapping */ + if (mapRtOK != ret) { /* Nothing to resume, construct a new mapping */ + unsigned int pindex; + phys_entry_t *physent; + unsigned int pattr; + unsigned int wimg; + unsigned int mflags; + addr64_t gva; - if(!mpv->physent) return KERN_FAILURE; /* If there is no physical entry (e.g., I/O area), we won't map it */ + while (1) { /* Find host mapping or fail */ + mp = mapping_find(map->pmap, cva, &nextva, 0); + /* Attempt to find host mapping and pin it */ + if (mp) break; /* Got it */ + + ml_set_interrupts_enabled(TRUE); + /* Open 'rupt window */ + ret = vm_fault(map, /* Didn't find it, try to fault in host page read/write */ + vm_map_trunc_page(cva), + VM_PROT_READ | VM_PROT_WRITE, + FALSE, /* change wiring */ + THREAD_UNINT, + NULL, + 0); + ml_set_interrupts_enabled(FALSE); + /* Close 'rupt window */ + if (ret != KERN_SUCCESS) + return KERN_FAILURE; /* Fault failed, return failure */ + } - if(!(mpv->PTEr & 1)) break; /* If we are writable go ahead and map it... 
*/ + if (mpNormal != (mp->mpFlags & mpType)) { + /* Host mapping must be a vanilla page */ + mapping_drop_busy(mp); /* Un-pin host mapping */ + return KERN_FAILURE; /* Return failure */ + } - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the map before we try to fault the write bit on */ + /* Partially construct gv->phys mapping */ + physent = mapping_phys_lookup(mp->mpPAddr, &pindex); + if (!physent) { + mapping_drop_busy(mp); + return KERN_FAILURE; + } + pattr = ((physent->ppLink & (ppI | ppG)) >> 60); + wimg = 0x2; + if (pattr & mmFlgCInhib) wimg |= 0x4; + if (pattr & mmFlgGuarded) wimg |= 0x1; + mflags = (pindex << 16) | mpGuest; + gva = ((ava & ~mpHWFlags) | (wimg << 3) | getProtPPC(prot, TRUE)); + + hw_add_map_gv(map->pmap, pmap, gva, mflags, mp->mpPAddr); + /* Construct new guest->phys mapping */ + + mapping_drop_busy(mp); /* Un-pin host mapping */ } - - ml_set_interrupts_enabled(TRUE); /* Enable interruptions */ - ret = vm_fault(map, trunc_page(cva), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0); /* Didn't find it, try to fault it in read/write... */ - ml_set_interrupts_enabled(FALSE); /* Disable interruptions */ - if (ret != KERN_SUCCESS) return KERN_FAILURE; /* There isn't a page there, return... */ + } else { + while(1) { /* Keep trying until we get it or until we fail */ + + mp = mapping_find(map->pmap, cva, &nextva, 0); /* Find the mapping for this address */ + + if(mp) break; /* We found it */ + + ml_set_interrupts_enabled(TRUE); /* Enable interruptions */ + ret = vm_fault(map, /* Didn't find it, try to fault it in read/write... */ + vm_map_trunc_page(cva), + VM_PROT_READ | VM_PROT_WRITE, + FALSE, /*change wiring */ + THREAD_UNINT, + NULL, + 0); + ml_set_interrupts_enabled(FALSE); /* Disable interruptions */ + if (ret != KERN_SUCCESS) return KERN_FAILURE; /* There isn't a page there, return... */ + } + + if((mp->mpFlags & mpType) != mpNormal) { /* If this is a block, a nest, or some other special thing, we can't map it */ + mapping_drop_busy(mp); /* We have everything we need from the mapping */ + return KERN_FAILURE; /* Leave in shame */ + } + + while(1) { /* Keep trying the enter until it goes in */ + ova = mapping_make(pmap, ava, mp->mpPAddr, 0, 1, prot); /* Enter the mapping into the pmap */ + if(!ova) break; /* If there were no collisions, we are done... */ + mapping_remove(pmap, ova); /* Remove the mapping that collided */ + } + + mapping_drop_busy(mp); /* We have everything we need from the mapping */ } -/* - * Now we make a mapping using all of the attributes of the source page except for protection. - * Also specify that the physical entry is locked. - */ - nmpv = mapping_make(CEntry->vmmPmap, mpv->physent, (ava & -PAGE_SIZE), - (mpv->physent->pte1 & -PAGE_SIZE), prot, ((mpv->physent->pte1 >> 3) & 0xF), 1); - - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry now, we're done with it */ - - CEntry->vmmLastMap = ava & -PAGE_SIZE; /* Remember the last mapping we made */ - CEntry->vmmFlags |= vmmMapDone; /* Set that we did a map operation */ + if (!((getPerProc()->spcFlags) & FamVMmode)) { + act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */ + act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index; /* Remember last address space */ + } return KERN_SUCCESS; } @@ -596,6 +1094,11 @@ kern_return_t vmm_map_page( ** ** See description of vmm_map_page for details. 
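+**
+** For example (hypothetical value, using the index encoding described
+** under Inputs below): index 0x0203 maps the page into address space 2
+** and then launches context 3, while index 0x0003 reuses context 3's
+** context ID as its address space ID.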
** +** Inputs: +** Index is used for both the context and the address space ID. +** index[24:31] is the context id and index[16:23] is the address space. +** if the address space ID is 0, the context ID is used for it. +** ** Outputs: ** Normal exit is to run the VM. Abnormal exit is triggered via a ** non-KERN_SUCCESS return from vmm_map_page or later during the @@ -603,143 +1106,171 @@ kern_return_t vmm_map_page( -----------------------------------------------------------------------*/ vmm_return_code_t vmm_map_execute( - thread_act_t act, + thread_t act, vmm_thread_index_t index, - vm_offset_t cva, - vm_offset_t ava, + addr64_t cva, + addr64_t ava, vm_prot_t prot) { kern_return_t ret; vmmCntrlEntry *CEntry; + unsigned int adsp; + vmm_thread_index_t cndx; - CEntry = vmm_get_entry(act, index); /* Get and validate the index */ + cndx = index & 0xFF; /* Clean it up */ + CEntry = vmm_get_entry(act, cndx); /* Get and validate the index */ if (CEntry == NULL) return kVmmBogusContext; /* Return bogus context */ - ret = vmm_map_page(act, index, cva, ava, prot); /* Go try to map the page on in */ + if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry)) + return kVmmBogusContext; /* Yes, invalid index in Fam */ + + adsp = (index >> 8) & 0xFF; /* Get any requested address space */ + if(!adsp) adsp = (index & 0xFF); /* If 0, use context ID as address space ID */ - if(ret == KERN_SUCCESS) vmm_execute_vm(act, index); /* Return was ok, launch the VM */ + ret = vmm_map_page(act, adsp, cva, ava, prot); /* Go try to map the page on in */ + + + if(ret == KERN_SUCCESS) { + act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */ + act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx; /* Remember last address space */ + vmm_execute_vm(act, cndx); /* Return was ok, launch the VM */ + } - return kVmmInvalidAddress; /* We had trouble mapping in the page */ + return ret; /* We had trouble mapping in the page */ } /*----------------------------------------------------------------------- ** vmm_map_list ** -** This function maps a list of pages into the alternate's logical -** address space. +** This function maps a list of pages into various address spaces ** ** Inputs: ** act - pointer to current thread activation -** index - index of vmm state for this page +** index - index of default address space (used if not specifed in list entry ** count - number of pages to release +** flavor - 0 if 32-bit version, 1 if 64-bit ** vmcpComm in the comm page contains up to kVmmMaxMapPages to map ** ** Outputs: ** kernel return code indicating success or failure ** KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded ** or the vmm_map_page call fails. +** We return kVmmInvalidAddress if virtual address size is not supported -----------------------------------------------------------------------*/ kern_return_t vmm_map_list( - thread_act_t act, - vmm_thread_index_t index, - unsigned int cnt) + thread_t act, + vmm_adsp_id_t index, + unsigned int cnt, + unsigned int flavor) { vmmCntrlEntry *CEntry; boolean_t ret; unsigned int i; - vmmMapList *lst; - vm_offset_t cva; - vm_offset_t ava; + vmmMList *lst; + vmmMList64 *lstx; + addr64_t cva; + addr64_t ava; vm_prot_t prot; + vmm_adsp_id_t adsp; - CEntry = vmm_get_entry(act, index); /* Get and validate the index */ - if (CEntry == NULL)return -1; /* No good, failure... 
*/ + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't a vmm or the index is bogus */ if(cnt > kVmmMaxMapPages) return KERN_FAILURE; /* They tried to map too many */ if(!cnt) return KERN_SUCCESS; /* If they said none, we're done... */ - lst = &((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */ + lst = (vmmMList *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */ + lstx = (vmmMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */ for(i = 0; i < cnt; i++) { /* Step and release all pages in list */ - cva = lst[i].vmlva; /* Get the actual address */ - ava = lst[i].vmlava & -vmlFlgs; /* Get the alternate address */ - prot = lst[i].vmlava & vmlProt; /* Get the protection bits */ + if(flavor) { /* Check if 32- or 64-bit addresses */ + cva = lstx[i].vmlva; /* Get the 64-bit actual address */ + ava = lstx[i].vmlava; /* Get the 64-bit guest address */ + } + else { + cva = lst[i].vmlva; /* Get the 32-bit actual address */ + ava = lst[i].vmlava; /* Get the 32-bit guest address */ + } + + prot = ava & vmmlProt; /* Extract the protection bits */ + adsp = (ava & vmmlAdID) >> 4; /* Extract an explicit address space request */ + if(!adsp) /* If no explicit, use supplied default */ + adsp = index - 1; + ava &= 0xFFFFFFFFFFFFF000ULL; /* Clean up the address */ + ret = vmm_map_page(act, index, cva, ava, prot); /* Go try to map the page on in */ - if(ret != KERN_SUCCESS) return KERN_FAILURE; /* Bail if any error */ + if(ret != KERN_SUCCESS) /* Bail if any error */ + return ret; } - return KERN_SUCCESS ; /* Return... */ + return KERN_SUCCESS; } /*----------------------------------------------------------------------- ** vmm_get_page_mapping ** -** This function determines whether the specified VMM -** virtual address is mapped. +** Given a context index and a guest virtual address, convert the address +** to its corresponding host virtual address. ** ** Inputs: ** act - pointer to current thread activation -** index - index of vmm state for this page -** va - virtual address within the alternate's address -** space +** index - context index +** gva - guest virtual address ** ** Outputs: -** Non-alternate's virtual address (page aligned) or -1 if not mapped or any failure +** Host virtual address (page aligned) or -1 if not mapped or any failure ** ** Note: -** If there are aliases to the page in the non-alternate address space, -** this call could return the wrong one. Moral of the story: no aliases. +** If the host address space contains multiple virtual addresses mapping +** to the physical address corresponding to the specified guest virtual +** address (i.e., host virtual aliases), it is unpredictable which host +** virtual address (alias) will be returned. Moral of the story: No host +** virtual aliases. -----------------------------------------------------------------------*/ -vm_offset_t vmm_get_page_mapping( - thread_act_t act, - vmm_thread_index_t index, - vm_offset_t va) +addr64_t vmm_get_page_mapping( + thread_t act, + vmm_adsp_id_t index, + addr64_t gva) { - vmmCntrlEntry *CEntry; - vm_offset_t ova; - register mapping *mpv, *mp, *nmpv, *nmp; + register mapping_t *mp; pmap_t pmap; + addr64_t nextva, hva; + ppnum_t pa; - CEntry = vmm_get_entry(act, index); /* Get and validate the index */ - if (CEntry == NULL)return -1; /* No good, failure... 
*/ + pmap = vmm_get_adsp(act, index); /* Get and validate the index */ + if (!pmap)return -1; /* No good, failure... */ + + if (pmap->pmapFlags & pmapVMgsaa) { /* Guest shadow assist (GSA) active ? */ + return (hw_gva_to_hva(pmap, gva)); /* Convert guest to host virtual address */ + } else { + mp = mapping_find(pmap, gva, &nextva, 0); /* Find guest mapping for this virtual address */ + + if(!mp) return -1; /* Not mapped, return -1 */ - mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va); /* Look up the mapping */ - if((unsigned int)mp & 1) { /* Did we timeout? */ - panic("vmm_get_page_mapping: timeout locking physical entry for alternate virtual address (%08X)\n", va); /* Yeah, scream about it! */ - return -1; /* Bad hair day, return FALSE... */ - } - if(!mp) return -1; /* Not mapped, return -1 */ + pa = mp->mpPAddr; /* Remember the physical page address */ - mpv = hw_cpv(mp); /* Convert mapping block to virtual */ - pmap = current_act()->map->pmap; /* Get the current pmap */ - ova = -1; /* Assume failure for now */ + mapping_drop_busy(mp); /* Go ahead and relase the mapping now */ - for(nmpv = hw_cpv(mpv->physent->phys_link); nmpv; nmpv = hw_cpv(nmpv->next)) { /* Scan 'em all */ - - if(nmpv->pmap != pmap) continue; /* Skip all the rest if this is not the right pmap... */ - - ova = ((((unsigned int)nmpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000; /* Backward hash to the wrapped VADDR */ - ova = ova | ((nmpv->PTEv << 1) & 0xF0000000); /* Move in the segment number */ - ova = ova | ((nmpv->PTEv << 22) & 0x0FC00000); /* Add in the API for the top of the address */ - break; /* We're done now, pass virtual address back */ - } + pmap = current_thread()->map->pmap; /* Get the host pmap */ + hva = mapping_p2v(pmap, pa); /* Now find the source virtual */ - hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ + if(hva != 0) return hva; /* We found it... */ - if(ova == -1) panic("vmm_get_page_mapping: could not back-map alternate va (%08X)\n", va); /* We are bad wrong if we can't find it */ + panic("vmm_get_page_mapping: could not back-map guest va (%016llX)\n", gva); + /* We are bad wrong if we can't find it */ - return ova; + return -1; /* Never executed, prevents compiler warning */ + } } /*----------------------------------------------------------------------- ** vmm_unmap_page ** -** This function unmaps a page from the alternate's logical -** address space. +** This function unmaps a page from the guest address space. ** ** Inputs: ** act - pointer to current thread activation @@ -752,20 +1283,24 @@ vm_offset_t vmm_get_page_mapping( -----------------------------------------------------------------------*/ kern_return_t vmm_unmap_page( - thread_act_t act, - vmm_thread_index_t index, - vm_offset_t va) + thread_t act, + vmm_adsp_id_t index, + addr64_t va) { - vmmCntrlEntry *CEntry; - boolean_t ret; - kern_return_t kern_result = KERN_SUCCESS; + addr64_t nadd; + pmap_t pmap; - CEntry = vmm_get_entry(act, index); /* Get and validate the index */ - if (CEntry == NULL)return -1; /* No good, failure... */ + pmap = vmm_get_adsp(act, index); /* Get and validate the index */ + if (!pmap)return -1; /* No good, failure... */ - ret = mapping_remove(CEntry->vmmPmap, va); /* Toss the mapping */ - - return (ret ? KERN_SUCCESS : KERN_FAILURE); /* Return... 
*/ + if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */ + hw_susp_map_gv(act->map->pmap, pmap, va); /* Suspend the mapping */ + return (KERN_SUCCESS); /* Always returns success */ + } else { + nadd = mapping_remove(pmap, va); /* Toss the mapping */ + + return ((nadd & 1) ? KERN_FAILURE : KERN_SUCCESS); /* Return... */ + } } /*----------------------------------------------------------------------- @@ -778,6 +1313,7 @@ kern_return_t vmm_unmap_page( ** act - pointer to current thread activation ** index - index of vmm state for this page ** count - number of pages to release +** flavor - 0 if 32-bit, 1 if 64-bit ** vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap ** ** Outputs: @@ -786,29 +1322,64 @@ kern_return_t vmm_unmap_page( -----------------------------------------------------------------------*/ kern_return_t vmm_unmap_list( - thread_act_t act, - vmm_thread_index_t index, - unsigned int cnt) + thread_t act, + vmm_adsp_id_t index, + unsigned int cnt, + unsigned int flavor) { vmmCntrlEntry *CEntry; - boolean_t ret; kern_return_t kern_result = KERN_SUCCESS; - unsigned int *pgaddr, i; + unsigned int i; + addr64_t gva; + vmmUMList *lst; + vmmUMList64 *lstx; + pmap_t pmap; + int adsp; - CEntry = vmm_get_entry(act, index); /* Get and validate the index */ - if (CEntry == NULL)return -1; /* No good, failure... */ - - if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE; /* They tried to unmap too many */ - if(!cnt) return KERN_SUCCESS; /* If they said none, we're done... */ + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) { /* Either this isn't a vmm or the index is bogus */ + kern_result = KERN_FAILURE; + goto out; + } - pgaddr = &((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */ + if(cnt > kVmmMaxUnmapPages) { /* They tried to unmap too many */ + kern_result = KERN_FAILURE; + goto out; + } + if(!cnt) { /* If they said none, we're done... */ + kern_result = KERN_SUCCESS; + goto out; + } - for(i = 0; i < cnt; i++) { /* Step and release all pages in list */ + lstx = (vmmUMList64 *) &((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */ + lst = (vmmUMList *)lstx; - (void)mapping_remove(CEntry->vmmPmap, pgaddr[i]); /* Toss the mapping */ + for(i = 0; i < cnt; i++) { /* Step and release all pages in list */ + if(flavor) { /* Check if 32- or 64-bit addresses */ + gva = lstx[i].vmlava; /* Get the 64-bit guest address */ + } + else { + gva = lst[i].vmlava; /* Get the 32-bit guest address */ + } + + adsp = (gva & vmmlAdID) >> 4; /* Extract an explicit address space request */ + if(!adsp) /* If no explicit, use supplied default */ + adsp = index - 1; + pmap = act->machine.vmmControl->vmmAdsp[adsp]; /* Get the pmap for this request */ + if(!pmap) + continue; /* Ain't nuthin' mapped here, no durn map... */ + + gva &= 0xFFFFFFFFFFFFF000ULL; /* Clean up the address */ + if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */ + hw_susp_map_gv(act->map->pmap, pmap, gva); + /* Suspend the mapping */ + } else { + (void)mapping_remove(pmap, gva); /* Toss the mapping */ + } } - return KERN_SUCCESS ; /* Return... 
 /*-----------------------------------------------------------------------
@@ -778,6 +1313,7 @@ kern_return_t vmm_unmap_page(
 **		act - pointer to current thread activation
 **		index - index of vmm state for this page
 **		count - number of pages to release
+**		flavor - 0 if 32-bit, 1 if 64-bit
 **		vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
 **
 ** Outputs:
@@ -786,29 +1322,64 @@ kern_return_t vmm_unmap_page(
 -----------------------------------------------------------------------*/
 
 kern_return_t vmm_unmap_list(
-	thread_act_t		act,
-	vmm_thread_index_t	index,
-	unsigned int		cnt)
+	thread_t		act,
+	vmm_adsp_id_t		index,
+	unsigned int		cnt,
+	unsigned int		flavor)
 {
 	vmmCntrlEntry		*CEntry;
-	boolean_t		ret;
 	kern_return_t		kern_result = KERN_SUCCESS;
-	unsigned int		*pgaddr, i;
+	unsigned int		i;
+	addr64_t		gva;
+	vmmUMList		*lst;
+	vmmUMList64		*lstx;
+	pmap_t			pmap;
+	int			adsp;
 
-	CEntry = vmm_get_entry(act, index);		/* Get and validate the index */
-	if (CEntry == NULL)return -1;			/* No good, failure... */
-
-	if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE;	/* They tried to unmap too many */
-	if(!cnt) return KERN_SUCCESS;			/* If they said none, we're done... */
+	CEntry = vmm_get_entry(act, index);		/* Convert index to entry */
+	if (CEntry == NULL) {				/* Either this isn't a vmm or the index is bogus */
+		kern_result = KERN_FAILURE;
+		goto out;
+	}
 
-	pgaddr = &((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */
+	if(cnt > kVmmMaxUnmapPages) {			/* They tried to unmap too many */
+		kern_result = KERN_FAILURE;
+		goto out;
+	}
+	if(!cnt) {					/* If they said none, we're done... */
+		kern_result = KERN_SUCCESS;
+		goto out;
+	}
 
-	for(i = 0; i < cnt; i++) {			/* Step and release all pages in list */
+	lstx = (vmmUMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */
+	lst = (vmmUMList *)lstx;
 
-		(void)mapping_remove(CEntry->vmmPmap, pgaddr[i]);	/* Toss the mapping */
+	for(i = 0; i < cnt; i++) {			/* Step and release all pages in list */
+		if(flavor) {				/* Check if 32- or 64-bit addresses */
+			gva = lstx[i].vmlava;		/* Get the 64-bit guest address */
+		}
+		else {
+			gva = lst[i].vmlava;		/* Get the 32-bit guest address */
+		}
+
+		adsp = (gva & vmmlAdID) >> 4;		/* Extract an explicit address space request */
+		if(!adsp)				/* If no explicit, use supplied default */
+			adsp = index - 1;
+		pmap = act->machine.vmmControl->vmmAdsp[adsp];	/* Get the pmap for this request */
+		if(!pmap)
+			continue;			/* Ain't nuthin' mapped here, no durn map... */
+
+		gva &= 0xFFFFFFFFFFFFF000ULL;		/* Clean up the address */
+		if (pmap->pmapFlags & pmapVMgsaa) {	/* Handle guest shadow assist specially */
+			hw_susp_map_gv(act->map->pmap, pmap, gva);	/* Suspend the mapping */
+		} else {
+			(void)mapping_remove(pmap, gva);	/* Toss the mapping */
+		}
	}
 
-	return KERN_SUCCESS ;				/* Return... */
+out:
+	return kern_result;
 }
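Each comm-page list entry reuses the low 12 bits of a page-aligned address: bits 4 and up of that field may carry an explicit address-space ID (the vmmlAdID extraction above), with zero meaning "use the default derived from index". A sketch of decoding one entry, assuming for illustration that vmmlAdID covers bits 4..11 (the real mask is defined in the vmm headers, not here):

    #include <stdint.h>
    #include <stdio.h>

    #define VMMLADID 0x0000000000000FF0ULL  /* assumed value of vmmlAdID: bits 4..11 */

    /* Decode one unmap-list entry: split out the optional address-space
       ID and the page-aligned guest address, as vmm_unmap_list does. */
    static void decode_entry(uint64_t vmlava, unsigned dflt_adsp) {
        unsigned adsp = (unsigned)((vmlava & VMMLADID) >> 4); /* explicit adsp, if any */
        if (!adsp) adsp = dflt_adsp;                          /* else use the default */
        uint64_t gva = vmlava & 0xFFFFFFFFFFFFF000ULL;        /* page-align the address */
        printf("adsp %u, gva 0x%llx\n", adsp, (unsigned long long)gva);
    }

    int main(void) {
        decode_entry(0x00000000DEAD3020ULL, 1); /* explicit space 2 in bits 4..11 */
        decode_entry(0x00000000BEEF1000ULL, 1); /* no explicit space: default 1 */
        return 0;
    }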
 /*-----------------------------------------------------------------------
@@ -829,20 +1400,23 @@ kern_return_t vmm_unmap_list(
 -----------------------------------------------------------------------*/
 
 void vmm_unmap_all_pages(
-	thread_act_t		act,
-	vmm_thread_index_t	index)
+	thread_t		act,
+	vmm_adsp_id_t		index)
 {
-	vmmCntrlEntry		*CEntry;
+	pmap_t			pmap;
 
-	CEntry = vmm_get_entry(act, index);		/* Convert index to entry */
-	if (CEntry == NULL) return;			/* Either this isn't vmm thread or the index is bogus */
-
-/*
- *	Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
- */
-	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);	/* Remove final page explicitly because we might have mapped it */
-	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);	/* Remove all entries from this map */
-	return;
+	pmap = vmm_get_adsp(act, index);		/* Convert index to entry */
+	if (!pmap) return;				/* Either this isn't vmm thread or the index is bogus */
+
+	if (pmap->pmapFlags & pmapVMgsaa) {		/* Handle guest shadow assist specially */
+		hw_rem_all_gv(pmap);			/* Remove all guest's mappings from shadow hash table */
+	} else {
+		/*
+		 *	Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
		 */
+		mapping_remove(pmap, 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
+		pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
+	}
 }
 
 /*-----------------------------------------------------------------------
@@ -868,31 +1442,39 @@ void vmm_unmap_all_pages(
 -----------------------------------------------------------------------*/
 
 boolean_t vmm_get_page_dirty_flag(
-	thread_act_t		act,
-	vmm_thread_index_t	index,
-	vm_offset_t		va,
+	thread_t		act,
+	vmm_adsp_id_t		index,
+	addr64_t		va,
 	unsigned int		reset)
 {
-	vmmCntrlEntry		*CEntry;
-	register mapping	*mpv, *mp;
 	unsigned int		RC;
+	pmap_t			pmap;
 
-	CEntry = vmm_get_entry(act, index);		/* Convert index to entry */
-	if (CEntry == NULL) return 1;			/* Either this isn't vmm thread or the index is bogus */
-
-	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
-	if((unsigned int)mp & 1) {			/* Did we timeout? */
-		panic("vmm_get_page_dirty_flag: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
-		return 1;				/* Bad hair day, return dirty... */
+	pmap = vmm_get_adsp(act, index);		/* Convert index to entry */
+	if (!pmap) return 1;				/* Either this isn't vmm thread or the index is bogus */
+
+	if (pmap->pmapFlags & pmapVMgsaa) {		/* Handle guest shadow assist specially */
+		RC = hw_test_rc_gv(act->map->pmap, pmap, va, reset);	/* Fetch the RC bits and clear if requested */
+	} else {
+		RC = hw_test_rc(pmap, (addr64_t)va, reset);	/* Fetch the RC bits and clear if requested */
 	}
-	if(!mp) return 1;				/* Not mapped, return dirty... */
-
-	RC = hw_test_rc(mp, reset);			/* Fetch the RC bits and clear if requested */
-
-	mpv = hw_cpv(mp);				/* Convert mapping block to virtual */
-	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
+	switch (RC & mapRetCode) {			/* Decode return code */
+
+		case mapRtOK:				/* Changed */
+			return ((RC & (unsigned int)mpC) == (unsigned int)mpC);	/* Return if dirty or not */
+			break;
+
+		case mapRtNotFnd:			/* Didn't find it */
+			return 1;			/* Return dirty */
+			break;
+
+		default:
+			panic("vmm_get_page_dirty_flag: hw_test_rc failed - rc = %d, pmap = %p, va = %016llX\n", RC, pmap, va);
+
+	}
 
-	return (RC & 1);				/* Return the change bit */
+	return 1;					/* Return the change bit */
 }
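hw_test_rc now returns a status code in the low bits (masked by mapRetCode) alongside the fetched reference/change bits, and the switch above keys on that. A sketch of the decode with assumed bit encodings chosen only for illustration (the real mapRetCode/mpC values live in the PPC mapping headers):

    #include <stdio.h>

    /* Assumed encodings, for illustration only */
    #define mapRetCode  0x0F   /* low nibble: return code */
    #define mapRtOK     0x00
    #define mapRtNotFnd 0x01
    #define mpC         0x10   /* change (dirty) bit in the RC field */

    /* Mirror vmm_get_page_dirty_flag's decode: dirty if changed or unmapped. */
    static int page_dirty(unsigned RC) {
        switch (RC & mapRetCode) {
        case mapRtOK:     return (RC & mpC) == mpC; /* mapped: report the C bit */
        case mapRtNotFnd: return 1;                 /* unmapped: assume dirty */
        default:          return 1;                 /* the kernel panics here */
        }
    }

    int main(void) {
        printf("%d %d %d\n",
               page_dirty(mpC | mapRtOK),  /* mapped and changed -> 1 */
               page_dirty(mapRtOK),        /* mapped, clean      -> 0 */
               page_dirty(mapRtNotFnd));   /* not found          -> 1 */
        return 0;
    }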
@@ -915,32 +1497,42 @@ boolean_t vmm_get_page_dirty_flag(
 -----------------------------------------------------------------------*/
 
 kern_return_t vmm_protect_page(
-	thread_act_t		act,
-	vmm_thread_index_t	index,
-	vm_offset_t		va,
+	thread_t		act,
+	vmm_adsp_id_t		index,
+	addr64_t		va,
 	vm_prot_t		prot)
 {
-	vmmCntrlEntry		*CEntry;
-	register mapping	*mpv, *mp;
-	unsigned int		RC;
+	addr64_t		nextva;
+	int			ret;
+	pmap_t			pmap;
 
-	CEntry = vmm_get_entry(act, index);		/* Convert index to entry */
-	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */
-
-	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
-	if((unsigned int)mp & 1) {			/* Did we timeout? */
-		panic("vmm_protect_page: timeout locking physical entry for virtual address (%08X)\n", va);	/* Yeah, scream about it! */
-		return 1;				/* Bad hair day, return dirty... */
+	pmap = vmm_get_adsp(act, index);		/* Convert index to entry */
+	if (!pmap) return KERN_FAILURE;			/* Either this isn't vmm thread or the index is bogus */
+
+	if (pmap->pmapFlags & pmapVMgsaa) {		/* Handle guest shadow assist specially */
+		ret = hw_protect_gv(pmap, va, prot);	/* Try to change protection, GSA variant */
+	} else {
+		ret = hw_protect(pmap, va, prot, &nextva);	/* Try to change protection */
 	}
-	if(!mp) return KERN_SUCCESS;			/* Not mapped, just return... */
-
-	hw_prot_virt(mp, prot);				/* Set the protection */
-	mpv = hw_cpv(mp);				/* Convert mapping block to virtual */
-	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
+	switch (ret) {					/* Decode return code */
+
+		case mapRtOK:				/* All ok... */
+			break;				/* Outta here */
+
+		case mapRtNotFnd:			/* Didn't find it */
+			return KERN_SUCCESS;		/* Ok, return... */
+			break;
+
+		default:
+			panic("vmm_protect_page: hw_protect failed - rc = %d, pmap = %p, va = %016llX\n", ret, pmap, (addr64_t)va);
+
+	}
 
-	CEntry->vmmLastMap = va & -PAGE_SIZE;		/* Remember the last mapping we changed */
-	CEntry->vmmFlags |= vmmMapDone;			/* Set that we did a map operation */
+	if (!((getPerProc()->spcFlags) & FamVMmode)) {
+		act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
+		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;	/* Remember last address space */
+	}
 
 	return KERN_SUCCESS;				/* Return */
 }
@@ -952,7 +1544,10 @@ kern_return_t vmm_protect_page(
 ** This function sets the protection bits of a mapped page
 ** and then directly starts executing.
 **
-** See description of vmm_protect_page for details.
+** See description of vmm_protect_page for details.
+**
+** Inputs:
+**		See vmm_protect_page and vmm_map_execute
 **
 ** Outputs:
 **		Normal exit is to run the VM.  Abnormal exit is triggered via a
@@ -961,23 +1556,35 @@ kern_return_t vmm_protect_page(
 -----------------------------------------------------------------------*/
 
 vmm_return_code_t vmm_protect_execute(
-	thread_act_t		act,
+	thread_t		act,
 	vmm_thread_index_t	index,
-	vm_offset_t		va,
+	addr64_t		va,
 	vm_prot_t		prot)
 {
 	kern_return_t		ret;
 	vmmCntrlEntry		*CEntry;
+	unsigned int		adsp;
+	vmm_thread_index_t	cndx;
 
-	CEntry = vmm_get_entry(act, index);		/* Get and validate the index */
-
-	if (CEntry == NULL) return kVmmBogusContext;	/* Return bogus context */
+	cndx = index & 0xFF;				/* Clean it up */
+	CEntry = vmm_get_entry(act, cndx);		/* Get and validate the index */
+	if (CEntry == NULL) return kVmmBogusContext;	/* Return bogus context */
 
-	ret = vmm_protect_page(act, index, va, prot);	/* Go try to change access */
+	adsp = (index >> 8) & 0xFF;			/* Get any requested address space */
+	if(!adsp) adsp = (index & 0xFF);		/* If 0, use context ID as address space ID */
 
-	if(ret == KERN_SUCCESS) vmm_execute_vm(act, index);	/* Return was ok, launch the VM */
+	if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
+		return kVmmBogusContext;		/* Yes, invalid index in Fam */
+
+	ret = vmm_protect_page(act, adsp, va, prot);	/* Go try to change access */
+
+	if(ret == KERN_SUCCESS) {
+		act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
+		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;	/* Remember last address space */
+		vmm_execute_vm(act, cndx);		/* Return was ok, launch the VM */
+	}
 
-	return kVmmInvalidAddress;			/* We had trouble of some kind (shouldn't happen) */
+	return ret;					/* We had trouble of some kind (shouldn't happen) */
 }
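vmm_protect_execute now packs two fields into the thread index: the low byte is the context ID and the next byte an optional address-space ID, with zero meaning "same as the context". A sketch of that unpacking (field layout taken from the code above; the helper name is ours, not the kernel's):

    #include <stdio.h>

    /* Unpack a vmm_thread_index_t the way vmm_protect_execute does:
       low byte = context index, next byte = optional address space. */
    static void unpack_index(unsigned index, unsigned *cndx, unsigned *adsp) {
        *cndx = index & 0xFF;         /* clean up the context index */
        *adsp = (index >> 8) & 0xFF;  /* any requested address space */
        if (!*adsp) *adsp = *cndx;    /* 0 means: use the context ID */
    }

    int main(void) {
        unsigned cndx, adsp;
        unpack_index(0x0203, &cndx, &adsp);          /* context 3, explicit space 2 */
        printf("cndx %u adsp %u\n", cndx, adsp);
        unpack_index(0x0003, &cndx, &adsp);          /* context 3, default space */
        printf("cndx %u adsp %u\n", cndx, adsp);
        return 0;
    }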
@@ -998,26 +1605,23 @@ vmm_return_code_t vmm_protect_execute(
 -----------------------------------------------------------------------*/
 
 kern_return_t vmm_get_float_state(
-	thread_act_t		act,
+	thread_t		act,
 	vmm_thread_index_t	index)
 {
 	vmmCntrlEntry		*CEntry;
-	vmmCntrlTable		*CTable;
 	int			i;
 	register struct savearea_fpu *sv;
 
 	CEntry = vmm_get_entry(act, index);		/* Convert index to entry */
 	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */
 
-	act->mact.specFlags &= ~floatCng;		/* Clear the special flag */
+	act->machine.specFlags &= ~floatCng;		/* Clear the special flag */
 	CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;	/* Clear the change indication */
 
 	fpu_save(&CEntry->vmmFacCtx);			/* Save context if live */
 
-	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCRshadow.i[0] = CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[0];	/* Copy FPSCR */
-	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCRshadow.i[1] = CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[1];	/* Copy FPSCR */
-
-	if(sv = CEntry->vmmFacCtx.FPUsave) {		/* Is there context yet? */
+	if(CEntry->vmmFacCtx.FPUsave) {			/* Is there context yet? */
+		sv = CEntry->vmmFacCtx.FPUsave;
 		bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8);	/* 32 registers */
 		return KERN_SUCCESS;
 	}
@@ -1046,11 +1650,10 @@ kern_return_t vmm_get_float_state(
 -----------------------------------------------------------------------*/
 
 kern_return_t vmm_get_vector_state(
-	thread_act_t		act,
+	thread_t		act,
 	vmm_thread_index_t	index)
 {
 	vmmCntrlEntry		*CEntry;
-	vmmCntrlTable		*CTable;
 	int			i, j;
 	unsigned int		vrvalidwrk;
 	register struct savearea_vec *sv;
@@ -1060,15 +1663,11 @@ kern_return_t vmm_get_vector_state(
 
 	vec_save(&CEntry->vmmFacCtx);			/* Save context if live */
 
-	act->mact.specFlags &= ~vectorCng;		/* Clear the special flag */
+	act->machine.specFlags &= ~vectorCng;		/* Clear the special flag */
 	CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;	/* Clear the change indication */
 
-	for(j=0; j < 4; j++) {				/* Set value for vscr */
-		CEntry->vmmContextKern->vmm_proc_state.ppcVSCRshadow.i[j] = CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j];
-	}
-
-	if(sv = CEntry->vmmFacCtx.VMXsave) {		/* Is there context yet? */
-
+	if(CEntry->vmmFacCtx.VMXsave) {			/* Is there context yet? */
+		sv = CEntry->vmmFacCtx.VMXsave;
 		vrvalidwrk = sv->save_vrvalid;		/* Get the valid flags */
 
 		for(i = 0; i < 32; i++) {		/* Copy the saved registers and invalidate the others */
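The vector path copies only registers whose bit is set in save_vrvalid and poisons the rest ("invalidate the others"), in the spirit of the QNaNbarbarian fill pattern. A sketch of that bitmask-driven copy, assuming the PPC convention that the most significant bit corresponds to VR0 and using a made-up QNaN constant:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Stand-in for the pattern used for never-saved registers */
    static const uint32_t qnan[4] = { 0x7FFFDEAD, 0x7FFFDEAD, 0x7FFFDEAD, 0x7FFFDEAD };

    /* Copy the valid vector registers, QNaN-fill the rest, vrvalid-style.
       Assumes bit 31 (MSB) of the mask corresponds to VR0. */
    static void copy_vectors(uint32_t vrvalid, const uint32_t src[32][4], uint32_t dst[32][4]) {
        for (int i = 0; i < 32; i++) {
            if (vrvalid & (0x80000000U >> i))
                memcpy(dst[i], src[i], sizeof dst[i]); /* register was saved */
            else
                memcpy(dst[i], qnan, sizeof dst[i]);   /* never touched: poison it */
        }
    }

    int main(void) {
        uint32_t src[32][4] = {{1, 2, 3, 4}}, dst[32][4];
        copy_vectors(0x80000000U, src, dst);           /* only VR0 is valid */
        printf("VR0[0]=%u VR1[0]=0x%X\n", dst[0][0], dst[1][0]);
        return 0;
    }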
@@ -1123,7 +1722,7 @@ kern_return_t vmm_get_vector_state(
 -----------------------------------------------------------------------*/
 
 kern_return_t vmm_set_timer(
-	thread_act_t		act,
+	thread_t		act,
 	vmm_thread_index_t	index,
 	unsigned int		timerhi,
 	unsigned int		timerlo)
@@ -1145,6 +1744,10 @@ kern_return_t vmm_set_timer(
 **
 ** This function causes the timer for a specified VM to be
 ** returned in return_params[0] and return_params[1].
+** Note that this is kind of funky for 64-bit VMs because we
+** still split the timer into two parts so that parms 0 and 1
+** are always set.  Strictly speaking that isn't necessary,
+** since those parms are 8 bytes wide.
 **
 **
 ** Inputs:
@@ -1157,23 +1760,26 @@ kern_return_t vmm_set_timer(
 -----------------------------------------------------------------------*/
 
 kern_return_t vmm_get_timer(
-	thread_act_t		act,
+	thread_t		act,
 	vmm_thread_index_t	index)
 {
 	vmmCntrlEntry		*CEntry;
-	vmmCntrlTable		*CTable;
 
 	CEntry = vmm_get_entry(act, index);		/* Convert index to entry */
 	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */
 
-	CEntry->vmmContextKern->return_params[0] = (CEntry->vmmTimer >> 32);	/* Return the last timer value */
-	CEntry->vmmContextKern->return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the last timer value */
-
+	if(CEntry->vmmXAFlgs & vmm64Bit) {		/* A 64-bit virtual machine? */
+		CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32);	/* Return the last timer value */
+		CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[1] = (uint32_t)CEntry->vmmTimer;		/* Return the last timer value */
+	}
+	else {
+		CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[0] = (CEntry->vmmTimer >> 32);		/* Return the last timer value */
+		CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[1] = (uint32_t)CEntry->vmmTimer;		/* Return the last timer value */
	}
 	return KERN_SUCCESS;
 }
 
-
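As the header comment notes, the 64-bit timer is still returned as two 32-bit params even when the params themselves are 8 bytes wide. A tiny sketch of the split and the guest-side reassembly:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint64_t vmmTimer = 0x00000123456789ABULL;  /* some AbsoluteTime value */

        uint32_t hi = (uint32_t)(vmmTimer >> 32);   /* return_params[0] */
        uint32_t lo = (uint32_t)vmmTimer;           /* return_params[1] */

        uint64_t back = ((uint64_t)hi << 32) | lo;  /* guest-side reassembly */
        printf("hi 0x%X lo 0x%X match %d\n", hi, lo, back == vmmTimer);
        return 0;
    }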
*/ - panic("vmm_timer_pop: no user context; act = %08X\n", act); + panic("vmm_timer_pop: no user context; act = %p\n", act); } sv->save_exception = kVmmReturnNull*4; /* Indicate that this is a null exception */ vmm_force_exit(act, sv); /* Intercept a running VM */ @@ -1246,11 +1851,9 @@ void vmm_timer_pop( } if(any) { - if (act->mact.qactTimer == 0 || soonest <= act->mact.qactTimer) - act->mact.qactTimer = soonest; /* Set lowest timer */ + if (act->machine.qactTimer == 0 || soonest <= act->machine.qactTimer) + act->machine.qactTimer = soonest; /* Set lowest timer */ } - - return; } @@ -1280,11 +1883,11 @@ void vmm_timer_pop( int vmm_stop_vm(struct savearea *save) { - thread_act_t act; + thread_t act; vmmCntrlTable *CTable; int cvi, i; task_t task; - thread_act_t fact; + thread_t fact; unsigned int vmmask; ReturnHandler *stopapc; @@ -1294,15 +1897,15 @@ int vmm_stop_vm(struct savearea *save) task_lock(task); /* Lock our task */ - fact = (thread_act_t)task->thr_acts.next; /* Get the first activation on task */ - act = 0; /* Pretend we didn't find it yet */ + fact = (thread_t)task->threads.next; /* Get the first activation on task */ + act = NULL; /* Pretend we didn't find it yet */ - for(i = 0; i < task->thr_act_count; i++) { /* All of the activations */ - if(fact->mact.vmmControl) { /* Is this a virtual machine monitor? */ + for(i = 0; i < task->thread_count; i++) { /* All of the activations */ + if(fact->machine.vmmControl) { /* Is this a virtual machine monitor? */ act = fact; /* Yeah... */ break; /* Bail the loop... */ } - fact = (thread_act_t)fact->thr_acts.next; /* Go to the next one */ + fact = (thread_t)fact->task_threads.next; /* Go to the next one */ } if(!((unsigned int)act)) { /* See if we have VMMs yet */ @@ -1311,41 +1914,48 @@ int vmm_stop_vm(struct savearea *save) return 0; /* Go generate a syscall exception */ } - act_lock_thread(act); /* Make sure this stays 'round */ + thread_reference(act); + task_unlock(task); /* Safe to release now */ - CTable = act->mact.vmmControl; /* Get the pointer to the table */ + thread_mtx_lock(act); + + CTable = act->machine.vmmControl; /* Get the pointer to the table */ if(!((unsigned int)CTable & -2)) { /* Are there any all the way up yet? */ - act_unlock_thread(act); /* Unlock the activation */ + thread_mtx_unlock(act); /* Unlock the activation */ + thread_deallocate(act); ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ return 0; /* Go generate a syscall exception */ } if(!(vmmask = save->save_r3)) { /* Get the stop mask and check if all zeros */ - act_unlock_thread(act); /* Unlock the activation */ + thread_mtx_unlock(act); /* Unlock the activation */ + thread_deallocate(act); ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ save->save_r3 = KERN_SUCCESS; /* Set success */ return 1; /* Return... 
 /*-----------------------------------------------------------------------
@@ -1280,11 +1883,11 @@ void vmm_timer_pop(
 
 int vmm_stop_vm(struct savearea *save)
 {
-	thread_act_t		act;
+	thread_t		act;
 	vmmCntrlTable		*CTable;
 	int			cvi, i;
 	task_t			task;
-	thread_act_t		fact;
+	thread_t		fact;
 	unsigned int		vmmask;
 	ReturnHandler		*stopapc;
 
@@ -1294,15 +1897,15 @@ int vmm_stop_vm(struct savearea *save)
 
 	task_lock(task);				/* Lock our task */
 
-	fact = (thread_act_t)task->thr_acts.next;	/* Get the first activation on task */
-	act = 0;					/* Pretend we didn't find it yet */
+	fact = (thread_t)task->threads.next;		/* Get the first activation on task */
+	act = NULL;					/* Pretend we didn't find it yet */
 
-	for(i = 0; i < task->thr_act_count; i++) {	/* All of the activations */
-		if(fact->mact.vmmControl) {		/* Is this a virtual machine monitor? */
+	for(i = 0; i < task->thread_count; i++) {	/* All of the activations */
+		if(fact->machine.vmmControl) {		/* Is this a virtual machine monitor? */
 			act = fact;			/* Yeah... */
 			break;				/* Bail the loop... */
 		}
-		fact = (thread_act_t)fact->thr_acts.next;	/* Go to the next one */
+		fact = (thread_t)fact->task_threads.next;	/* Go to the next one */
 	}
 
 	if(!((unsigned int)act)) {			/* See if we have VMMs yet */
@@ -1311,41 +1914,48 @@ int vmm_stop_vm(struct savearea *save)
 		return 0;				/* Go generate a syscall exception */
 	}
 
-	act_lock_thread(act);				/* Make sure this stays 'round */
+	thread_reference(act);
+
 	task_unlock(task);				/* Safe to release now */
 
-	CTable = act->mact.vmmControl;			/* Get the pointer to the table */
+	thread_mtx_lock(act);
+
+	CTable = act->machine.vmmControl;		/* Get the pointer to the table */
 	if(!((unsigned int)CTable & -2)) {		/* Are there any all the way up yet? */
-		act_unlock_thread(act);			/* Unlock the activation */
+		thread_mtx_unlock(act);			/* Unlock the activation */
+		thread_deallocate(act);
 		ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
 		return 0;				/* Go generate a syscall exception */
 	}
 
 	if(!(vmmask = save->save_r3)) {			/* Get the stop mask and check if all zeros */
-		act_unlock_thread(act);			/* Unlock the activation */
+		thread_mtx_unlock(act);			/* Unlock the activation */
+		thread_deallocate(act);
 		ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
 		save->save_r3 = KERN_SUCCESS;		/* Set success */
 		return 1;				/* Return... */
 	}
 
-	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search slots */
+	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search slots */
 		if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) {	/* See if we need to stop and if it is in use */
-			hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop);	/* Set this one to stop */
+			(void)hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop);	/* Set this one to stop */
 		}
 		vmmask = vmmask << 1;			/* Slide mask over */
 	}
 
-	if(hw_compare_and_store(0, 1, &act->mact.emPendRupts)) {	/* See if there is already a stop pending and lock out others if not */
-		act_unlock_thread(act);			/* Already one pending, unlock the activation */
+	if(hw_compare_and_store(0, 1, &act->machine.emPendRupts)) {	/* See if there is already a stop pending and lock out others if not */
+		thread_mtx_unlock(act);			/* Already one pending, unlock the activation */
+		thread_deallocate(act);
 		ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
 		save->save_r3 = KERN_SUCCESS;		/* Say we did it... */
 		return 1;				/* Leave */
 	}
 
 	if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) {	/* Get a return handler control block */
-		act->mact.emPendRupts = 0;		/* No memory, say we have given up request */
-		act_unlock_thread(act);			/* Unlock the activation */
+		act->machine.emPendRupts = 0;		/* No memory, say we have given up request */
+		thread_mtx_unlock(act);			/* Unlock the activation */
+		thread_deallocate(act);
 		ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
 		save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
 		return 1;				/* Return... */
@@ -1361,7 +1971,8 @@ int vmm_stop_vm(struct savearea *save)
 	act_set_apc(act);				/* Set an APC AST */
 	ml_set_interrupts_enabled(TRUE);		/* Enable interruptions now */
 
-	act_unlock_thread(act);				/* Unlock the activation */
+	thread_mtx_unlock(act);				/* Unlock the activation */
+	thread_deallocate(act);
 
 	ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
 	save->save_r3 = KERN_SUCCESS;			/* Hip, hip, hooray... */
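The stop mask in r3 is consumed MSB-first: bit 31 down to bit 0 correspond to context slots 0 through kVmmMaxContexts-1, and each set bit marks an in-use context with vmmXStop. A sketch of that walk, with assumed flag values chosen only for illustration:

    #include <stdio.h>

    #define kVmmMaxContexts 32  /* per the renamed constant in this diff */
    #define vmmInUse 0x01       /* assumed flag values, for illustration */
    #define vmmXStop 0x80

    int main(void) {
        unsigned flags[kVmmMaxContexts] = { vmmInUse, 0, vmmInUse };
        unsigned vmmask = 0xA0000000;  /* ask to stop slots 0 and 2 */

        for (int cvi = 0; cvi < kVmmMaxContexts; cvi++) {
            if ((vmmask & 0x80000000U) && (flags[cvi] & vmmInUse))
                flags[cvi] |= vmmXStop; /* mark this context to stop */
            vmmask <<= 1;               /* slide mask over */
        }
        printf("slot0 %02X slot1 %02X slot2 %02X\n", flags[0], flags[1], flags[2]);
        return 0;  /* slot0 and slot2 pick up vmmXStop */
    }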
*/ - panic("vmm_interrupt: no user context; act = %08X\n", act); + panic("vmm_interrupt: no user context; act = %p\n", act); } sv->save_exception = kVmmStopped*4; /* Set a "stopped" exception */ vmm_force_exit(act, sv); /* Intercept a running VM */ } ml_set_interrupts_enabled(inter); /* Put interrupts back to what they were */ - - return; }