/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.c
**
** C routines that we are adding to the MacOS X kernel.
**
-----------------------------------------------------------------------*/
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <ppc/vmachmon.h>
#include <ppc/lowglobals.h>
extern double FloatInit;
extern unsigned long QNaNbarbarian[4];
/*************************************************************************************
	Virtual Machine Monitor Internal Routines
**************************************************************************************/
/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function verifies an index and returns the corresponding vmm context entry.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/
static vmmCntrlEntry *vmm_get_entry(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlTable	*CTable;
	vmmCntrlEntry	*CEntry;

	index = index & vmmTInum;							/* Clean up the index */

	if (act->machine.vmmControl == 0) return NULL;		/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContexts) return NULL;	/* Index not in range */

	CTable = act->machine.vmmControl;					/* Make the address a bit more convenient */
	CEntry = &CTable->vmmc[index - 1];					/* Point to the entry */

	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;	/* See if the slot is actually in use */

	return CEntry;										/* Return the validated entry */
}
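
/*
 *	Editor's illustrative sketch, not part of the original source: the index is
 *	one-based, so slot 1 maps to vmmc[0]. A hypothetical caller might validate a
 *	context like this before touching its state.
 */
#if 0	/* example only */
	vmmCntrlEntry *entry = vmm_get_entry(current_thread(), 1);	/* first context slot */
	if (entry == NULL) {
		/* thread is not a vmm, index out of range, or slot not in use */
	}
#endif
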
/*-----------------------------------------------------------------------
** vmm_get_adsp
**
** This function verifies and returns the pmap for an address space.
** If there is none and the request is valid, a pmap will be created.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a pmap or 0 if not found or could not be created
**		Note that if there is no pmap for the address space it will be created.
-----------------------------------------------------------------------*/
static pmap_t vmm_get_adsp(thread_t act, vmm_thread_index_t index)
{
	pmap_t pmap;

	if (act->machine.vmmControl == 0) return NULL;		/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContexts) return NULL;	/* Index not in range */

	pmap = act->machine.vmmControl->vmmAdsp[index - 1];	/* Get the pmap */
	return (pmap);										/* and return it. */
}
/*-----------------------------------------------------------------------
** vmm_build_shadow_hash
**
** Allocate and initialize a shadow hash table.
**
** This function assumes that PAGE_SIZE is 4k-bytes.
**
-----------------------------------------------------------------------*/
static pmap_vmm_ext *vmm_build_shadow_hash(pmap_t pmap)
{
	pmap_vmm_ext	*ext;							/* VMM pmap extension we're building */
	ppnum_t			extPP;							/* VMM pmap extension physical page number */
	kern_return_t	ret;							/* Return code from various calls */
	uint32_t		pages = GV_HPAGES;				/* Number of pages in the hash table */
	vm_offset_t		free = VMX_HPIDX_OFFSET;		/* Offset into extension page of free area (128-byte aligned) */
	uint32_t		freeSize = PAGE_SIZE - free;	/* Number of free bytes in the extension page */
	uint32_t		idx;							/* Hash-table page index */

	if ((pages * sizeof(addr64_t)) + (pages * sizeof(vm_offset_t)) > freeSize) {
		panic("vmm_build_shadow_hash: too little pmap_vmm_ext free space\n");
	}

	ret = kmem_alloc_wired(kernel_map, (vm_offset_t *)&ext, PAGE_SIZE);
													/* Allocate a page-sized extension block */
	if (ret != KERN_SUCCESS) return (NULL);			/* Return NULL for failed allocate */
	bzero((char *)ext, PAGE_SIZE);					/* Zero the entire extension block page */

	extPP = pmap_find_phys(kernel_pmap, (vm_offset_t)ext);
													/* Get extension block's physical page number */
	if (!extPP) {									/* This should not fail, but then again... */
		panic("vmm_build_shadow_hash: could not translate pmap_vmm_ext vaddr %08X\n", ext);
	}

	ext->vmxSalt = (addr64_t)(vm_offset_t)ext ^ ptoa_64(extPP);
													/* Set effective<->physical conversion salt */
	ext->vmxHostPmapPhys = (addr64_t)(vm_offset_t)pmap ^ pmap->pmapvr;
													/* Set host pmap's physical address */
	ext->vmxHostPmap = pmap;						/* Set host pmap's effective address */
	ext->vmxHashPgIdx = (addr64_t *)((vm_offset_t)ext + VMX_HPIDX_OFFSET);
													/* Allocate physical index */
	ext->vmxHashPgList = (vm_offset_t *)((vm_offset_t)ext + VMX_HPLIST_OFFSET);
													/* Allocate page list */
	ext->vmxActiveBitmap = (vm_offset_t *)((vm_offset_t)ext + VMX_ACTMAP_OFFSET);
													/* Allocate active mapping bitmap */

	/* The hash table is typically larger than a single page, but we don't require it to be in a
	   contiguous virtual or physical chunk. So, we allocate it page by page, noting the effective
	   and physical address of each page in vmxHashPgList and vmxHashPgIdx, respectively. */

	for (idx = 0; idx < pages; idx++) {
		ret = kmem_alloc_wired(kernel_map, &ext->vmxHashPgList[idx], PAGE_SIZE);
													/* Allocate a hash-table page */
		if (ret != KERN_SUCCESS) goto fail;			/* Allocation failed, exit through cleanup */
		bzero((char *)ext->vmxHashPgList[idx], PAGE_SIZE);	/* Zero the page */
		ext->vmxHashPgIdx[idx] = ptoa_64(pmap_find_phys(kernel_pmap, (addr64_t)ext->vmxHashPgList[idx]));
													/* Put page's physical address into index */
		if (!ext->vmxHashPgIdx[idx]) {				/* Hash-table page's LRA failed */
			panic("vmm_build_shadow_hash: could not translate hash-table vaddr %08X\n", ext->vmxHashPgList[idx]);
		}
		mapping_t *map = (mapping_t *)ext->vmxHashPgList[idx];
		uint32_t mapIdx;
		for (mapIdx = 0; mapIdx < GV_SLTS_PPG; mapIdx++) {	/* Iterate over mappings in this page */
			map->mpFlags = (mpGuest | mpgFree);		/* Mark guest type and free */
			map = (mapping_t *)((char *)map + GV_SLOT_SZ);	/* Next slot-sized mapping */
		}
	}

	return (ext);									/* Return newly-minted VMM pmap extension */

fail:
	for (idx = 0; idx < pages; idx++) {				/* De-allocate any pages we managed to allocate */
		if (ext->vmxHashPgList[idx]) {
			kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
		}
	}

	kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE);	/* Release the VMM pmap extension page */
	return (NULL);									/* Return NULL for failure */
}
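
/*
 *	Editor's illustrative sketch, not part of the original source: vmxSalt is the
 *	XOR of the extension block's effective and physical addresses, so either one
 *	can be recovered from the other with a single XOR.
 */
#if 0	/* example only */
	addr64_t effAddr  = (addr64_t)(vm_offset_t)ext;
	addr64_t physAddr = effAddr ^ ext->vmxSalt;		/* yields ptoa_64(extPP) */
	addr64_t backAddr = physAddr ^ ext->vmxSalt;	/* XOR again recovers effAddr */
#endif
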
/*-----------------------------------------------------------------------
** vmm_release_shadow_hash
**
** Release shadow hash table and VMM extension block
**
-----------------------------------------------------------------------*/
static void vmm_release_shadow_hash(pmap_vmm_ext *ext)
{
	uint32_t idx;

	for (idx = 0; idx < GV_HPAGES; idx++) {			/* Release the hash table page by page */
		kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
	}

	kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE);	/* Release the VMM pmap extension page */
}
/*-----------------------------------------------------------------------
** vmm_activate_gsa
**
** Activate guest shadow assist
**
-----------------------------------------------------------------------*/
static kern_return_t vmm_activate_gsa(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlTable *CTable = act->machine.vmmControl;	/* Get VMM control table */
	if (!CTable) {										/* Caller guarantees that this will work */
		panic("vmm_activate_gsa: VMM control table not present; act = %08X, idx = %d\n",
			act, index);
		return KERN_FAILURE;
	}
	vmmCntrlEntry *CEntry = vmm_get_entry(act, index);	/* Get context from index */
	if (!CEntry) {										/* Caller guarantees that this will work */
		panic("vmm_activate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
			act, index);
		return KERN_FAILURE;
	}

	pmap_t hpmap = act->map->pmap;						/* Get host pmap */
	pmap_t gpmap = vmm_get_adsp(act, index);			/* Get guest pmap */
	if (!gpmap) {										/* Caller guarantees that this will work */
		panic("vmm_activate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
			act, index);
		return KERN_FAILURE;
	}

	if (!hpmap->pmapVmmExt) {							/* If there's no VMM extension for this host, create one */
		hpmap->pmapVmmExt = vmm_build_shadow_hash(hpmap);	/* Build VMM extension plus shadow hash and attach */
		if (hpmap->pmapVmmExt) {						/* See if we succeeded */
			hpmap->pmapVmmExtPhys = (addr64_t)(vm_offset_t)hpmap->pmapVmmExt ^ hpmap->pmapVmmExt->vmxSalt;
													/* Get VMM extension block's physical address */
		} else {
			return KERN_RESOURCE_SHORTAGE;				/* Not enough mojo to go */
		}
	}
	gpmap->pmapVmmExt = hpmap->pmapVmmExt;				/* Copy VMM extension block virtual address into guest */
	gpmap->pmapVmmExtPhys = hpmap->pmapVmmExtPhys;		/* and its physical address, too */
	gpmap->pmapFlags |= pmapVMgsaa;						/* Enable GSA for this guest */
	CEntry->vmmXAFlgs |= vmmGSA;						/* Show GSA active here, too */

	return KERN_SUCCESS;
}
/*-----------------------------------------------------------------------
** vmm_deactivate_gsa
**
** Deactivate guest shadow assist
**
-----------------------------------------------------------------------*/
static void vmm_deactivate_gsa(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry *CEntry = vmm_get_entry(act, index);	/* Get context from index */
	if (!CEntry) {										/* Caller guarantees that this will work */
		panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
			act, index);
	}

	pmap_t gpmap = vmm_get_adsp(act, index);			/* Get guest pmap */
	if (!gpmap) {										/* Caller guarantees that this will work */
		panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
			act, index);
	}

	gpmap->pmapFlags &= ~pmapVMgsaa;					/* Deactivate GSA for this guest */
	CEntry->vmmXAFlgs &= ~vmmGSA;						/* Show GSA deactivated here, too */
}
/*-----------------------------------------------------------------------
** vmm_flush_context
**
** Flush specified guest context, purging all guest mappings and clearing
** the context page.
**
-----------------------------------------------------------------------*/
static void vmm_flush_context(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	vmm_state_page_t	*vks;
	vmm_version_t		version;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (!CEntry) {								/* Caller guarantees that this will work */
		panic("vmm_flush_context: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
			act, index);
	}

	if(CEntry->vmmFacCtx.FPUsave) {				/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {				/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}

	vmm_unmap_all_pages(act, index);			/* Blow away all mappings for this context */

	CTable = act->machine.vmmControl;			/* Get the control table address */
	CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;	/* Make sure we don't try to automap into this */

	CEntry->vmmFlags &= vmmInUse;				/* Clear out all of the flags for this entry except in use */
	CEntry->vmmFacCtx.FPUsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;				/* Clear facility context control */

	vks = CEntry->vmmContextKern;				/* Get address of the context page */
	version = vks->interface_version;			/* Save the version code */
	bzero((char *)vks, 4096);					/* Clear all */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = index % vmmTInum;		/* Tell the user the index for this virtual machine */

	return;										/* Context is now flushed */
}
/*************************************************************************************
	Virtual Machine Monitor Exported Functionality

	The following routines are used to implement a quick-switch mechanism for
	virtual machines that need to execute within their own processor environment
	(including register and MMU state).
**************************************************************************************/
/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for their needs.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing major/minor version of
**		the Vmm module
-----------------------------------------------------------------------*/
int vmm_get_version(struct savearea *save)
{
	save->save_r3 = kVmmCurrentVersion;			/* Return the version */
	return 1;
}
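
/*
 *	Editor's illustrative sketch, not part of the original source: a client splits
 *	the returned version word exactly as the header comment above describes.
 */
#if 0	/* example only */
	unsigned int version;						/* value returned in save_r3 */
	unsigned int major = version >> 16;			/* top 16 bits: major version */
	unsigned int minor = version & 0xFFFF;		/* bottom 16 bits: minor version */
#endif
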
/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing functionality supported by this
**		version of the Vmm module
-----------------------------------------------------------------------*/
int vmm_get_features(struct savearea *save)
{
	save->save_r3 = kVmmCurrentFeatures;		/* Return the features */
	if(getPerProc()->pf.Available & pf64Bit) {
		save->save_r3 &= ~kVmmFeature_LittleEndian;	/* No little endian here */
		save->save_r3 |= kVmmFeature_SixtyFourBit;	/* Set that we can do 64-bit */
	}
	return 1;
}
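
/*
 *	Editor's illustrative sketch, not part of the original source: a client tests a
 *	feature bit before relying on it; kVmmFeature_SixtyFourBit is the flag set in
 *	the 64-bit branch above.
 */
#if 0	/* example only */
	unsigned int features;						/* value returned in save_r3 */
	if (features & kVmmFeature_SixtyFourBit) {
		/* 64-bit guest contexts may be requested via vmm_activate_XA */
	}
#endif
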
/*-----------------------------------------------------------------------
** vmm_max_addr
**
** This function returns the maximum addressable virtual address supported
**
** Outputs:
**		Returns max address
-----------------------------------------------------------------------*/
addr64_t vmm_max_addr(thread_t act)
{
	return vm_max_address;						/* Return the maximum address */
}
/*-----------------------------------------------------------------------
** vmm_get_XA
**
** This function retrieves the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Return code is set to the XA flags. If the index is invalid or the
**		context has not been created, we return 0.
-----------------------------------------------------------------------*/
unsigned int vmm_get_XA(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry	*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return 0;				/* Either this isn't a vmm or the index is bogus */

	return CEntry->vmmXAFlgs;					/* Return the flags */
}
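
/*
 *	Editor's illustrative sketch, not part of the original source: because the XA
 *	flags come back in the return code itself, a zero return from a bogus index is
 *	indistinguishable from "no flags set"; callers should validate the context first.
 */
#if 0	/* example only */
	if (vmm_get_XA(act, index) & vmm64Bit) {
		/* this context is running in 64-bit mode */
	}
#endif
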
/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**		act - pointer to current thread activation
**		version - requested version of the Vmm interface (allowing
**			future versions of the interface to change, but still
**			support older clients)
**		vmm_user_state - pointer to a logical page within the
**			client's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/
int vmm_init_context(struct savearea *save)
{
	thread_t			act;
	vmm_version_t		version;
	vmm_state_page_t	*vmm_user_state;
	vmmCntrlTable		*CTable;
	vm_offset_t			conkern;
	vmm_state_page_t	*vks;
	ppnum_t				conphys;
	kern_return_t		ret;
	int					cvi, i;
	task_t				task;
	thread_t			fact, gact;

	vmm_user_state = CAST_DOWN(vmm_state_page_t *, save->save_r4);	/* Get the user address of the comm area */
	if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {	/* Make sure the comm area is page aligned */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	/* Make sure that the version requested is supported */
	version = save->save_r3;					/* Pick up passed in version */
	if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	if((version & 0xFFFF) > kVmmCurMinorVersion) {	/* Check for valid minor */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	act = current_thread();						/* Pick up our activation */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();						/* Figure out who we are */

	task_lock(task);							/* Lock our task */

	fact = (thread_t)task->threads.next;		/* Get the first activation on task */
	gact = 0;									/* Pretend we didn't find it yet */

	for(i = 0; i < task->thread_count; i++) {	/* All of the activations */
		if(fact->machine.vmmControl) {			/* Is this a virtual machine monitor? */
			gact = fact;						/* Yeah... */
			break;								/* Bail the loop... */
		}
		fact = (thread_t)fact->task_threads.next;	/* Go to the next one */
	}

/*
 *	We only allow one thread per task to be a virtual machine monitor right now. This solves
 *	a number of potential problems that I can't put my finger on right now.
 *
 *	Ultimately, I think we want to move the controls and make all this task based instead of
 *	thread based. That would allow an emulator architecture to spawn a kernel thread for each
 *	VM (if they want) rather than hand dispatch contexts.
 */

	if(gact && (gact != act)) {					/* Check if another thread is a vmm or trying to be */
		task_unlock(task);						/* Release task lock */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_FAILURE;			/* We must play alone... */
		return 1;
	}

	if(!gact) act->machine.vmmControl = (vmmCntrlTable *)1;	/* Temporarily mark that we are the vmm thread */

	task_unlock(task);							/* Safe to release now (because we've marked ourselves) */

	CTable = act->machine.vmmControl;			/* Get the control table address */
	if ((unsigned int)CTable == 1) {			/* If we are marked, try to allocate a new table, otherwise we have one */
		if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {	/* Get a fresh emulation control table */
			act->machine.vmmControl = 0;		/* Unmark us as vmm 'cause we failed */
			ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
			save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
			return 1;
		}

		bzero((void *)CTable, sizeof(vmmCntrlTable));	/* Clean it up */
		act->machine.vmmControl = CTable;		/* Initialize the table anchor */
	}

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search to find a free slot */
		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;	/* Bail if we find an unused slot */
	}

	if(cvi >= kVmmMaxContexts) {				/* Did we find one? */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No empty slots... */
		return 1;
	}

	ret = vm_map_wire(							/* Wire the virtual machine monitor's context area */
		act->map,
		(vm_offset_t)vmm_user_state,
		(vm_offset_t)vmm_user_state + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if (ret != KERN_SUCCESS)					/* The wire failed, return the code */
		goto return_in_shame;

	/* Map the vmm state into the kernel's address space. */
	conphys = pmap_find_phys(act->map->pmap, (addr64_t)((uintptr_t)vmm_user_state));

	/* Find a virtual address to use. */
	ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {					/* Did we find an address? */
		(void) vm_map_unwire(act->map,			/* No, unwire the context area */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		goto return_in_shame;
	}

	/* Map it into the kernel's address space. */

	pmap_enter(kernel_pmap, conkern, conphys,
		VM_PROT_READ | VM_PROT_WRITE,
		VM_WIMG_USE_DEFAULT, TRUE);

	/* Clear the vmm state structure. */
	vks = (vmm_state_page_t *)conkern;
	bzero((char *)vks, PAGE_SIZE);


	/* We're home free now. Simply fill in the necessary info and return. */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = cvi + 1;				/* Tell the user the index for this virtual machine */

	CTable->vmmc[cvi].vmmFlags = vmmInUse;		/* Mark the slot in use and make sure the rest are clear */
	CTable->vmmc[cvi].vmmContextKern = vks;		/* Remember the kernel address of comm area */
	CTable->vmmc[cvi].vmmContextPhys = conphys;	/* Remember the state page physical addr */
	CTable->vmmc[cvi].vmmContextUser = vmm_user_state;	/* Remember user address of comm area */

	CTable->vmmc[cvi].vmmFacCtx.FPUsave = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUlevel = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXsave = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXlevel = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.facAct = act;	/* Point back to the activation */

	hw_atomic_add((int *)&saveanchor.savetarget, 2);	/* Account for the number of extra saveareas we think we might "need" */

	pmap_t hpmap = act->map->pmap;				/* Get host pmap */
	pmap_t gpmap = pmap_create(0);				/* Make a fresh guest pmap */
	if (gpmap) {								/* Did we succeed ? */
		CTable->vmmAdsp[cvi] = gpmap;			/* Remember guest pmap for new context */
		if (lowGlo.lgVMMforcedFeats & vmmGSA) {	/* Forcing on guest shadow assist ? */
			vmm_activate_gsa(act, cvi+1);		/* Activate GSA */
		}
	} else {
		ret = KERN_RESOURCE_SHORTAGE;			/* We've failed to allocate a guest pmap */
		goto return_in_shame;					/* Shame on us. */
	}

	if (!(hpmap->pmapFlags & pmapVMhost)) {		/* Do this stuff if this is our first time hosting */
		hpmap->pmapFlags |= pmapVMhost;			/* We're now hosting */
	}

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;

return_in_shame:
	if(!gact) kfree(CTable, sizeof(vmmCntrlTable));	/* Toss the table if we just allocated it */
	act->machine.vmmControl = 0;				/* Unmark us as vmm 'cause we failed */
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = ret;						/* Pass back return code... */
	return 1;
}
/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		kernel return code indicating success or failure
**
** Strangeness note:
**		This call will also trash the address space with the same ID. While this
**		is really not too cool, we have to do it because we need to make
**		sure that old VMM users (not that we really have any) who depend upon
**		the address space going away with the context still work the same.
-----------------------------------------------------------------------*/
kern_return_t vmm_tear_down_context(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry	*CEntry;
	vmmCntrlTable	*CTable;
	int				cvi;
	register savearea *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	hw_atomic_sub((int *)&saveanchor.savetarget, 2);	/* We don't need these extra saveareas anymore */

	if(CEntry->vmmFacCtx.FPUsave) {				/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {				/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}

	CEntry->vmmPmap = 0;						/* Remove this trace */
	pmap_t gpmap = act->machine.vmmControl->vmmAdsp[index - 1];
												/* Get context's guest pmap (if any) */
	if (gpmap) {								/* Check if there is an address space assigned here */
		if (gpmap->pmapFlags & pmapVMgsaa) {	/* Handle guest shadow assist case specially */
			hw_rem_all_gv(gpmap);				/* Remove all guest mappings from shadow hash table */
		} else {
			mapping_remove(gpmap, 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
			pmap_remove(gpmap, 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
		}
		pmap_destroy(gpmap);					/* Toss the pmap for this context */
		act->machine.vmmControl->vmmAdsp[index - 1] = NULL;	/* Clean it up */
	}

	(void) vm_map_unwire(						/* Unwire the user comm page */
		act->map,
		(vm_offset_t)CEntry->vmmContextUser,
		(vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
		FALSE);

	kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);	/* Remove kernel's view of the comm page */

	CTable = act->machine.vmmControl;			/* Get the control table address */
	CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;	/* Make sure we don't try to automap into this */

	CEntry->vmmFlags = 0;						/* Clear out all of the flags for this entry including in use */
	CEntry->vmmContextKern = 0;					/* Clear the kernel address of comm area */
	CEntry->vmmContextUser = 0;					/* Clear the user address of comm area */

	CEntry->vmmFacCtx.FPUsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.facAct = 0;				/* Clear facility context control */

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search to find a free slot */
		if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {	/* Return if there are still some in use */
			ml_set_interrupts_enabled(FALSE);	/* No more interruptions */
			return KERN_SUCCESS;				/* Leave... */
		}
	}

/*
 *	When we have tossed the last context, toss any address spaces left over before releasing
 *	the VMM control block
 */

	for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {	/* Look at all slots */
		if(!act->machine.vmmControl->vmmAdsp[cvi - 1]) continue;	/* Nothing to remove here */
		mapping_remove(act->machine.vmmControl->vmmAdsp[cvi - 1], 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
		pmap_remove(act->machine.vmmControl->vmmAdsp[cvi - 1], 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
		pmap_destroy(act->machine.vmmControl->vmmAdsp[cvi - 1]);	/* Toss the pmap for this context */
		act->machine.vmmControl->vmmAdsp[cvi - 1] = 0;	/* Clear just in case */
	}

	pmap_t pmap = act->map->pmap;				/* Get our pmap */
	if (pmap->pmapVmmExt) {						/* Release any VMM pmap extension block and shadow hash table */
		vmm_release_shadow_hash(pmap->pmapVmmExt);	/* Release extension block and shadow hash table */
		pmap->pmapVmmExt = 0;					/* Forget extension block */
		pmap->pmapVmmExtPhys = 0;				/* Forget extension block's physical address, too */
	}
	pmap->pmapFlags &= ~pmapVMhost;				/* We're no longer hosting */

	kfree(CTable, sizeof(vmmCntrlTable));		/* Toss the table because we tossed the last context */
	act->machine.vmmControl = 0;				/* Unmark us as vmm */

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return KERN_SUCCESS;
}
/*-----------------------------------------------------------------------
** vmm_activate_XA
**
** This function activates the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
** Note that this function does a lot of the same stuff as vmm_tear_down_context
** and vmm_init_context.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**		flags - the extended architecture flags
**
** Outputs:
**		KERN_SUCCESS if vm is valid and initialized. KERN_FAILURE if not.
**		Also, the internal flags are set and, additionally, the VM is completely reset.
-----------------------------------------------------------------------*/
kern_return_t vmm_activate_XA(
	thread_t			act,
	vmm_thread_index_t	index,
	unsigned int		xaflags)
{
	vmmCntrlEntry	*CEntry;
	kern_return_t	result = KERN_SUCCESS;		/* Assume success */

	if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && (!(getPerProc()->pf.Available & pf64Bit))))
		return (KERN_FAILURE);					/* Unknown or unsupported feature requested */

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	vmm_flush_context(act, index);				/* Flush the context */

	if (xaflags & vmm64Bit) {					/* Activating 64-bit mode ? */
		CEntry->vmmXAFlgs |= vmm64Bit;			/* Activate 64-bit mode */
	}

	if (xaflags & vmmGSA) {						/* Activating guest shadow assist ? */
		result = vmm_activate_gsa(act, index);	/* Activate guest shadow assist */
	}

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return result;								/* Return activate result */
}
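
/*
 *	Editor's illustrative sketch, not part of the original source: XA flags may be
 *	combined in a single call, and the context is flushed either way, so the caller
 *	must expect a completely reset VM afterward.
 */
#if 0	/* example only */
	kern_return_t kr = vmm_activate_XA(act, index, vmm64Bit | vmmGSA);
	if (kr != KERN_SUCCESS) {
		/* feature unsupported on this processor, or GSA allocation failed */
	}
#endif
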
/*-----------------------------------------------------------------------
** vmm_deactivate_XA
**
-----------------------------------------------------------------------*/
kern_return_t vmm_deactivate_XA(
	thread_t			act,
	vmm_thread_index_t	index,
	unsigned int		xaflags)
{
	vmmCntrlEntry	*CEntry;
	kern_return_t	result = KERN_SUCCESS;		/* Assume success */

	if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && (getPerProc()->pf.Available & pf64Bit)))
		return (KERN_FAILURE);					/* Unknown or unsupported feature requested */

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	vmm_flush_context(act, index);				/* Flush the context */

	if (xaflags & vmm64Bit) {					/* Deactivating 64-bit mode ? */
		CEntry->vmmXAFlgs &= ~vmm64Bit;			/* Deactivate 64-bit mode */
	}

	if (xaflags & vmmGSA) {						/* Deactivating guest shadow assist ? */
		vmm_deactivate_gsa(act, index);			/* Deactivate guest shadow assist */
	}

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return result;								/* Return deactivate result */
}
/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**		activation to tear down
**
** Outputs:
**		All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/
void vmm_tear_down_all(thread_t act) {

	vmmCntrlTable	*CTable;
	int				cvi;
	kern_return_t	ret;
	savearea		*save;
	spl_t			s;

	if(act->machine.specFlags & runningVM) {	/* Are we actually in a context right now? */
		save = find_user_regs(act);				/* Find the user state context */
		if(!save) {								/* Did we find it? */
			panic("vmm_tear_down_all: runningVM marked but no user state context\n");
			return;
		}

		save->save_exception = kVmmBogusContext*4;	/* Indicate that this context is bogus now */
		s = splhigh();							/* Make sure interrupts are off */
		vmm_force_exit(act, save);				/* Force an exit from VM state */
		splx(s);								/* Restore interrupts */
	}

	if(CTable = act->machine.vmmControl) {		/* Do we have a vmm control block? */

		for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {	/* Look at all slots */
			if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) {	/* Is this one in use */
				ret = vmm_tear_down_context(act, cvi);	/* Take down the found context */
				if(ret != KERN_SUCCESS) {		/* Did it go away? */
					panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
						ret, act, cvi);
				}
			}
		}

/*
 *		Note that all address spaces should be gone here.
 */
		if(act->machine.vmmControl) {			/* Did we find one? */
			panic("vmm_tear_down_all: control table did not get deallocated\n");	/* Table did not go away */
		}
	}

	return;
}
/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it, and this mapping may disappear at any time, even before it gets used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of address space to map into
**		va - virtual address within the client's address
**			space
**		ava - virtual address within the alternate address
**			space
**		prot - protection flags
**
**		Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
**		areas are not allowed and will fail. Same with directly mapped I/O areas.
**
** Input conditions:
**		Interrupts disabled (from fast trap)
**
** Outputs:
**		kernel return code indicating success or failure
**		if success, va resident and alternate mapping made
-----------------------------------------------------------------------*/
kern_return_t vmm_map_page(
	thread_t			act,
	vmm_adsp_id_t		index,
	addr64_t			cva,
	addr64_t			ava,
	vm_prot_t			prot)
{
	kern_return_t		ret;
	register mapping_t	*mp;
	vm_map_t			map;
	addr64_t			ova, nextva;
	pmap_t				pmap;

	pmap = vmm_get_adsp(act, index);			/* Get the guest pmap for this address space */
	if(!pmap) return KERN_FAILURE;				/* Bogus address space, no VMs, or we can't make a pmap, failure... */

	if(ava > vm_max_address) return kVmmInvalidAddress;	/* Does the machine support an address of this size? */

	map = current_thread()->map;				/* Get the host's map */

	if (pmap->pmapFlags & pmapVMgsaa) {			/* Guest shadow assist active ? */
		ret = hw_res_map_gv(map->pmap, pmap, cva, ava, getProtPPC(prot));
												/* Attempt to resume an existing gv->phys mapping */
		if (mapRtOK != ret) {					/* Nothing to resume, construct a new mapping */

			while (1) {							/* Find host mapping or fail */
				mp = mapping_find(map->pmap, cva, &nextva, 0);
												/* Attempt to find host mapping and pin it */
				if (mp) break;					/* Got it */

				ml_set_interrupts_enabled(TRUE);
												/* Open 'rupt window */
				ret = vm_fault(map,				/* Didn't find it, try to fault in host page read/write */
					vm_map_trunc_page(cva),
					VM_PROT_READ | VM_PROT_WRITE,
					FALSE,						/* change wiring */
					THREAD_UNINT,
					NULL,
					0);
				ml_set_interrupts_enabled(FALSE);
												/* Close 'rupt window */
				if (ret != KERN_SUCCESS)
					return KERN_FAILURE;		/* Fault failed, return failure */
			}

			if (mpNormal != (mp->mpFlags & mpType)) {
												/* Host mapping must be a vanilla page */
				mapping_drop_busy(mp);			/* Un-pin host mapping */
				return KERN_FAILURE;			/* Return failure */
			}

			/* Partially construct gv->phys mapping */
			unsigned int pindex;
			phys_entry_t *physent = mapping_phys_lookup(mp->mpPAddr, &pindex);
			if (!physent) {
				mapping_drop_busy(mp);
				return KERN_FAILURE;
			}
			unsigned int pattr = ((physent->ppLink & (ppI | ppG)) >> 60);
			unsigned int wimg = 0x2;
			if (pattr & mmFlgCInhib) wimg |= 0x4;
			if (pattr & mmFlgGuarded) wimg |= 0x1;
			unsigned int mflags = (pindex << 16) | mpGuest;
			addr64_t gva = ((ava & ~mpHWFlags) | (wimg << 3) | getProtPPC(prot));

			hw_add_map_gv(map->pmap, pmap, gva, mflags, mp->mpPAddr);
												/* Construct new guest->phys mapping */

			mapping_drop_busy(mp);				/* Un-pin host mapping */
		}
	} else {
		while(1) {								/* Keep trying until we get it or until we fail */

			mp = mapping_find(map->pmap, cva, &nextva, 0);	/* Find the mapping for this address */

			if(mp) break;						/* We found it */

			ml_set_interrupts_enabled(TRUE);	/* Enable interruptions */
			ret = vm_fault(map,					/* Didn't find it, try to fault it in read/write... */
				vm_map_trunc_page(cva),
				VM_PROT_READ | VM_PROT_WRITE,
				FALSE,							/* change wiring */
				THREAD_UNINT,
				NULL,
				0);
			ml_set_interrupts_enabled(FALSE);	/* Disable interruptions */
			if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* There isn't a page there, return... */
		}

		if((mp->mpFlags & mpType) != mpNormal) {	/* If this is a block, a nest, or some other special thing, we can't map it */
			mapping_drop_busy(mp);				/* We have everything we need from the mapping */
			return KERN_FAILURE;				/* Leave in shame */
		}

		while(1) {								/* Keep trying the enter until it goes in */
			ova = mapping_make(pmap, ava, mp->mpPAddr, 0, 1, prot);	/* Enter the mapping into the pmap */
			if(!ova) break;						/* If there were no collisions, we are done... */
			mapping_remove(pmap, ova);			/* Remove the mapping that collided */
		}

		mapping_drop_busy(mp);					/* We have everything we need from the mapping */
	}

	if (!((getPerProc()->spcFlags) & FamVMmode)) {
		act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;	/* Remember last address space */
	}

	return KERN_SUCCESS;
}
/*-----------------------------------------------------------------------
** vmm_map_execute
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context and then directly starts executing.
**
** See description of vmm_map_page for details.
**
** Inputs:
**		Index is used for both the context and the address space ID.
**		index[24:31] is the context id and index[16:23] is the address space.
**		If the address space ID is 0, the context ID is used for it.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/
vmm_return_code_t vmm_map_execute(
	thread_t			act,
	vmm_thread_index_t	index,
	addr64_t			cva,
	addr64_t			ava,
	vm_prot_t			prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;
	unsigned int		adsp;
	vmm_thread_index_t	cndx;

	cndx = index & 0xFF;						/* Clean it up */

	CEntry = vmm_get_entry(act, cndx);			/* Get and validate the index */
	if (CEntry == NULL) return kVmmBogusContext;	/* Return bogus context */

	if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
		return kVmmBogusContext;				/* Yes, invalid index in Fam */

	adsp = (index >> 8) & 0xFF;					/* Get any requested address space */
	if(!adsp) adsp = (index & 0xFF);			/* If 0, use context ID as address space ID */

	ret = vmm_map_page(act, adsp, cva, ava, prot);	/* Go try to map the page on in */

	if(ret == KERN_SUCCESS) {
		act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;	/* Remember last address space */
		vmm_execute_vm(act, cndx);				/* Return was ok, launch the VM */
	}

	return ret;									/* We had trouble mapping in the page */
}
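
/*
 *	Editor's illustrative sketch, not part of the original source: composing the
 *	packed index the header comment describes, with the context ID in the low byte
 *	and an optional address space ID in the next byte (0 means "use the context ID").
 */
#if 0	/* example only */
	vmm_thread_index_t packed = (adsp << 8) | cndx;
#endif
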
/*-----------------------------------------------------------------------
** vmm_map_list
**
** This function maps a list of pages into various address spaces
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of default address space (used if not specified in list entry)
**		count - number of pages to release
**		flavor - 0 if 32-bit version, 1 if 64-bit
**		vmcpComm in the comm page contains up to kVmmMaxMapPages to map
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxMapPages is exceeded
**		or the vmm_map_page call fails.
**		We return kVmmInvalidAddress if virtual address size is not supported
-----------------------------------------------------------------------*/
kern_return_t vmm_map_list(
	thread_t			act,
	vmm_adsp_id_t		index,
	unsigned int		cnt,
	unsigned int		flavor)
{
	vmmCntrlEntry	*CEntry;
	kern_return_t	ret;
	unsigned int	i;
	vmmMList		*lst;
	vmmMList64		*lstx;
	addr64_t		cva;
	addr64_t		ava;
	vm_prot_t		prot;
	vmm_adsp_id_t	adsp;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	if(cnt > kVmmMaxMapPages) return KERN_FAILURE;	/* They tried to map too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	lst = (vmmMList *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */
	lstx = (vmmMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */

	for(i = 0; i < cnt; i++) {					/* Step and release all pages in list */
		if(flavor) {							/* Check if 32- or 64-bit addresses */
			cva = lstx[i].vmlva;				/* Get the 64-bit actual address */
			ava = lstx[i].vmlava;				/* Get the 64-bit guest address */
		} else {
			cva = lst[i].vmlva;					/* Get the 32-bit actual address */
			ava = lst[i].vmlava;				/* Get the 32-bit guest address */
		}

		prot = ava & vmmlProt;					/* Extract the protection bits */
		adsp = (ava & vmmlAdID) >> 4;			/* Extract an explicit address space request */
		if(!adsp) adsp = index - 1;				/* If no explicit, use supplied default */
		ava &= 0xFFFFFFFFFFFFF000ULL;			/* Clean up the address */

		ret = vmm_map_page(act, index, cva, ava, prot);	/* Go try to map the page on in */
		if(ret != KERN_SUCCESS) return ret;		/* Bail if any error */
	}

	return KERN_SUCCESS;						/* Return... */
}
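
/*
 *	Editor's illustrative sketch, not part of the original source: a 64-bit list
 *	entry carries the protection bits (vmmlProt) and an optional address space ID
 *	(vmmlAdID) in the low bits of the guest address. The packing below mirrors the
 *	decode in the loop above but is hypothetical as to exact field widths.
 */
#if 0	/* example only */
	vmmMList64 entry;
	entry.vmlva  = host_va;						/* page-aligned host address */
	entry.vmlava = (guest_va & 0xFFFFFFFFFFFFF000ULL) | (adsp << 4) | (prot & vmmlProt);
#endif
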
/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** Given a context index and a guest virtual address, convert the address
** to its corresponding host virtual address.
**
** Inputs:
**		act - pointer to current thread activation
**		index - context index
**		gva - guest virtual address
**
** Outputs:
**		Host virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**		If the host address space contains multiple virtual addresses mapping
**		to the physical address corresponding to the specified guest virtual
**		address (i.e., host virtual aliases), it is unpredictable which host
**		virtual address (alias) will be returned. Moral of the story: No host
**		virtual aliases.
-----------------------------------------------------------------------*/
addr64_t vmm_get_page_mapping(
	thread_t			act,
	vmm_adsp_id_t		index,
	addr64_t			gva)
{
	register mapping_t	*mp;
	pmap_t				pmap;
	addr64_t			nextva, hva;
	ppnum_t				pa;

	pmap = vmm_get_adsp(act, index);			/* Get and validate the index */
	if (!pmap) return -1;						/* No good, failure... */

	if (pmap->pmapFlags & pmapVMgsaa) {			/* Guest shadow assist (GSA) active ? */
		return (hw_gva_to_hva(pmap, gva));		/* Convert guest to host virtual address */
	} else {
		mp = mapping_find(pmap, gva, &nextva, 0);	/* Find guest mapping for this virtual address */

		if(!mp) return -1;						/* Not mapped, return -1 */

		pa = mp->mpPAddr;						/* Remember the physical page address */

		mapping_drop_busy(mp);					/* Go ahead and release the mapping now */

		pmap = current_thread()->map->pmap;		/* Get the host pmap */
		hva = mapping_p2v(pmap, pa);			/* Now find the source virtual */

		if(hva != 0) return hva;				/* We found it... */

		panic("vmm_get_page_mapping: could not back-map guest va (%016llX)\n", gva);
												/* We are bad wrong if we can't find it */

		return -1;								/* Never executed, prevents compiler warning */
	}
}
/*-----------------------------------------------------------------------
** vmm_unmap_page
**
** This function unmaps a page from the guest address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/
kern_return_t vmm_unmap_page(
	thread_t			act,
	vmm_adsp_id_t		index,
	addr64_t			va)
{
	vmmCntrlEntry	*CEntry;
	addr64_t		nadd;
	pmap_t			pmap;

	pmap = vmm_get_adsp(act, index);			/* Get and validate the index */
	if (!pmap) return -1;						/* No good, failure... */

	if (pmap->pmapFlags & pmapVMgsaa) {			/* Handle guest shadow assist specially */
		hw_susp_map_gv(act->map->pmap, pmap, va);	/* Suspend the mapping */
		return (KERN_SUCCESS);					/* Always returns success */
	} else {
		nadd = mapping_remove(pmap, va);		/* Toss the mapping */

		return ((nadd & 1) ? KERN_FAILURE : KERN_SUCCESS);	/* Return... */
	}
}
/*-----------------------------------------------------------------------
** vmm_unmap_list
**
** This function unmaps a list of pages from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		count - number of pages to release
**		flavor - 0 if 32-bit, 1 if 64-bit
**		vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
-----------------------------------------------------------------------*/
kern_return_t vmm_unmap_list(
	thread_t			act,
	vmm_adsp_id_t		index,
	unsigned int		cnt,
	unsigned int		flavor)
{
	vmmCntrlEntry	*CEntry;
	kern_return_t	kern_result = KERN_SUCCESS;
	unsigned int	*pgaddr, i;
	addr64_t		gva;
	vmmUMList		*lst;
	vmmUMList64		*lstx;
	pmap_t			pmap;
	int				adsp;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE;	/* They tried to unmap too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	lstx = (vmmUMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */
	lst = (vmmUMList *)lstx;

	for(i = 0; i < cnt; i++) {					/* Step and release all pages in list */
		if(flavor) {							/* Check if 32- or 64-bit addresses */
			gva = lstx[i].vmlava;				/* Get the 64-bit guest address */
		} else {
			gva = lst[i].vmlava;				/* Get the 32-bit guest address */
		}

		adsp = (gva & vmmlAdID) >> 4;			/* Extract an explicit address space request */
		if(!adsp) adsp = index - 1;				/* If no explicit, use supplied default */
		pmap = act->machine.vmmControl->vmmAdsp[adsp];	/* Get the pmap for this request */
		if(!pmap) continue;						/* Ain't nuthin' mapped here, no durn map... */

		gva &= 0xFFFFFFFFFFFFF000ULL;			/* Clean up the address */
		if (pmap->pmapFlags & pmapVMgsaa) {		/* Handle guest shadow assist specially */
			hw_susp_map_gv(act->map->pmap, pmap, gva);
												/* Suspend the mapping */
		} else {
			(void)mapping_remove(pmap, gva);	/* Toss the mapping */
		}
	}

	return KERN_SUCCESS;						/* Return... */
}
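
/*
 *	Editor's illustrative sketch, not part of the original source: unmap-list
 *	entries are guest addresses with the same low-bit address space encoding that
 *	the loop above decodes; the packing shown is hypothetical as to field widths.
 */
#if 0	/* example only */
	vmmUMList64 entry;
	entry.vmlava = (guest_va & 0xFFFFFFFFFFFFF000ULL) | (adsp << 4);	/* adsp 0 = default */
#endif
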
/*-----------------------------------------------------------------------
** vmm_unmap_all_pages
**
** This function unmaps all pages from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of context state
**
** Outputs:
**		none
**
** Note:
**		All pages are unmapped, but the address space (i.e., pmap) is still alive
-----------------------------------------------------------------------*/
void vmm_unmap_all_pages(
	thread_t			act,
	vmm_adsp_id_t		index)
{
	vmmCntrlEntry	*CEntry;
	pmap_t			pmap;

	pmap = vmm_get_adsp(act, index);			/* Convert index to entry */
	if (!pmap) return;							/* Either this isn't vmm thread or the index is bogus */

	if (pmap->pmapFlags & pmapVMgsaa) {			/* Handle guest shadow assist specially */
		hw_rem_all_gv(pmap);					/* Remove all guest's mappings from shadow hash table */
	} else {
/*
 *		Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
 */
		mapping_remove(pmap, 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
		pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
	}
	return;
}
/*-----------------------------------------------------------------------
** vmm_get_page_dirty_flag
**
** This function returns the changed flag of the page
** and optionally clears the flag.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**		reset - Clears dirty if true, untouched if not
**
** Outputs:
**		the dirty bit
**		clears the dirty bit in the pte if requested
**
** Note:
**		The RC bits are merged into the global physical entry
-----------------------------------------------------------------------*/
boolean_t vmm_get_page_dirty_flag(
	thread_t			act,
	vmm_adsp_id_t		index,
	addr64_t			va,
	unsigned int		reset)
{
	vmmCntrlEntry		*CEntry;
	register mapping_t	*mpv, *mp;
	unsigned int		RC;
	pmap_t				pmap;

	pmap = vmm_get_adsp(act, index);			/* Convert index to entry */
	if (!pmap) return 1;						/* Either this isn't vmm thread or the index is bogus */

	if (pmap->pmapFlags & pmapVMgsaa) {			/* Handle guest shadow assist specially */
		RC = hw_test_rc_gv(act->map->pmap, pmap, va, reset);	/* Fetch the RC bits and clear if requested */
	} else {
		RC = hw_test_rc(pmap, (addr64_t)va, reset);	/* Fetch the RC bits and clear if requested */
	}

	switch (RC & mapRetCode) {					/* Decode return code */

		case mapRtOK:							/* Changed */
			return ((RC & (unsigned int)mpC) == (unsigned int)mpC);	/* Return if dirty or not */
			break;

		case mapRtNotFnd:						/* Didn't find it */
			return 1;							/* Return dirty */
			break;

		default:
			panic("vmm_get_page_dirty_flag: hw_test_rc failed - rc = %d, pmap = %08X, va = %016llX\n", RC, pmap, va);
	}

	return 1;									/* Return the change bit */
}
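
/*
 *	Editor's illustrative sketch, not part of the original source: the value from
 *	hw_test_rc/hw_test_rc_gv carries a return code (mapRetCode) alongside the R and
 *	C bits, which is why the switch above masks before comparing.
 */
#if 0	/* example only */
	if (((RC & mapRetCode) == mapRtOK) && (RC & mpC)) {
		/* page was found and has been modified */
	}
#endif
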
/*-----------------------------------------------------------------------
** vmm_protect_page
**
** This function sets the protection bits of a mapped page
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**		prot - Protection flags
**
** Outputs:
**		none
**		Protection bits of the mapping are modified
**
-----------------------------------------------------------------------*/
kern_return_t vmm_protect_page(
	thread_t			act,
	vmm_adsp_id_t		index,
	addr64_t			va,
	vm_prot_t			prot)
{
	vmmCntrlEntry	*CEntry;
	addr64_t		nextva;
	int				ret;
	pmap_t			pmap;

	pmap = vmm_get_adsp(act, index);			/* Convert index to entry */
	if (!pmap) return KERN_FAILURE;				/* Either this isn't vmm thread or the index is bogus */

	if (pmap->pmapFlags & pmapVMgsaa) {			/* Handle guest shadow assist specially */
		ret = hw_protect_gv(pmap, va, prot);	/* Try to change protection, GSA variant */
	} else {
		ret = hw_protect(pmap, va, prot, &nextva);	/* Try to change protection */
	}

	switch (ret) {								/* Decode return code */

		case mapRtOK:							/* All ok... */
			break;								/* Outta here */

		case mapRtNotFnd:						/* Didn't find it */
			return KERN_SUCCESS;				/* Ok, return... */
			break;

		default:
			panic("vmm_protect_page: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, (addr64_t)va);
	}

	if (!((getPerProc()->spcFlags) & FamVMmode)) {
		act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;	/* Remember last address space */
	}

	return KERN_SUCCESS;						/* Return */
}
/*-----------------------------------------------------------------------
** vmm_protect_execute
**
** This function sets the protection bits of a mapped page
** and then directly starts executing.
**
** See description of vmm_protect_page for details.
**
** Inputs:
**		See vmm_protect_page and vmm_map_execute
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/
vmm_return_code_t vmm_protect_execute(
	thread_t			act,
	vmm_thread_index_t	index,
	addr64_t			va,
	vm_prot_t			prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;
	unsigned int		adsp;
	vmm_thread_index_t	cndx;

	cndx = index & 0xFF;						/* Clean it up */
	CEntry = vmm_get_entry(act, cndx);			/* Get and validate the index */
	if (CEntry == NULL) return kVmmBogusContext;	/* Return bogus context */

	adsp = (index >> 8) & 0xFF;					/* Get any requested address space */
	if(!adsp) adsp = (index & 0xFF);			/* If 0, use context ID as address space ID */

	if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
		return kVmmBogusContext;				/* Yes, invalid index in Fam */

	ret = vmm_protect_page(act, adsp, va, prot);	/* Go try to change access */

	if(ret == KERN_SUCCESS) {
		act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;	/* Remember last address space */
		vmm_execute_vm(act, cndx);				/* Return was ok, launch the VM */
	}

	return ret;									/* We had trouble of some kind (shouldn't happen) */
}
/*-----------------------------------------------------------------------
** vmm_get_float_state
**
** This function causes the current floating point state to
** be saved into the shared context area. It also clears the
** vmmFloatCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/
kern_return_t vmm_get_float_state(
	thread_t 			act,
	vmm_thread_index_t 	index)
{
	vmmCntrlEntry 	*CEntry;
	vmmCntrlTable	*CTable;
	int				i;
	register struct savearea_fpu *sv;

	CEntry = vmm_get_entry(act, index);	/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm thread or the index is bogus */

	act->machine.specFlags &= ~floatCng;	/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;	/* Clear the change indication */

	fpu_save(&CEntry->vmmFacCtx);			/* Save context if live */

	if((sv = CEntry->vmmFacCtx.FPUsave)) {	/* Is there context yet? */
		bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8);	/* 32 registers */
		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {				/* Initialize floating points */
		CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit;	/* Initial value */
	}

	return KERN_SUCCESS;
}
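
/*
 * Editor's sketch (not part of the original source): once vmm_get_float_state
 * has run, the guest's floating point registers sit in the shared context
 * area and can be read as doubles. Assumes the caller holds the same
 * vmm_state_page_t pointer used as vmmContextKern elsewhere in this file;
 * the helper name is hypothetical.
 */
static double vmm_read_guest_fpr_example(vmm_state_page_t *ctx, int n)
{
	return ctx->vmm_proc_state.ppcFPRs[n].d;	/* FPR n, valid after vmm_get_float_state */
}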
/*-----------------------------------------------------------------------
** vmm_get_vector_state
**
** This function causes the current vector state to
** be saved into the shared context area.  It also clears the
** vmmVectorCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
-----------------------------------------------------------------------*/
kern_return_t vmm_get_vector_state(
	thread_t 			act,
	vmm_thread_index_t 	index)
{
	vmmCntrlEntry 	*CEntry;
	vmmCntrlTable	*CTable;
	int				i, j;
	unsigned int 	vrvalidwrk;
	register struct savearea_vec *sv;

	CEntry = vmm_get_entry(act, index);	/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm thread or the index is bogus */

	vec_save(&CEntry->vmmFacCtx);			/* Save context if live */

	act->machine.specFlags &= ~vectorCng;	/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;	/* Clear the change indication */

	if((sv = CEntry->vmmFacCtx.VMXsave)) {	/* Is there context yet? */

		vrvalidwrk = sv->save_vrvalid;		/* Get the valid flags */

		for(i = 0; i < 32; i++) {			/* Copy the saved registers and invalidate the others */
			if(vrvalidwrk & 0x80000000) {	/* Do we have a valid value here? */
				for(j = 0; j < 4; j++) {	/* If so, copy it over */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
				}
			}
			else {
				for(j = 0; j < 4; j++) {	/* Otherwise set to empty value */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
				}
			}

			vrvalidwrk = vrvalidwrk << 1;	/* Shift over to the next */
		}

		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {				/* Initialize vector registers */
		for(j = 0; j < 4; j++) {			/* Do words */
			CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];	/* Initial value */
		}
	}

	return KERN_SUCCESS;
}
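
/*
 * Editor's sketch (not part of the original source): how the save_vrvalid
 * mask is interpreted by the copy loop above -- bit 31 (the MSB) corresponds
 * to VR0, bit 30 to VR1, and so on. The helper name is hypothetical.
 */
static int vmm_vr_is_valid_example(unsigned int vrvalid, int vrnum)
{
	return (vrvalid >> (31 - vrnum)) & 1;	/* 1 if VR 'vrnum' was live in the savearea */
}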
/*-----------------------------------------------------------------------
** vmm_set_timer
**
** This function sets a timer (in AbsoluteTime) to pop at a specific time.
** It also clears the vmmTimerPop flag if the timer is actually set.
**
** A timer is cleared by setting the time to 0. This will clear
** the vmmTimerPop bit. Simply setting the timer to earlier than the
** current time clears the internal timer request, but leaves the
** vmmTimerPop flag set.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**		timerhi - high order word of AbsoluteTime to pop
**		timerlo - low order word of AbsoluteTime to pop
**
** Outputs:
**		timer set, vmmTimerPop cleared
-----------------------------------------------------------------------*/
kern_return_t vmm_set_timer(
	thread_t 			act,
	vmm_thread_index_t 	index,
	unsigned int 		timerhi,
	unsigned int 		timerlo)
{
	vmmCntrlEntry 	*CEntry;

	CEntry = vmm_get_entry(act, index);	/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm thread or the index is bogus */

	CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo;	/* Assemble the 64-bit pop time */

	vmm_timer_pop(act);		/* Go adjust all of the timer stuff */
	return KERN_SUCCESS;	/* Leave now... */
}
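
/*
 * Editor's sketch (not part of the original source): a caller holding a
 * 64-bit AbsoluteTime deadline splits it into the two 32-bit words that
 * vmm_set_timer reassembles above. The helper name is hypothetical.
 */
static kern_return_t vmm_set_timer64_example(
	thread_t			act,
	vmm_thread_index_t	index,
	uint64_t			deadline)
{
	unsigned int timerhi = (unsigned int)(deadline >> 32);	/* High order word */
	unsigned int timerlo = (unsigned int)deadline;			/* Low order word */
	return vmm_set_timer(act, index, timerhi, timerlo);		/* Timer pops at 'deadline' */
}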
/*-----------------------------------------------------------------------
** vmm_get_timer
**
** This function causes the timer for a specified VM to be
** returned in return_params[0] and return_params[1].
** Note that this is kind of funky for 64-bit VMs because we
** split the timer into two parts so that we still set parms 0 and 1.
** Obviously, we don't need to do this because the parms are 8 bytes
** wide.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Timer value set in return_params[0] and return_params[1].
**		Set to 0 if timer is not set.
-----------------------------------------------------------------------*/
kern_return_t vmm_get_timer(
	thread_t 			act,
	vmm_thread_index_t 	index)
{
	vmmCntrlEntry 	*CEntry;
	vmmCntrlTable	*CTable;

	CEntry = vmm_get_entry(act, index);	/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm thread or the index is bogus */

	if(CEntry->vmmXAFlgs & vmm64Bit) {	/* A 64-bit virtual machine? */
		CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32);	/* Return the high word of the timer */
		CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the low word of the timer */
	}
	else {
		CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[0] = (CEntry->vmmTimer >> 32);	/* Return the high word of the timer */
		CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the low word of the timer */
	}

	return KERN_SUCCESS;
}
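
/*
 * Editor's sketch (not part of the original source): a 32-bit client
 * reassembles the timer that vmm_get_timer delivers in return_params[0]
 * (high word) and return_params[1] (low word). The helper name is
 * hypothetical.
 */
static uint64_t vmm_timer_from_params_example(unsigned int hi, unsigned int lo)
{
	return ((uint64_t)hi << 32) | lo;	/* Rebuild the 64-bit AbsoluteTime */
}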
/*-----------------------------------------------------------------------
** vmm_timer_pop
**
** This function causes all timers in the array of VMs to be updated.
** All appropriate flags are set or reset. If a VM is currently
** running and its timer expired, it is intercepted.
**
** The qactTimer value is set to the lowest unexpired timer. It is
** zeroed if all timers are expired or have been reset.
**
** Inputs:
**		act - pointer to current thread activation structure
**
** Outputs:
**		timers set, vmmTimerPop cleared or set
-----------------------------------------------------------------------*/
void vmm_timer_pop(
	thread_t 		act)
{
	vmmCntrlEntry 	*CEntry;
	vmmCntrlTable	*CTable;
	int				cvi, any;
	uint64_t		now, soonest;
	struct savearea	*sv;

	if(!((unsigned int)act->machine.vmmControl & 0xFFFFFFFE)) {	/* Are there any virtual machines? */
		panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
	}

	soonest = 0xFFFFFFFFFFFFFFFFULL;	/* Max time */

	clock_get_uptime(&now);				/* What time is it? */

	CTable = act->machine.vmmControl;	/* Make this easier */
	any = 0;							/* Haven't found a running unexpired timer yet */

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Cycle through all and check time now */

		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;	/* Do not check if the entry is empty */

		if(CTable->vmmc[cvi].vmmTimer == 0) {	/* Is the timer reset? */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Clear timer popped */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Clear timer popped */
			continue;	/* Check next */
		}

		if (CTable->vmmc[cvi].vmmTimer <= now) {	/* Has this timer expired? */
			CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;	/* Set timer popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;	/* Set timer popped here */
			if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->machine.vmmCEntry) {	/* Is this the running VM? */
				sv = find_user_regs(act);	/* Get the user state registers */
				if(!sv) {					/* Did we find something? */
					panic("vmm_timer_pop: no user context; act = %08X\n", act);
				}
				sv->save_exception = kVmmReturnNull*4;	/* Indicate that this is a null exception */
				vmm_force_exit(act, sv);	/* Intercept a running VM */
			}
			continue;	/* Check the rest */
		}
		else {			/* It hasn't popped yet */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Set timer not popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Set timer not popped here */
		}

		any = 1;		/* Show we found an active unexpired timer */

		if (CTable->vmmc[cvi].vmmTimer < soonest)
			soonest = CTable->vmmc[cvi].vmmTimer;	/* Remember the earliest pending pop */
	}

	if(any) {			/* Did we find an active unexpired timer? */
		if (act->machine.qactTimer == 0 || soonest <= act->machine.qactTimer)
			act->machine.qactTimer = soonest;	/* Set lowest timer */
	}

	return;
}
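
/*
 * Editor's sketch (not part of the original source): the expiry rule the loop
 * above applies to each context -- a timer of zero means "not set" and never
 * pops; anything else pops once the current uptime reaches it. The helper
 * name is hypothetical.
 */
static int vmm_timer_expired_example(uint64_t timer, uint64_t now)
{
	return (timer != 0) && (timer <= now);	/* Zero means no timer is set */
}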
/*-----------------------------------------------------------------------
** vmm_stop_vm
**
** This function prevents the specified VM(s) from running.
** If any is currently executing, the execution is intercepted
** with a code of kVmmStopped. Note that execution of the VM is
** blocked until a vmmExecuteVM is called with the start flag set to 1.
** This provides the ability for a thread to stop execution of a VM and
** ensure that it will not be run until the emulator has processed the
** "virtual" interruption.
**
** Inputs:
**		vmmask - 32 bit mask corresponding to the VMs to put in stop state
**		NOTE: if this mask is all 0s, any executing VM is intercepted with
**		a kVmmStopped (but not marked stopped); otherwise this is a no-op.
**		Also note that there is a potential race here and the VM may not stop.
**
** Outputs:
**		kernel return code indicating success,
**		or if no VMs are enabled, an invalid syscall exception.
-----------------------------------------------------------------------*/
int vmm_stop_vm(struct savearea *save)
{
	thread_t 		act;
	thread_t		fact;
	task_t			task;
	vmmCntrlTable	*CTable;
	int				cvi, i;
	unsigned int 	vmmask;
	ReturnHandler	*stopapc;

	ml_set_interrupts_enabled(TRUE);	/* This can take a bit of time so pass interruptions */

	task = current_task();				/* Figure out who we are */

	task_lock(task);					/* Lock our task */

	fact = (thread_t)task->threads.next;	/* Get the first activation on task */
	act = 0;							/* Pretend we didn't find it yet */

	for(i = 0; i < task->thread_count; i++) {	/* All of the activations */
		if(fact->machine.vmmControl) {	/* Is this a virtual machine monitor? */
			act = fact;					/* Yeah... */
			break;						/* Bail the loop... */
		}
		fact = (thread_t)fact->task_threads.next;	/* Go to the next one */
	}

	if(!((unsigned int)act)) {			/* See if we have VMMs yet */
		task_unlock(task);				/* No, unlock the task */
		ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
		return 0;						/* Go generate a syscall exception */
	}

	thread_reference(act);				/* Take a reference on the activation */

	task_unlock(task);					/* Safe to release now */

	thread_mtx_lock(act);

	CTable = act->machine.vmmControl;	/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {	/* Are there any all the way up yet? */
		thread_mtx_unlock(act);			/* Unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
		return 0;						/* Go generate a syscall exception */
	}

	if(!(vmmask = save->save_r3)) {		/* Get the stop mask and check if all zeros */
		thread_mtx_unlock(act);			/* Unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;	/* Set success */
		return 1;						/* Return... */
	}

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search slots */
		if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) {	/* See if we need to stop and if it is in use */
			hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop);	/* Set this one to stop */
		}
		vmmask = vmmask << 1;			/* Slide mask over */
	}

	if(hw_compare_and_store(0, 1, &act->machine.emPendRupts)) {	/* See if there is already a stop pending and lock out others if not */
		thread_mtx_unlock(act);			/* Already one pending, unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;	/* Say we did it... */
		return 1;						/* Leave */
	}

	if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) {	/* Get a return handler control block */
		act->machine.emPendRupts = 0;	/* No memory, say we have given up request */
		thread_mtx_unlock(act);			/* Unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
		return 1;						/* Return... */
	}

	ml_set_interrupts_enabled(FALSE);	/* Disable interruptions for now */

	stopapc->handler = vmm_interrupt;	/* Set interruption routine */

	stopapc->next = act->handlers;		/* Put our interrupt at the start of the list */
	act->handlers = stopapc;			/* Point to us */

	act_set_apc(act);					/* Set an APC AST */
	ml_set_interrupts_enabled(TRUE);	/* Enable interruptions now */

	thread_mtx_unlock(act);				/* Unlock the activation */
	thread_deallocate(act);

	ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;		/* Hip, hip, hooray... */
	return 1;
}
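
/*
 * Editor's sketch (not part of the original source): the stop mask passed in
 * save_r3 is consumed MSB-first by the slot scan above, so context slot 0 is
 * selected by bit 31, slot 1 by bit 30, and so on. The helper name is
 * hypothetical.
 */
static unsigned int vmm_stop_mask_for_slot_example(int cvi)
{
	return 0x80000000U >> cvi;	/* Mask bit that stops context slot 'cvi' */
}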
/*-----------------------------------------------------------------------
** vmm_interrupt
**
** This function is executed asynchronously from an APC AST.
** It is to be used for anything that needs to interrupt a running VM.
** This includes any kind of interruption generation (other than timer pop)
** or entering the stopped state.
**
** Inputs:
**		ReturnHandler *rh - the return handler control block as required by the APC.
**		thread_t act - the activation
**
** Outputs:
**		Whatever needed to be done is done.
-----------------------------------------------------------------------*/
void vmm_interrupt(ReturnHandler *rh, thread_t act)
{
	vmmCntrlTable	*CTable;
	struct savearea	*sv;
	boolean_t		inter;

	kfree(rh, sizeof(ReturnHandler));	/* Release the return handler block */

	inter = ml_set_interrupts_enabled(FALSE);	/* Disable interruptions for now */

	act->machine.emPendRupts = 0;		/* Say that there are no more interrupts pending */
	CTable = act->machine.vmmControl;	/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {	/* Are we still doing VMs? */
		ml_set_interrupts_enabled(inter);	/* Restore the interrupt state first */
		return;							/* Leave if we aren't doing VMs any more... */
	}

	if(act->machine.vmmCEntry && (act->machine.vmmCEntry->vmmFlags & vmmXStop)) {	/* Do we need to stop the running guy? */
		sv = find_user_regs(act);		/* Get the user state registers */
		if(!sv) {						/* Did we find something? */
			panic("vmm_interrupt: no user context; act = %08X\n", act);
		}
		sv->save_exception = kVmmStopped*4;	/* Set a "stopped" exception */
		vmm_force_exit(act, sv);		/* Intercept a running VM */
	}

	ml_set_interrupts_enabled(inter);	/* Put interrupts back to what they were */
}