/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.c
**
** C routines that we are adding to the MacOS X kernel.
**
-----------------------------------------------------------------------*/

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <ppc/vmachmon.h>
#include <ppc/lowglobals.h>

extern double FloatInit;
extern unsigned long QNaNbarbarian[4];

/*************************************************************************************
	Virtual Machine Monitor Internal Routines
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function verifies a vmm context index and returns the corresponding
** context entry.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/

static vmmCntrlEntry *vmm_get_entry(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlTable	*CTable;
	vmmCntrlEntry	*CEntry;

	index = index & vmmTInum;							/* Clean up the index */

	if (act->machine.vmmControl == 0) return NULL;		/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContexts) return NULL;	/* Index not in range */

	CTable = act->machine.vmmControl;					/* Make the address a bit more convenient */
	CEntry = &CTable->vmmc[index - 1];					/* Point to the entry */

	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;	/* See if the slot is actually in use */

	return CEntry;
}

/*-----------------------------------------------------------------------
** vmm_get_adsp
**
** This function verifies and returns the pmap for an address space.
** If there is none and the request is valid, a pmap will be created.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a pmap or 0 if not found or could not be created
**		Note that if there is no pmap for the address space it will be created.
-----------------------------------------------------------------------*/

static pmap_t vmm_get_adsp(thread_t act, vmm_thread_index_t index)
{
	pmap_t pmap;

	if (act->machine.vmmControl == 0) return NULL;		/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContexts) return NULL;	/* Index not in range */

	pmap = act->machine.vmmControl->vmmAdsp[index - 1];	/* Get the pmap */
	return (pmap);										/* and return it. */
}

/*-----------------------------------------------------------------------
** vmm_build_shadow_hash
**
** Allocate and initialize a shadow hash table.
**
** This function assumes that PAGE_SIZE is 4k-bytes.
**
-----------------------------------------------------------------------*/
static pmap_vmm_ext *vmm_build_shadow_hash(pmap_t pmap)
{
	pmap_vmm_ext	*ext;							/* VMM pmap extension we're building */
	ppnum_t			extPP;							/* VMM pmap extension physical page number */
	kern_return_t	ret;							/* Return code from various calls */
	uint32_t		pages = GV_HPAGES;				/* Number of pages in the hash table */
	vm_offset_t		free = VMX_HPIDX_OFFSET;		/* Offset into extension page of free area (128-byte aligned) */
	uint32_t		freeSize = PAGE_SIZE - free;	/* Number of free bytes in the extension page */

	if ((pages * sizeof(addr64_t)) + (pages * sizeof(vm_offset_t)) > freeSize) {
		panic("vmm_build_shadow_hash: too little pmap_vmm_ext free space\n");
	}

	ret = kmem_alloc_wired(kernel_map, (vm_offset_t *)&ext, PAGE_SIZE);
													/* Allocate a page-sized extension block */
	if (ret != KERN_SUCCESS) return (NULL);			/* Return NULL for failed allocate */
	bzero((char *)ext, PAGE_SIZE);					/* Zero the entire extension block page */

	extPP = pmap_find_phys(kernel_pmap, (vm_offset_t)ext);
													/* Get extension block's physical page number */
	if (!extPP) {									/* This should not fail, but then again... */
		panic("vmm_build_shadow_hash: could not translate pmap_vmm_ext vaddr %08X\n", ext);
	}

	ext->vmxSalt = (addr64_t)(vm_offset_t)ext ^ ptoa_64(extPP);
													/* Set effective<->physical conversion salt */
	ext->vmxHostPmapPhys = (addr64_t)(vm_offset_t)pmap ^ pmap->pmapvr;
													/* Set host pmap's physical address */
	ext->vmxHostPmap = pmap;						/* Set host pmap's effective address */
	ext->vmxHashPgIdx = (addr64_t *)((vm_offset_t)ext + VMX_HPIDX_OFFSET);
													/* Allocate physical index */
	ext->vmxHashPgList = (vm_offset_t *)((vm_offset_t)ext + VMX_HPLIST_OFFSET);
													/* Allocate page list */
	ext->vmxActiveBitmap = (vm_offset_t *)((vm_offset_t)ext + VMX_ACTMAP_OFFSET);
													/* Allocate active mapping bitmap */

	/* The hash table is typically larger than a single page, but we don't require it to be in a
	   contiguous virtual or physical chunk. So, we allocate it page by page, noting the effective and
	   physical address of each page in vmxHashPgList and vmxHashPgIdx, respectively. */
	uint32_t idx;
	for (idx = 0; idx < pages; idx++) {
		ret = kmem_alloc_wired(kernel_map, &ext->vmxHashPgList[idx], PAGE_SIZE);
													/* Allocate a hash-table page */
		if (ret != KERN_SUCCESS) goto fail;			/* Allocation failed, exit through cleanup */
		bzero((char *)ext->vmxHashPgList[idx], PAGE_SIZE);	/* Zero the page */
		ext->vmxHashPgIdx[idx] = ptoa_64(pmap_find_phys(kernel_pmap, (addr64_t)ext->vmxHashPgList[idx]));
													/* Put page's physical address into index */
		if (!ext->vmxHashPgIdx[idx]) {				/* Hash-table page's LRA failed */
			panic("vmm_build_shadow_hash: could not translate hash-table vaddr %08X\n", ext->vmxHashPgList[idx]);
		}
		mapping_t *map = (mapping_t *)ext->vmxHashPgList[idx];
		uint32_t mapIdx;
		for (mapIdx = 0; mapIdx < GV_SLTS_PPG; mapIdx++) {	/* Iterate over mappings in this page */
			map->mpFlags = (mpGuest | mpgFree);		/* Mark guest type and free */
			map = (mapping_t *)((char *)map + GV_SLOT_SZ);	/* Next slot-sized mapping */
		}
	}

	return (ext);									/* Return newly-minted VMM pmap extension */

fail:
	for (idx = 0; idx < pages; idx++) {				/* De-allocate any pages we managed to allocate */
		if (ext->vmxHashPgList[idx]) {
			kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
		}
	}
	kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE);	/* Release the VMM pmap extension page */
	return (NULL);									/* Return NULL for failure */
}
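
/*
 * Editorial sketch, not part of the original source: the run-time free-space
 * check at the top of vmm_build_shadow_hash() can also be stated as a
 * compile-time assertion, under the same 4K-page assumption the header
 * comment makes. The macro below (the classic negative-array-size trick) is
 * hypothetical and shown for illustration only.
 */
#if 0	/* illustrative only */
#define VMX_LAYOUT_ASSERT(expr) typedef char vmx_layout_check[(expr) ? 1 : -1]

/* Physical index plus page list must fit in the extension page's free area */
VMX_LAYOUT_ASSERT((GV_HPAGES * sizeof(addr64_t)) + (GV_HPAGES * sizeof(vm_offset_t))
	<= (PAGE_SIZE - VMX_HPIDX_OFFSET));
#endif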

/*-----------------------------------------------------------------------
** vmm_release_shadow_hash
**
** Release shadow hash table and VMM extension block
**
-----------------------------------------------------------------------*/
static void vmm_release_shadow_hash(pmap_vmm_ext *ext)
{
	uint32_t idx;

	for (idx = 0; idx < GV_HPAGES; idx++) {				/* Release the hash table page by page */
		kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
	}

	kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE);	/* Release the VMM pmap extension page */
}

/*-----------------------------------------------------------------------
** vmm_activate_gsa
**
** Activate guest shadow assist
**
-----------------------------------------------------------------------*/
static kern_return_t vmm_activate_gsa(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlTable *CTable = act->machine.vmmControl;	/* Get VMM control table */
	if (!CTable) {										/* Caller guarantees that this will work */
		panic("vmm_activate_gsa: VMM control table not present; act = %08X, idx = %d\n",
			act, index);
		return KERN_FAILURE;
	}
	vmmCntrlEntry *CEntry = vmm_get_entry(act, index);	/* Get context from index */
	if (!CEntry) {										/* Caller guarantees that this will work */
		panic("vmm_activate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
			act, index);
		return KERN_FAILURE;
	}

	pmap_t hpmap = act->map->pmap;						/* Get host pmap */
	pmap_t gpmap = vmm_get_adsp(act, index);			/* Get guest pmap */
	if (!gpmap) {										/* Caller guarantees that this will work */
		panic("vmm_activate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
			act, index);
		return KERN_FAILURE;
	}

	if (!hpmap->pmapVmmExt) {							/* If there's no VMM extension for this host, create one */
		hpmap->pmapVmmExt = vmm_build_shadow_hash(hpmap);	/* Build VMM extension plus shadow hash and attach */
		if (hpmap->pmapVmmExt) {						/* See if we succeeded */
			hpmap->pmapVmmExtPhys = (addr64_t)(vm_offset_t)hpmap->pmapVmmExt ^ hpmap->pmapVmmExt->vmxSalt;
														/* Get VMM extensions block physical address */
		} else {
			return KERN_RESOURCE_SHORTAGE;				/* Not enough mojo to go */
		}
	}
	gpmap->pmapVmmExt = hpmap->pmapVmmExt;				/* Copy VMM extension block virtual address into guest */
	gpmap->pmapVmmExtPhys = hpmap->pmapVmmExtPhys;		/* and its physical address, too */
	gpmap->pmapFlags |= pmapVMgsaa;						/* Enable GSA for this guest */
	CEntry->vmmXAFlgs |= vmmGSA;						/* Show GSA active here, too */

	return KERN_SUCCESS;
}


/*-----------------------------------------------------------------------
** vmm_deactivate_gsa
**
** Deactivate guest shadow assist
**
-----------------------------------------------------------------------*/
static void vmm_deactivate_gsa(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry *CEntry = vmm_get_entry(act, index);	/* Get context from index */
	if (!CEntry) {										/* Caller guarantees that this will work */
		panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
			act, index);
		return;											/* Void routine, no failure code to return */
	}

	pmap_t gpmap = vmm_get_adsp(act, index);			/* Get guest pmap */
	if (!gpmap) {										/* Caller guarantees that this will work */
		panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
			act, index);
		return;
	}

	gpmap->pmapFlags &= ~pmapVMgsaa;					/* Deactivate GSA for this guest */
	CEntry->vmmXAFlgs &= ~vmmGSA;						/* Show GSA deactivated here, too */
}


/*-----------------------------------------------------------------------
** vmm_flush_context
**
** Flush specified guest context, purging all guest mappings and clearing
** the context page.
**
-----------------------------------------------------------------------*/
static void vmm_flush_context(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	vmm_state_page_t	*vks;
	vmm_version_t		version;

	CEntry = vmm_get_entry(act, index);					/* Convert index to entry */
	if (!CEntry) {										/* Caller guarantees that this will work */
		panic("vmm_flush_context: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
			act, index);
		return;
	}

	if(CEntry->vmmFacCtx.FPUsave) {						/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);				/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {						/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);				/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}

	vmm_unmap_all_pages(act, index);					/* Blow away all mappings for this context */

	CTable = act->machine.vmmControl;					/* Get the control table address */
	CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;	/* Make sure we don't try to automap into this */

	CEntry->vmmFlags &= vmmInUse;						/* Clear out all of the flags for this entry except in use */
	CEntry->vmmFacCtx.FPUsave = 0;						/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;						/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;						/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;						/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;						/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;						/* Clear facility context control */

	vks = CEntry->vmmContextKern;						/* Get address of the context page */
	version = vks->interface_version;					/* Save the version code */
	bzero((char *)vks, 4096);							/* Clear all */

	vks->interface_version = version;					/* Set our version code */
	vks->thread_index = index % vmmTInum;				/* Tell the user the index for this virtual machine */

	return;												/* Context is now flushed */
}


/*************************************************************************************
	Virtual Machine Monitor Exported Functionality

	The following routines are used to implement a quick-switch mechanism for
	virtual machines that need to execute within their own processor environment
	(including register and MMU state).
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for them.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing major/minor version of
**		the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_version(struct savearea *save)
{
	save->save_r3 = kVmmCurrentVersion;		/* Return the version */
	return 1;
}


/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing functionality supported by this
**		version of the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_features(struct savearea *save)
{
	save->save_r3 = kVmmCurrentFeatures;				/* Return the features */
	if(getPerProc()->pf.Available & pf64Bit) {
		save->save_r3 &= ~kVmmFeature_LittleEndian;		/* No little endian here */
		save->save_r3 |= kVmmFeature_SixtyFourBit;		/* Set that we can do 64-bit */
	}
	return 1;
}
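
/*
 * Editorial sketch, not part of the original source: how a client would
 * interpret the two calls above. The major/minor split and the feature bits
 * are as documented above; the fetch_* helpers are hypothetical stand-ins
 * for the user-level dispatch path that reaches these routines, which lives
 * outside this file.
 */
#if 0	/* illustrative only */
	unsigned int version  = fetch_vmm_version();	/* r3 after the vmm_get_version call */
	unsigned int features = fetch_vmm_features();	/* r3 after the vmm_get_features call */

	unsigned int major = version >> 16;				/* Top 16 bits: major version */
	unsigned int minor = version & 0xFFFF;			/* Bottom 16 bits: minor version */

	if (features & kVmmFeature_SixtyFourBit) {
		/* Host can run 64-bit guests; see vmm_activate_XA below */
	}
#endif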


/*-----------------------------------------------------------------------
** vmm_max_addr
**
** This function returns the maximum addressable virtual address supported
**
** Outputs:
**		Returns max address
-----------------------------------------------------------------------*/

addr64_t vmm_max_addr(thread_t act)
{
	return vm_max_address;					/* Return the maximum address */
}

/*-----------------------------------------------------------------------
** vmm_get_XA
**
** This function retrieves the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Return code is set to the XA flags. If the index is invalid or the
**		context has not been created, we return 0.
-----------------------------------------------------------------------*/

unsigned int vmm_get_XA(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry *CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return 0;				/* Either this isn't a vmm or the index is bogus */

	return CEntry->vmmXAFlgs;					/* Return the flags */
}
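
/*
 * Editorial sketch, not part of the original source: interpreting the flags
 * returned by vmm_get_XA(). vmm64Bit and vmmGSA are the bits this file
 * itself sets and tests. Note that a zero return means "invalid index or no
 * context", which is indistinguishable from "no flags set".
 */
#if 0	/* illustrative only */
	thread_t           act = current_thread();	/* The vmm thread */
	vmm_thread_index_t idx = 1;					/* Index from vmm_init_context */
	unsigned int       xa  = vmm_get_XA(act, idx);

	if (xa & vmm64Bit) { /* guest context runs in 64-bit mode */ }
	if (xa & vmmGSA)    { /* guest shadow assist is active for this context */ }
#endif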

/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**		act - pointer to current thread activation
**		version - requested version of the Vmm interface (allowing
**			future versions of the interface to change, while still
**			supporting older clients)
**		vmm_user_state - pointer to a logical page within the
**			client's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

int vmm_init_context(struct savearea *save)
{
	thread_t			act;
	vmm_version_t		version;
	vmm_state_page_t	*vmm_user_state;
	vmmCntrlTable		*CTable;
	vm_offset_t			conkern;
	vmm_state_page_t	*vks;
	ppnum_t				conphys;
	kern_return_t		ret;
	int					cvi, i;
	task_t				task;
	thread_t			fact, gact;

	vmm_user_state = CAST_DOWN(vmm_state_page_t *, save->save_r4);	/* Get the user address of the comm area */
	if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {	/* Make sure the comm area is page aligned */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	/* Make sure that the version requested is supported */
	version = save->save_r3;					/* Pick up passed in version */
	if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	if((version & 0xFFFF) > kVmmCurMinorVersion) {	/* Check for valid minor */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	act = current_thread();						/* Pick up our activation */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();						/* Figure out who we are */

	task_lock(task);							/* Lock our task */

	fact = (thread_t)task->threads.next;		/* Get the first activation on task */
	gact = 0;									/* Pretend we didn't find it yet */

	for(i = 0; i < task->thread_count; i++) {	/* All of the activations */
		if(fact->machine.vmmControl) {			/* Is this a virtual machine monitor? */
			gact = fact;						/* Yeah... */
			break;								/* Bail the loop... */
		}
		fact = (thread_t)fact->task_threads.next;	/* Go to the next one */
	}


/*
 * We only allow one thread per task to be a virtual machine monitor right now. This solves
 * a number of potential problems that I can't put my finger on right now.
 *
 * Ultimately, I think we want to move the controls and make all this task based instead of
 * thread based. That would allow an emulator architecture to spawn a kernel thread for each
 * VM (if they want) rather than hand dispatch contexts.
 */

	if(gact && (gact != act)) {					/* Check if another thread is a vmm or trying to be */
		task_unlock(task);						/* Release task lock */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_FAILURE;			/* We must play alone... */
		return 1;
	}

	if(!gact) act->machine.vmmControl = (vmmCntrlTable *)1;	/* Temporarily mark that we are the vmm thread */

	task_unlock(task);							/* Safe to release now (because we've marked ourselves) */

	CTable = act->machine.vmmControl;			/* Get the control table address */
	if ((unsigned int)CTable == 1) {			/* If we are marked, try to allocate a new table, otherwise we have one */
		if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {	/* Get a fresh emulation control table */
			act->machine.vmmControl = 0;		/* Unmark us as vmm 'cause we failed */
			ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
			save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
			return 1;
		}

		bzero((void *)CTable, sizeof(vmmCntrlTable));	/* Clean it up */
		act->machine.vmmControl = CTable;		/* Initialize the table anchor */
	}

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search to find a free slot */
		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;	/* Bail if we find an unused slot */
	}

	if(cvi >= kVmmMaxContexts) {				/* Did we find one? */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No empty slots... */
		return 1;
	}

	ret = vm_map_wire(							/* Wire the virtual machine monitor's context area */
		act->map,
		(vm_offset_t)vmm_user_state,
		(vm_offset_t)vmm_user_state + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if (ret != KERN_SUCCESS)					/* The wire failed, return the code */
		goto return_in_shame;

	/* Map the vmm state into the kernel's address space. */
	conphys = pmap_find_phys(act->map->pmap, (addr64_t)((uintptr_t)vmm_user_state));

	/* Find a virtual address to use. */
	ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {					/* Did we find an address? */
		(void) vm_map_unwire(act->map,			/* No, unwire the context area */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		goto return_in_shame;
	}

	/* Map it into the kernel's address space. */

	pmap_enter(kernel_pmap, conkern, conphys,
		VM_PROT_READ | VM_PROT_WRITE,
		VM_WIMG_USE_DEFAULT, TRUE);

	/* Clear the vmm state structure. */
	vks = (vmm_state_page_t *)conkern;
	bzero((char *)vks, PAGE_SIZE);


	/* We're home free now. Simply fill in the necessary info and return. */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = cvi + 1;				/* Tell the user the index for this virtual machine */

	CTable->vmmc[cvi].vmmFlags = vmmInUse;		/* Mark the slot in use and make sure the rest are clear */
	CTable->vmmc[cvi].vmmContextKern = vks;		/* Remember the kernel address of comm area */
	CTable->vmmc[cvi].vmmContextPhys = conphys;	/* Remember the state page physical addr */
	CTable->vmmc[cvi].vmmContextUser = vmm_user_state;	/* Remember user address of comm area */

	CTable->vmmc[cvi].vmmFacCtx.FPUsave = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUlevel = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXsave = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXlevel = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.facAct = act;	/* Point back to the activation */

	hw_atomic_add((int *)&saveanchor.savetarget, 2);	/* Account for the number of extra saveareas we think we might "need" */

	pmap_t hpmap = act->map->pmap;				/* Get host pmap */
	pmap_t gpmap = pmap_create(0);				/* Make a fresh guest pmap */
	if (gpmap) {								/* Did we succeed ? */
		CTable->vmmAdsp[cvi] = gpmap;			/* Remember guest pmap for new context */
		if (lowGlo.lgVMMforcedFeats & vmmGSA) {	/* Forcing on guest shadow assist ? */
			vmm_activate_gsa(act, cvi+1);		/* Activate GSA */
		}
	} else {
		ret = KERN_RESOURCE_SHORTAGE;			/* We've failed to allocate a guest pmap */
		goto return_in_shame;					/* Shame on us. */
	}

	if (!(hpmap->pmapFlags & pmapVMhost)) {		/* Do this stuff if this is our first time hosting */
		hpmap->pmapFlags |= pmapVMhost;			/* We're now hosting */
	}

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;

return_in_shame:
	if(!gact) kfree(CTable, sizeof(vmmCntrlTable));	/* Toss the table if we just allocated it */
	act->machine.vmmControl = 0;				/* Unmark us as vmm 'cause we failed */
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = ret;						/* Pass back return code... */
	return 1;

}


/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		kernel return code indicating success or failure
**
** Strangeness note:
**		This call will also trash the address space with the same ID. While this
**		is really not too cool, we have to do it because we need to make
**		sure that old VMM users (not that we really have any) who depend upon
**		the address space going away with the context still work the same.
-----------------------------------------------------------------------*/
674kern_return_t vmm_tear_down_context(
91447636 675 thread_t act,
1c79356b
A
676 vmm_thread_index_t index)
677{
678 vmmCntrlEntry *CEntry;
679 vmmCntrlTable *CTable;
680 int cvi;
681 register savearea *sv;
682
91447636
A
683 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
684 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */
1c79356b 685
91447636 686 ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */
1c79356b 687
9bccf70c 688 hw_atomic_sub((int *)&saveanchor.savetarget, 2); /* We don't need these extra saveareas anymore */
1c79356b 689
91447636
A
690 if(CEntry->vmmFacCtx.FPUsave) { /* Is there any floating point context? */
691 toss_live_fpu(&CEntry->vmmFacCtx); /* Get rid of any live context here */
9bccf70c 692 save_release((savearea *)CEntry->vmmFacCtx.FPUsave); /* Release it */
1c79356b
A
693 }
694
91447636
A
695 if(CEntry->vmmFacCtx.VMXsave) { /* Is there any vector context? */
696 toss_live_vec(&CEntry->vmmFacCtx); /* Get rid of any live context here */
9bccf70c 697 save_release((savearea *)CEntry->vmmFacCtx.VMXsave); /* Release it */
1c79356b 698 }
55e303ae 699
91447636
A
700 CEntry->vmmPmap = 0; /* Remove this trace */
701 pmap_t gpmap = act->machine.vmmControl->vmmAdsp[index - 1];
702 /* Get context's guest pmap (if any) */
703 if (gpmap) { /* Check if there is an address space assigned here */
704 if (gpmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist case specially */
705 hw_rem_all_gv(gpmap); /* Remove all guest mappings from shadow hash table */
706 } else {
707 mapping_remove(gpmap, 0xFFFFFFFFFFFFF000LL);/* Remove final page explicitly because we might have mapped it */
708 pmap_remove(gpmap, 0, 0xFFFFFFFFFFFFF000LL);/* Remove all entries from this map */
709 }
710 pmap_destroy(gpmap); /* Toss the pmap for this context */
711 act->machine.vmmControl->vmmAdsp[index - 1] = NULL; /* Clean it up */
55e303ae 712 }
1c79356b
A
713
714 (void) vm_map_unwire( /* Unwire the user comm page */
715 act->map,
716 (vm_offset_t)CEntry->vmmContextUser,
717 (vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
718 FALSE);
719
720 kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE); /* Remove kernel's view of the comm page */
721
91447636 722 CTable = act->machine.vmmControl; /* Get the control table address */
55e303ae
A
723 CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp; /* Make sure we don't try to automap into this */
724
1c79356b 725 CEntry->vmmFlags = 0; /* Clear out all of the flags for this entry including in use */
1c79356b
A
726 CEntry->vmmContextKern = 0; /* Clear the kernel address of comm area */
727 CEntry->vmmContextUser = 0; /* Clear the user address of comm area */
9bccf70c
A
728
729 CEntry->vmmFacCtx.FPUsave = 0; /* Clear facility context control */
730 CEntry->vmmFacCtx.FPUlevel = 0; /* Clear facility context control */
731 CEntry->vmmFacCtx.FPUcpu = 0; /* Clear facility context control */
732 CEntry->vmmFacCtx.VMXsave = 0; /* Clear facility context control */
733 CEntry->vmmFacCtx.VMXlevel = 0; /* Clear facility context control */
734 CEntry->vmmFacCtx.VMXcpu = 0; /* Clear facility context control */
735 CEntry->vmmFacCtx.facAct = 0; /* Clear facility context control */
1c79356b 736
55e303ae 737 for(cvi = 0; cvi < kVmmMaxContexts; cvi++) { /* Search to find a free slot */
1c79356b
A
738 if(CTable->vmmc[cvi].vmmFlags & vmmInUse) { /* Return if there are still some in use */
739 ml_set_interrupts_enabled(FALSE); /* No more interruptions */
740 return KERN_SUCCESS; /* Leave... */
741 }
742 }
743
55e303ae
A
744/*
745 * When we have tossed the last context, toss any address spaces left over before releasing
746 * the VMM control block
747 */
748
749 for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) { /* Look at all slots */
91447636
A
750 if(!act->machine.vmmControl->vmmAdsp[index - 1]) continue; /* Nothing to remove here */
751 mapping_remove(act->machine.vmmControl->vmmAdsp[index - 1], 0xFFFFFFFFFFFFF000LL); /* Remove final page explicitly because we might have mapped it */
752 pmap_remove(act->machine.vmmControl->vmmAdsp[index - 1], 0, 0xFFFFFFFFFFFFF000LL); /* Remove all entries from this map */
753 pmap_destroy(act->machine.vmmControl->vmmAdsp[index - 1]); /* Toss the pmap for this context */
754 act->machine.vmmControl->vmmAdsp[index - 1] = 0; /* Clear just in case */
755 }
756
757 pmap_t pmap = act->map->pmap; /* Get our pmap */
758 if (pmap->pmapVmmExt) { /* Release any VMM pmap extension block and shadow hash table */
759 vmm_release_shadow_hash(pmap->pmapVmmExt); /* Release extension block and shadow hash table */
760 pmap->pmapVmmExt = 0; /* Forget extension block */
761 pmap->pmapVmmExtPhys = 0; /* Forget extension block's physical address, too */
762 }
763 pmap->pmapFlags &= ~pmapVMhost; /* We're no longer hosting */
55e303ae 764
91447636
A
765 kfree(CTable, sizeof(vmmCntrlTable)); /* Toss the table because to tossed the last context */
766 act->machine.vmmControl = 0; /* Unmark us as vmm */
1c79356b
A
767
768 ml_set_interrupts_enabled(FALSE); /* No more interruptions */
769
770 return KERN_SUCCESS;
771}


/*-----------------------------------------------------------------------
** vmm_activate_XA
**
** This function activates the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
** Note that this function does a lot of the same stuff as vmm_tear_down_context
** and vmm_init_context.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**		flags - the extended architecture flags
**
** Outputs:
**		KERN_SUCCESS if vm is valid and initialized. KERN_FAILURE if not.
**		Also, the internal flags are set and, additionally, the VM is completely reset.
-----------------------------------------------------------------------*/
kern_return_t vmm_activate_XA(
	thread_t			act,
	vmm_thread_index_t	index,
	unsigned int		xaflags)
{
	vmmCntrlEntry	*CEntry;
	kern_return_t	result	= KERN_SUCCESS;		/* Assume success */

	if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && !(getPerProc()->pf.Available & pf64Bit)))
		return (KERN_FAILURE);					/* Unknown or unsupported feature requested */

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	vmm_flush_context(act, index);				/* Flush the context */

	if (xaflags & vmm64Bit) {					/* Activating 64-bit mode ? */
		CEntry->vmmXAFlgs |= vmm64Bit;			/* Activate 64-bit mode */
	}

	if (xaflags & vmmGSA) {						/* Activating guest shadow assist ? */
		result = vmm_activate_gsa(act, index);	/* Activate guest shadow assist */
	}

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return result;								/* Return activate result */
}
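
/*
 * Editorial sketch, not part of the original source: a typical activation,
 * per the routine above. vmm_flush_context() completely resets the VM as a
 * side effect, so a client would do this before loading any guest state.
 */
#if 0	/* illustrative only */
	kern_return_t kr = vmm_activate_XA(current_thread(), 1, vmm64Bit | vmmGSA);
	if (kr != KERN_SUCCESS) {
		/* Fall back to 32-bit, non-GSA operation */
	}
#endif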

/*-----------------------------------------------------------------------
** vmm_deactivate_XA
**
-----------------------------------------------------------------------*/
kern_return_t vmm_deactivate_XA(
	thread_t			act,
	vmm_thread_index_t	index,
	unsigned int		xaflags)
{
	vmmCntrlEntry	*CEntry;
	kern_return_t	result	= KERN_SUCCESS;		/* Assume success */

	if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && !(getPerProc()->pf.Available & pf64Bit)))
		return (KERN_FAILURE);					/* Unknown or unsupported feature requested */

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	vmm_flush_context(act, index);				/* Flush the context */

	if (xaflags & vmm64Bit) {					/* Deactivating 64-bit mode ? */
		CEntry->vmmXAFlgs &= ~vmm64Bit;			/* Deactivate 64-bit mode */
	}

	if (xaflags & vmmGSA) {						/* Deactivating guest shadow assist ? */
		vmm_deactivate_gsa(act, index);			/* Deactivate guest shadow assist */
	}

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return result;								/* Return deactivate result */
}


/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**		activation to tear down
**
** Outputs:
**		All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/
void vmm_tear_down_all(thread_t act) {

	vmmCntrlTable	*CTable;
	int				cvi;
	kern_return_t	ret;
	savearea		*save;
	spl_t			s;

	if(act->machine.specFlags & runningVM) {	/* Are we actually in a context right now? */
		save = find_user_regs(act);				/* Find the user state context */
		if(!save) {								/* Did we find it? */
			panic("vmm_tear_down_all: runningVM marked but no user state context\n");
			return;
		}

		save->save_exception = kVmmBogusContext*4;	/* Indicate that this context is bogus now */
		s = splhigh();							/* Make sure interrupts are off */
		vmm_force_exit(act, save);				/* Force an exit from VM state */
		splx(s);								/* Restore interrupts */
	}

	if(CTable = act->machine.vmmControl) {		/* Do we have a vmm control block? */

		for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {	/* Look at all slots */
			if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) {	/* Is this one in use */
				ret = vmm_tear_down_context(act, cvi);	/* Take down the found context */
				if(ret != KERN_SUCCESS) {		/* Did it go away? */
					panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
						ret, act, cvi);
				}
			}
		}

/*
 * Note that all address spaces should be gone here.
 */
		if(act->machine.vmmControl) {			/* Did we find one? */
			panic("vmm_tear_down_all: control table did not get deallocated\n");	/* Table did not go away */
		}
	}

	return;
}

/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it and this mapping may disappear at any time, even before it gets used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of address space to map into
**		va    - virtual address within the client's address space
**		ava   - virtual address within the alternate address space
**		prot  - protection flags
**
** Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
** areas are not allowed and will fail. Same with directly mapped I/O areas.
**
** Input conditions:
**		Interrupts disabled (from fast trap)
**
** Outputs:
**		kernel return code indicating success or failure
**		if success, va resident and alternate mapping made
-----------------------------------------------------------------------*/

kern_return_t vmm_map_page(
	thread_t			act,
	vmm_adsp_id_t		index,
	addr64_t			cva,
	addr64_t			ava,
	vm_prot_t			prot)
{
	kern_return_t		ret;
	register mapping_t	*mp;
	vm_map_t			map;
	addr64_t			ova, nextva;
	pmap_t				pmap;

	pmap = vmm_get_adsp(act, index);			/* Get the guest pmap for this address space */
	if(!pmap) return KERN_FAILURE;				/* Bogus address space, no VMs, or we can't make a pmap, failure... */

	if(ava > vm_max_address) return kVmmInvalidAddress;	/* Does the machine support an address of this size? */

	map = current_thread()->map;				/* Get the host's map */

	if (pmap->pmapFlags & pmapVMgsaa) {			/* Guest shadow assist active ? */
		ret = hw_res_map_gv(map->pmap, pmap, cva, ava, getProtPPC(prot));
												/* Attempt to resume an existing gv->phys mapping */
		if (mapRtOK != ret) {					/* Nothing to resume, construct a new mapping */

			while (1) {							/* Find host mapping or fail */
				mp = mapping_find(map->pmap, cva, &nextva, 0);
												/* Attempt to find host mapping and pin it */
				if (mp) break;					/* Got it */

				ml_set_interrupts_enabled(TRUE);
												/* Open 'rupt window */
				ret = vm_fault(map,				/* Didn't find it, try to fault in host page read/write */
					vm_map_trunc_page(cva),
					VM_PROT_READ | VM_PROT_WRITE,
					FALSE,						/* change wiring */
					THREAD_UNINT,
					NULL,
					0);
				ml_set_interrupts_enabled(FALSE);
												/* Close 'rupt window */
				if (ret != KERN_SUCCESS)
					return KERN_FAILURE;		/* Fault failed, return failure */
			}

			if (mpNormal != (mp->mpFlags & mpType)) {
												/* Host mapping must be a vanilla page */
				mapping_drop_busy(mp);			/* Un-pin host mapping */
				return KERN_FAILURE;			/* Return failure */
			}

			/* Partially construct gv->phys mapping */
			unsigned int pindex;
			phys_entry_t *physent = mapping_phys_lookup(mp->mpPAddr, &pindex);
			if (!physent) {
				mapping_drop_busy(mp);
				return KERN_FAILURE;
			}
			unsigned int pattr = ((physent->ppLink & (ppI | ppG)) >> 60);	/* Extract physical page's I/G attributes */
			unsigned int wimg = 0x2;			/* Start with coherent (M bit) */
			if (pattr & mmFlgCInhib) wimg |= 0x4;	/* Add cache-inhibited if the physical page has it */
			if (pattr & mmFlgGuarded) wimg |= 0x1;	/* Add guarded if the physical page has it */
			unsigned int mflags = (pindex << 16) | mpGuest;
			addr64_t gva = ((ava & ~mpHWFlags) | (wimg << 3) | getProtPPC(prot));

			hw_add_map_gv(map->pmap, pmap, gva, mflags, mp->mpPAddr);
												/* Construct new guest->phys mapping */

			mapping_drop_busy(mp);				/* Un-pin host mapping */
		}
	} else {
		while(1) {								/* Keep trying until we get it or until we fail */

			mp = mapping_find(map->pmap, cva, &nextva, 0);	/* Find the mapping for this address */

			if(mp) break;						/* We found it */

			ml_set_interrupts_enabled(TRUE);	/* Enable interruptions */
			ret = vm_fault(map,					/* Didn't find it, try to fault it in read/write... */
				vm_map_trunc_page(cva),
				VM_PROT_READ | VM_PROT_WRITE,
				FALSE,							/* change wiring */
				THREAD_UNINT,
				NULL,
				0);
			ml_set_interrupts_enabled(FALSE);	/* Disable interruptions */
			if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* There isn't a page there, return... */
		}

		if((mp->mpFlags & mpType) != mpNormal) {	/* If this is a block, a nest, or some other special thing, we can't map it */
			mapping_drop_busy(mp);				/* We have everything we need from the mapping */
			return KERN_FAILURE;				/* Leave in shame */
		}

		while(1) {								/* Keep trying the enter until it goes in */
			ova = mapping_make(pmap, ava, mp->mpPAddr, 0, 1, prot);	/* Enter the mapping into the pmap */
			if(!ova) break;						/* If there were no collisions, we are done... */
			mapping_remove(pmap, ova);			/* Remove the mapping that collided */
		}

		mapping_drop_busy(mp);					/* We have everything we need from the mapping */
	}

	if (!((getPerProc()->spcFlags) & FamVMmode)) {
		act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;	/* Remember last address space */
	}

	return KERN_SUCCESS;
}
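
/*
 * Editorial sketch, not part of the original source: per the header comment
 * above, reference/change (RC) information survives only while the mapping
 * does, so a client that wants dirty-bit queries to be meaningful must keep
 * the host page wired. In user space, mlock() is one plausible way to do
 * that (shown abstractly; the dispatch call itself is outside this file).
 */
#if 0	/* illustrative only */
	void *host_page;							/* Page-aligned client buffer (assumed set up) */
	if (mlock(host_page, PAGE_SIZE) != 0) {		/* Keep the host page resident */
		/* handle failure */
	}
	/* ... then issue the map request for host_page -> guest va ... */
#endif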


/*-----------------------------------------------------------------------
** vmm_map_execute
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context and then directly starts executing.
**
** See description of vmm_map_page for details.
**
** Inputs:
**		Index is used for both the context and the address space ID.
**		index[24:31] is the context id and index[16:23] is the address space.
**		If the address space ID is 0, the context ID is used for it.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_map_execute(
	thread_t			act,
	vmm_thread_index_t	index,
	addr64_t			cva,
	addr64_t			ava,
	vm_prot_t			prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;
	unsigned int		adsp;
	vmm_thread_index_t	cndx;

	cndx = index & 0xFF;						/* Clean it up */

	CEntry = vmm_get_entry(act, cndx);			/* Get and validate the index */
	if (CEntry == NULL) return kVmmBogusContext;	/* Return bogus context */

	if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
		return kVmmBogusContext;				/* Yes, invalid index in Fam */

	adsp = (index >> 8) & 0xFF;					/* Get any requested address space */
	if(!adsp) adsp = (index & 0xFF);			/* If 0, use context ID as address space ID */

	ret = vmm_map_page(act, adsp, cva, ava, prot);	/* Go try to map the page on in */

	if(ret == KERN_SUCCESS) {
		act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;	/* Remember last address space */
		vmm_execute_vm(act, cndx);				/* Return was ok, launch the VM */
	}

	return ret;									/* We had trouble mapping in the page */

}
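
/*
 * Editorial sketch, not part of the original source: packing the combined
 * index that vmm_map_execute() takes apart above. The low byte (index[24:31])
 * carries the context id and the next byte (index[16:23]) the address-space
 * id; an address-space id of 0 means "use the context id".
 */
#if 0	/* illustrative only */
	unsigned int context_id = 1;				/* From vmm_init_context */
	unsigned int adsp_id    = 2;				/* Target address space; 0 = same as context */
	vmm_thread_index_t combined = ((adsp_id & 0xFF) << 8) | (context_id & 0xFF);
#endif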

/*-----------------------------------------------------------------------
** vmm_map_list
**
** This function maps a list of pages into various address spaces
**
** Inputs:
**		act    - pointer to current thread activation
**		index  - index of default address space (used if not specified in list entry)
**		count  - number of pages to map
**		flavor - 0 if 32-bit version, 1 if 64-bit
**		vmcpComm in the comm page contains up to kVmmMaxMapPages to map
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxMapPages is exceeded
**		or the vmm_map_page call fails.
**		We return kVmmInvalidAddress if virtual address size is not supported
-----------------------------------------------------------------------*/

kern_return_t vmm_map_list(
	thread_t			act,
	vmm_adsp_id_t		index,
	unsigned int		cnt,
	unsigned int		flavor)
{
	vmmCntrlEntry	*CEntry;
	kern_return_t	ret;
	unsigned int	i;
	vmmMList		*lst;
	vmmMList64		*lstx;
	addr64_t		cva;
	addr64_t		ava;
	vm_prot_t		prot;
	vmm_adsp_id_t	adsp;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	if(cnt > kVmmMaxMapPages) return KERN_FAILURE;	/* They tried to map too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	lst = (vmmMList *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */
	lstx = (vmmMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */

	for(i = 0; i < cnt; i++) {					/* Step and map all pages in list */
		if(flavor) {							/* Check if 32- or 64-bit addresses */
			cva = lstx[i].vmlva;				/* Get the 64-bit actual address */
			ava = lstx[i].vmlava;				/* Get the 64-bit guest address */
		}
		else {
			cva = lst[i].vmlva;					/* Get the 32-bit actual address */
			ava = lst[i].vmlava;				/* Get the 32-bit guest address */
		}

		prot = ava & vmmlProt;					/* Extract the protection bits */
		adsp = (ava & vmmlAdID) >> 4;			/* Extract an explicit address space request */
		if(!adsp) adsp = index - 1;				/* If no explicit, use supplied default */
		ava &= 0xFFFFFFFFFFFFF000ULL;			/* Clean up the address */

		ret = vmm_map_page(act, index, cva, ava, prot);	/* Go try to map the page on in */
		if(ret != KERN_SUCCESS) return ret;		/* Bail if any error */
	}

	return KERN_SUCCESS;						/* Return... */
}
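
/*
 * Editorial sketch, not part of the original source: how one 64-bit list
 * entry is encoded, mirroring the decode in the loop above. The guest
 * address word carries the protection in its low bits (vmmlProt), an
 * optional explicit address-space id (vmmlAdID, 0 = use the default), and a
 * page-aligned guest address in the remaining bits.
 */
#if 0	/* illustrative only */
	addr64_t     host_va  = 0;					/* Page-aligned client address (placeholder) */
	addr64_t     guest_va = 0;					/* Page-aligned guest address (placeholder) */
	unsigned int adsp_id  = 0;					/* 0 = use the default address space */
	vm_prot_t    prot     = VM_PROT_READ | VM_PROT_WRITE;

	vmmMList64 entry;
	entry.vmlva  = host_va;						/* Host (client) virtual address */
	entry.vmlava = (guest_va & 0xFFFFFFFFFFFFF000ULL)	/* Page-aligned guest address */
	             | ((adsp_id << 4) & vmmlAdID)	/* Explicit address space request */
	             | (prot & vmmlProt);			/* Protection bits */
#endif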

/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** Given a context index and a guest virtual address, convert the address
** to its corresponding host virtual address.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - context index
**		gva   - guest virtual address
**
** Outputs:
**		Host virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**		If the host address space contains multiple virtual addresses mapping
**		to the physical address corresponding to the specified guest virtual
**		address (i.e., host virtual aliases), it is unpredictable which host
**		virtual address (alias) will be returned. Moral of the story: No host
**		virtual aliases.
-----------------------------------------------------------------------*/

addr64_t vmm_get_page_mapping(
	thread_t			act,
	vmm_adsp_id_t		index,
	addr64_t			gva)
{
	register mapping_t	*mp;
	pmap_t				pmap;
	addr64_t			nextva, hva;
	ppnum_t				pa;

	pmap = vmm_get_adsp(act, index);			/* Get and validate the index */
	if (!pmap) return -1;						/* No good, failure... */

	if (pmap->pmapFlags & pmapVMgsaa) {			/* Guest shadow assist (GSA) active ? */
		return (hw_gva_to_hva(pmap, gva));		/* Convert guest to host virtual address */
	} else {
		mp = mapping_find(pmap, gva, &nextva, 0);	/* Find guest mapping for this virtual address */

		if(!mp) return -1;						/* Not mapped, return -1 */

		pa = mp->mpPAddr;						/* Remember the physical page address */

		mapping_drop_busy(mp);					/* Go ahead and release the mapping now */

		pmap = current_thread()->map->pmap;		/* Get the host pmap */
		hva = mapping_p2v(pmap, pa);			/* Now find the source virtual */

		if(hva != 0) return hva;				/* We found it... */

		panic("vmm_get_page_mapping: could not back-map guest va (%016llX)\n", gva);
												/* We are bad wrong if we can't find it */

		return -1;								/* Never executed, prevents compiler warning */
	}
}

/*-----------------------------------------------------------------------
** vmm_unmap_page
**
** This function unmaps a page from the guest address space.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the vmm's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_page(
	thread_t			act,
	vmm_adsp_id_t		index,
	addr64_t			va)
{
	vmmCntrlEntry	*CEntry;
	addr64_t		nadd;
	pmap_t			pmap;

	pmap = vmm_get_adsp(act, index);			/* Get and validate the index */
	if (!pmap) return KERN_FAILURE;				/* No good, failure... */

	if (pmap->pmapFlags & pmapVMgsaa) {			/* Handle guest shadow assist specially */
		hw_susp_map_gv(act->map->pmap, pmap, va);	/* Suspend the mapping */
		return (KERN_SUCCESS);					/* Always returns success */
	} else {
		nadd = mapping_remove(pmap, va);		/* Toss the mapping */

		return ((nadd & 1) ? KERN_FAILURE : KERN_SUCCESS);	/* Return... */
	}
}

/*-----------------------------------------------------------------------
** vmm_unmap_list
**
** This function unmaps a list of pages from the alternate's logical
** address space.
**
** Inputs:
**		act    - pointer to current thread activation
**		index  - index of vmm state for this page
**		count  - number of pages to release
**		flavor - 0 if 32-bit, 1 if 64-bit
**		vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_list(
	thread_t			act,
	vmm_adsp_id_t		index,
	unsigned int		cnt,
	unsigned int		flavor)
{
	vmmCntrlEntry		*CEntry;
	boolean_t			ret;
	kern_return_t		kern_result = KERN_SUCCESS;
	unsigned int		*pgaddr, i;
	addr64_t			gva;
	vmmUMList			*lst;
	vmmUMList64			*lstx;
	pmap_t				pmap;
	int					adsp;

	CEntry = vmm_get_entry(act, index);					/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;			/* Either this isn't a vmm or the index is bogus */

	if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE;	/* They tried to unmap too many */
	if(!cnt) return KERN_SUCCESS;						/* If they said none, we're done... */

	lstx = (vmmUMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */
	lst = (vmmUMList *)lstx;							/* Same list, viewed as 32-bit entries */

	for(i = 0; i < cnt; i++) {							/* Step and release all pages in list */
		if(flavor) {									/* Check if 32- or 64-bit addresses */
			gva = lstx[i].vmlava;						/* Get the 64-bit guest address */
		}
		else {
			gva = lst[i].vmlava;						/* Get the 32-bit guest address */
		}

		adsp = (gva & vmmlAdID) >> 4;					/* Extract an explicit address space request */
		if(!adsp) adsp = index - 1;						/* If no explicit, use supplied default */
		pmap = act->machine.vmmControl->vmmAdsp[adsp];	/* Get the pmap for this request */
		if(!pmap) continue;								/* Ain't nuthin' mapped here, no durn map... */

		gva &= 0xFFFFFFFFFFFFF000ULL;					/* Clean up the address */
		if (pmap->pmapFlags & pmapVMgsaa) {				/* Handle guest shadow assist specially */
			hw_susp_map_gv(act->map->pmap, pmap, gva);	/* Suspend the mapping */
		} else {
			(void)mapping_remove(pmap, gva);			/* Toss the mapping */
		}
	}

	return KERN_SUCCESS;								/* Return... */
}
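
/*
 * Editorial sketch (not part of the original source, compiled out): filling
 * the comm-page list that vmm_unmap_list() consumes, using the 64-bit flavor.
 * Per the loop above, an entry's low bits may carry an explicit address space
 * ID in the vmmlAdID field (extracted with ">> 4"); zero means "use the
 * supplied default index".  The guest addresses below are hypothetical, and
 * the encoding assumes vmmlAdID covers the shifted bits.
 */
#if 0
static kern_return_t example_unmap_two_pages(thread_t act, vmm_adsp_id_t index)
{
	vmmCntrlEntry	*CEntry = vmm_get_entry(act, index);
	vmmUMList64		*lstx;

	if (CEntry == NULL) return KERN_FAILURE;

	lstx = (vmmUMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];
	lstx[0].vmlava = 0x0000000010000000ULL;				/* Default address space */
	lstx[1].vmlava = 0x0000000010001000ULL | (2 << 4);	/* Explicit address space 2 (hypothetical) */

	return vmm_unmap_list(act, index, 2, 1);			/* Two entries, 64-bit flavor */
}
#endif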

/*-----------------------------------------------------------------------
** vmm_unmap_all_pages
**
** This function unmaps all pages from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of context state
**
** Outputs:
**		none
**
** Note:
**		All pages are unmapped, but the address space (i.e., pmap) is still alive
-----------------------------------------------------------------------*/

void vmm_unmap_all_pages(
	thread_t			act,
	vmm_adsp_id_t		index)
{
	vmmCntrlEntry		*CEntry;
	pmap_t				pmap;

	pmap = vmm_get_adsp(act, index);					/* Convert index to entry */
	if (!pmap) return;									/* Either this isn't vmm thread or the index is bogus */

	if (pmap->pmapFlags & pmapVMgsaa) {					/* Handle guest shadow assist specially */
		hw_rem_all_gv(pmap);							/* Remove all guest's mappings from shadow hash table */
	} else {
		/*
		 *	Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
		 */
		mapping_remove(pmap, 0xFFFFFFFFFFFFF000LL);		/* Remove final page explicitly because we might have mapped it */
		pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000LL);		/* Remove all entries from this map */
	}

	return;
}
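
/*
 * Editorial note (not part of the original source, compiled out): the last
 * 4K page is special-cased above, presumably because the byte just past it
 * wraps to zero in 64-bit arithmetic, so it cannot be covered by an ordinary
 * [start, end) range.
 */
#if 0
static void example_last_page_wrap(void)
{
	addr64_t last = 0xFFFFFFFFFFFFF000ULL;	/* First byte of the final page */
	addr64_t end  = last + 0x1000ULL;		/* Wraps to 0, leaving an empty range */
	(void)end;
}
#endif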


/*-----------------------------------------------------------------------
** vmm_get_page_dirty_flag
**
** This function returns the changed flag of the page
** and optionally clears the flag.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**		reset - Clears dirty if true, untouched if not
**
** Outputs:
**		the dirty bit
**		clears the dirty bit in the pte if requested
**
** Note:
**		The RC bits are merged into the global physical entry
-----------------------------------------------------------------------*/

boolean_t vmm_get_page_dirty_flag(
	thread_t			act,
	vmm_adsp_id_t		index,
	addr64_t			va,
	unsigned int		reset)
{
	vmmCntrlEntry		*CEntry;
	register mapping_t	*mpv, *mp;
	unsigned int		RC;
	pmap_t				pmap;

	pmap = vmm_get_adsp(act, index);					/* Convert index to entry */
	if (!pmap) return 1;								/* Either this isn't vmm thread or the index is bogus */

	if (pmap->pmapFlags & pmapVMgsaa) {					/* Handle guest shadow assist specially */
		RC = hw_test_rc_gv(act->map->pmap, pmap, va, reset);	/* Fetch the RC bits and clear if requested */
	} else {
		RC = hw_test_rc(pmap, (addr64_t)va, reset);		/* Fetch the RC bits and clear if requested */
	}

	switch (RC & mapRetCode) {							/* Decode return code */

		case mapRtOK:									/* Changed */
			return ((RC & (unsigned int)mpC) == (unsigned int)mpC);	/* Return if dirty or not */
			break;

		case mapRtNotFnd:								/* Didn't find it */
			return 1;									/* Return dirty */
			break;

		default:
			panic("vmm_get_page_dirty_flag: hw_test_rc failed - rc = %d, pmap = %08X, va = %016llX\n", RC, pmap, va);

	}

	return 1;											/* Return the change bit */
}
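
/*
 * Editorial sketch (not part of the original source, compiled out): a
 * monitor-style dirty scan built on vmm_get_page_dirty_flag(), clearing the
 * change bit as it goes.  Note that unmapped pages report as dirty, per the
 * mapRtNotFnd case above.  The index and address range are hypothetical.
 */
#if 0
static unsigned int example_count_dirty(thread_t act, vmm_adsp_id_t index)
{
	addr64_t		va;
	unsigned int	dirty = 0;

	for(va = 0; va < 0x100000ULL; va += 0x1000ULL) {	/* First 1MB of guest space, hypothetical */
		if(vmm_get_page_dirty_flag(act, index, va, 1)) dirty++;	/* 1 = reset the bit */
	}
	return dirty;
}
#endif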


/*-----------------------------------------------------------------------
** vmm_protect_page
**
** This function sets the protection bits of a mapped page
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**		prot - Protection flags
**
** Outputs:
**		none
**		Protection bits of the mapping are modified
**
-----------------------------------------------------------------------*/

kern_return_t vmm_protect_page(
	thread_t			act,
	vmm_adsp_id_t		index,
	addr64_t			va,
	vm_prot_t			prot)
{
	vmmCntrlEntry		*CEntry;
	addr64_t			nextva;
	int					ret;
	pmap_t				pmap;

	pmap = vmm_get_adsp(act, index);					/* Convert index to entry */
	if (!pmap) return KERN_FAILURE;						/* Either this isn't vmm thread or the index is bogus */

	if (pmap->pmapFlags & pmapVMgsaa) {					/* Handle guest shadow assist specially */
		ret = hw_protect_gv(pmap, va, prot);			/* Try to change protection, GSA variant */
	} else {
		ret = hw_protect(pmap, va, prot, &nextva);		/* Try to change protection */
	}

	switch (ret) {										/* Decode return code */

		case mapRtOK:									/* All ok... */
			break;										/* Outta here */

		case mapRtNotFnd:								/* Didn't find it */
			return KERN_SUCCESS;						/* Ok, return... */
			break;

		default:
			panic("vmm_protect_page: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, (addr64_t)va);

	}

	if (!((getPerProc()->spcFlags) & FamVMmode)) {
		act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;	/* Remember last address space */
	}

	return KERN_SUCCESS;								/* Return */
}
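
/*
 * Editorial sketch (not part of the original source, compiled out):
 * write-protecting a guest page so the VM's next store faults back to the
 * monitor, e.g. for copy-on-write style tracking.  The guest address is
 * hypothetical.
 */
#if 0
static kern_return_t example_write_protect(thread_t act, vmm_adsp_id_t index)
{
	addr64_t gva = 0x0000000020000000ULL;				/* Hypothetical guest page */

	return vmm_protect_page(act, index, gva, VM_PROT_READ);	/* Read-only from here on */
}
#endif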


/*-----------------------------------------------------------------------
** vmm_protect_execute
**
** This function sets the protection bits of a mapped page
** and then directly starts executing.
**
**	See description of vmm_protect_page for details
**
** Inputs:
**		See vmm_protect_page and vmm_map_execute
**
** Outputs:
**		Normal exit is to run the VM.  Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_protect_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_protect_execute(
	thread_t			act,
	vmm_thread_index_t	index,
	addr64_t			va,
	vm_prot_t			prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;
	unsigned int		adsp;
	vmm_thread_index_t	cndx;

	cndx = index & 0xFF;								/* Clean it up */
	CEntry = vmm_get_entry(act, cndx);					/* Get and validate the index */
	if (CEntry == NULL) return kVmmBogusContext;		/* Return bogus context */

	adsp = (index >> 8) & 0xFF;							/* Get any requested address space */
	if(!adsp) adsp = (index & 0xFF);					/* If 0, use context ID as address space ID */

	if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
		return kVmmBogusContext;						/* Yes, invalid index in Fam */

	ret = vmm_protect_page(act, adsp, va, prot);		/* Go try to change access */

	if(ret == KERN_SUCCESS) {
		act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;	/* Remember last address space */
		vmm_execute_vm(act, cndx);						/* Return was ok, launch the VM */
	}

	return ret;											/* We had trouble of some kind (shouldn't happen) */

}
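
/*
 * Editorial note (not part of the original source, compiled out): per the
 * decoding above, the combined index carries the context ID in the low byte
 * and an optional address space ID in the next byte (zero meaning "use the
 * context ID").  A caller could compose one like this:
 */
#if 0
static vmm_thread_index_t example_compose_index(vmm_thread_index_t cndx, unsigned int adsp)
{
	return (vmm_thread_index_t)(((adsp & 0xFF) << 8) | (cndx & 0xFF));
}
#endif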


/*-----------------------------------------------------------------------
** vmm_get_float_state
**
** This function causes the current floating point state to
** be saved into the shared context area.  It also clears the
** vmmFloatCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_float_state(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int					i;
	register struct savearea_fpu *sv;

	CEntry = vmm_get_entry(act, index);					/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;			/* Either this isn't vmm thread or the index is bogus */

	act->machine.specFlags &= ~floatCng;				/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;	/* Clear the change indication */

	fpu_save(&CEntry->vmmFacCtx);						/* Save context if live */

	if((sv = CEntry->vmmFacCtx.FPUsave)) {				/* Is there context yet? */
		bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8);	/* 32 registers */
		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {							/* Initialize floating points */
		CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit;	/* Initial value */
	}

	return KERN_SUCCESS;
}
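
/*
 * Editorial sketch (not part of the original source, compiled out): after
 * vmm_get_float_state() returns, the guest FPRs sit in the shared context
 * area; a monitor could read one back like this.  The vmm_state_page_t
 * pointer is assumed to be the caller's alias of the context page.
 */
#if 0
static double example_read_fpr0(vmm_state_page_t *ctx)
{
	return ctx->vmm_proc_state.ppcFPRs[0].d;			/* Guest FPR0, or FloatInit if never touched */
}
#endif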

/*-----------------------------------------------------------------------
** vmm_get_vector_state
**
** This function causes the current vector state to
** be saved into the shared context area.  It also clears the
** vmmVectorCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_vector_state(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int					i, j;
	unsigned int		vrvalidwrk;
	register struct savearea_vec *sv;

	CEntry = vmm_get_entry(act, index);					/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;			/* Either this isn't vmm thread or the index is bogus */

	vec_save(&CEntry->vmmFacCtx);						/* Save context if live */

	act->machine.specFlags &= ~vectorCng;				/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;	/* Clear the change indication */

	if((sv = CEntry->vmmFacCtx.VMXsave)) {				/* Is there context yet? */

		vrvalidwrk = sv->save_vrvalid;					/* Get the valid flags */

		for(i = 0; i < 32; i++) {						/* Copy the saved registers and invalidate the others */
			if(vrvalidwrk & 0x80000000) {				/* Do we have a valid value here? */
				for(j = 0; j < 4; j++) {				/* If so, copy it over */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
				}
			}
			else {
				for(j = 0; j < 4; j++) {				/* Otherwise set to empty value */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
				}
			}

			vrvalidwrk = vrvalidwrk << 1;				/* Shift over to the next */

		}

		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {							/* Initialize vector registers */
		for(j = 0; j < 4; j++) {						/* Do words */
			CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];	/* Initial value */
		}
	}

	return KERN_SUCCESS;
}
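
/*
 * Editorial note (not part of the original source, compiled out): the
 * save_vrvalid mask above is walked MSB-first, so bit 0x80000000 corresponds
 * to VR0.  The equivalent per-register test is:
 */
#if 0
static int example_vr_is_valid(unsigned int vrvalid, int vr)
{
	return (vrvalid & (0x80000000U >> vr)) != 0;		/* vr in 0..31 */
}
#endif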

/*-----------------------------------------------------------------------
** vmm_set_timer
**
** This function sets a timer (in AbsoluteTime) to pop at a specific time.
** It also clears the vmmTimerPop flag if the timer is actually set.
**
** A timer is cleared by setting the time to 0; this also clears the
** vmmTimerPop bit.  Simply setting the timer to earlier than the
** current time clears the internal timer request, but leaves the
** vmmTimerPop flag set.
**
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**		timerhi - high order word of AbsoluteTime to pop
**		timerlo - low order word of AbsoluteTime to pop
**
** Outputs:
**		timer set, vmmTimerPop cleared
-----------------------------------------------------------------------*/

kern_return_t vmm_set_timer(
	thread_t			act,
	vmm_thread_index_t	index,
	unsigned int		timerhi,
	unsigned int		timerlo)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);					/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;			/* Either this isn't vmm thread or the index is bogus */

	CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo;

	vmm_timer_pop(act);									/* Go adjust all of the timer stuff */
	return KERN_SUCCESS;								/* Leave now... */
}
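
/*
 * Editorial sketch (not part of the original source, compiled out): arming a
 * context's timer for one second from now, splitting the 64-bit AbsoluteTime
 * deadline into the two 32-bit halves this call takes.
 */
#if 0
static kern_return_t example_arm_timer(thread_t act, vmm_thread_index_t index)
{
	uint64_t now, ticks, deadline;

	clock_get_uptime(&now);								/* Current AbsoluteTime */
	nanoseconds_to_absolutetime(1000000000ULL, &ticks);	/* One second in ticks */
	deadline = now + ticks;

	return vmm_set_timer(act, index,
		(unsigned int)(deadline >> 32),					/* timerhi */
		(unsigned int)deadline);						/* timerlo */
}
#endif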


/*-----------------------------------------------------------------------
** vmm_get_timer
**
** This function causes the timer for a specified VM to be
** returned in return_params[0] and return_params[1].
** Note that this is a bit awkward for 64-bit VMs: the timer is still
** split across parms 0 and 1, even though those parms are 8 bytes
** wide and could hold the whole value.
**
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Timer value set in return_params[0] and return_params[1].
**		Set to 0 if timer is not set.
-----------------------------------------------------------------------*/

kern_return_t vmm_get_timer(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;

	CEntry = vmm_get_entry(act, index);					/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;			/* Either this isn't vmm thread or the index is bogus */

	if(CEntry->vmmXAFlgs & vmm64Bit) {					/* A 64-bit virtual machine? */
		CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32);	/* Return the last timer value */
		CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the last timer value */
	}
	else {
		CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[0] = (CEntry->vmmTimer >> 32);	/* Return the last timer value */
		CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the last timer value */
	}
	return KERN_SUCCESS;
}


/*-----------------------------------------------------------------------
** vmm_timer_pop
**
** This function causes all timers in the array of VMs to be updated.
** All appropriate flags are set or reset.  If a VM is currently
** running and its timer expired, it is intercepted.
**
** The qactTimer value is set to the lowest unexpired timer.  It is
** zeroed if all timers are expired or have been reset.
**
** Inputs:
**		act - pointer to current thread activation structure
**
** Outputs:
**		timers set, vmmTimerPop cleared or set
-----------------------------------------------------------------------*/

void vmm_timer_pop(
	thread_t			act)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int					cvi, any;
	uint64_t			now, soonest;
	savearea			*sv;

	if(!((unsigned int)act->machine.vmmControl & 0xFFFFFFFE)) {	/* Are there any virtual machines? */
		panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
	}

	soonest = 0xFFFFFFFFFFFFFFFFULL;					/* Max time */

	clock_get_uptime(&now);								/* What time is it? */

	CTable = act->machine.vmmControl;					/* Make this easier */
	any = 0;											/* Haven't found a running unexpired timer yet */

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {		/* Cycle through all and check time now */

		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;	/* Do not check if the entry is empty */

		if(CTable->vmmc[cvi].vmmTimer == 0) {			/* Is the timer reset? */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Clear timer popped */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Clear timer popped */
			continue;									/* Check next */
		}

		if (CTable->vmmc[cvi].vmmTimer <= now) {
			CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;	/* Set timer popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;	/* Set timer popped here */
			if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->machine.vmmCEntry) {	/* Is this the running VM? */
				sv = find_user_regs(act);				/* Get the user state registers */
				if(!sv) {								/* Did we find something? */
					panic("vmm_timer_pop: no user context; act = %08X\n", act);
				}
				sv->save_exception = kVmmReturnNull*4;	/* Indicate that this is a null exception */
				vmm_force_exit(act, sv);				/* Intercept a running VM */
			}
			continue;									/* Check the rest */
		}
		else {											/* It hasn't popped yet */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Set timer not popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Set timer not popped here */
		}

		any = 1;										/* Show we found an active unexpired timer */

		if (CTable->vmmc[cvi].vmmTimer < soonest)
			soonest = CTable->vmmc[cvi].vmmTimer;
	}

	if(any) {
		if (act->machine.qactTimer == 0 || soonest <= act->machine.qactTimer)
			act->machine.qactTimer = soonest;			/* Set lowest timer */
	}

	return;
}


/*-----------------------------------------------------------------------
** vmm_stop_vm
**
** This function prevents the specified VM(s) from running.
** If any is currently executing, the execution is intercepted
** with a code of kVmmStopped.  Note that execution of the VM is
** blocked until a vmmExecuteVM is called with the start flag set to 1.
** This provides the ability for a thread to stop execution of a VM and
** ensure that it will not be run until the emulator has processed the
** "virtual" interruption.
**
** Inputs:
**		vmmask - 32 bit mask corresponding to the VMs to put in stop state
**		NOTE: if this mask is all 0s, any executing VM is intercepted with
**		a kVmmStopped (but not marked stopped), otherwise this is a no-op.
**		Also note that there is a potential race here and the VM may not stop.
**
** Outputs:
**		kernel return code indicating success
**		or if no VMs are enabled, an invalid syscall exception.
-----------------------------------------------------------------------*/

int vmm_stop_vm(struct savearea *save)
{

	thread_t			act;
	vmmCntrlTable		*CTable;
	int					cvi, i;
	task_t				task;
	thread_t			fact;
	unsigned int		vmmask;
	ReturnHandler		*stopapc;

	ml_set_interrupts_enabled(TRUE);					/* This can take a bit of time so pass interruptions */

	task = current_task();								/* Figure out who we are */

	task_lock(task);									/* Lock our task */

	fact = (thread_t)task->threads.next;				/* Get the first activation on task */
	act = 0;											/* Pretend we didn't find it yet */

	for(i = 0; i < task->thread_count; i++) {			/* All of the activations */
		if(fact->machine.vmmControl) {					/* Is this a virtual machine monitor? */
			act = fact;									/* Yeah... */
			break;										/* Bail the loop... */
		}
		fact = (thread_t)fact->task_threads.next;		/* Go to the next one */
	}

	if(!((unsigned int)act)) {							/* See if we have VMMs yet */
		task_unlock(task);								/* No, unlock the task */
		ml_set_interrupts_enabled(FALSE);				/* Set back interruptions */
		return 0;										/* Go generate a syscall exception */
	}

	thread_reference(act);

	task_unlock(task);									/* Safe to release now */

	thread_mtx_lock(act);

	CTable = act->machine.vmmControl;					/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {					/* Are there any all the way up yet? */
		thread_mtx_unlock(act);							/* Unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);				/* Set back interruptions */
		return 0;										/* Go generate a syscall exception */
	}

	if(!(vmmask = save->save_r3)) {						/* Get the stop mask and check if all zeros */
		thread_mtx_unlock(act);							/* Unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);				/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;					/* Set success */
		return 1;										/* Return... */
	}

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {		/* Search slots */
		if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) {	/* See if we need to stop and if it is in use */
			hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop);	/* Set this one to stop */
		}
		vmmask = vmmask << 1;							/* Slide mask over */
	}

	if(hw_compare_and_store(0, 1, &act->machine.emPendRupts)) {	/* See if there is already a stop pending and lock out others if not */
		thread_mtx_unlock(act);							/* Already one pending, unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);				/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;					/* Say we did it... */
		return 1;										/* Leave */
	}

	if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) {	/* Get a return handler control block */
		act->machine.emPendRupts = 0;					/* No memory, say we have given up request */
		thread_mtx_unlock(act);							/* Unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);				/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;			/* No storage... */
		return 1;										/* Return... */
	}

	ml_set_interrupts_enabled(FALSE);					/* Disable interruptions for now */

	stopapc->handler = vmm_interrupt;					/* Set interruption routine */

	stopapc->next = act->handlers;						/* Put our interrupt at the start of the list */
	act->handlers = stopapc;							/* Point to us */

	act_set_apc(act);									/* Set an APC AST */
	ml_set_interrupts_enabled(TRUE);					/* Enable interruptions now */

	thread_mtx_unlock(act);								/* Unlock the activation */
	thread_deallocate(act);

	ml_set_interrupts_enabled(FALSE);					/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;						/* Hip, hip, hooray... */
	return 1;
}
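
/*
 * Editorial note (not part of the original source, compiled out): per the
 * slot scan above, the stop mask is consumed MSB-first, so bit 0x80000000
 * selects context slot 0, the next bit slot 1, and so on.
 */
#if 0
static unsigned int example_stop_mask_for_slot(int cvi)
{
	return 0x80000000U >> cvi;							/* cvi in 0..kVmmMaxContexts-1 */
}
#endif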

/*-----------------------------------------------------------------------
** vmm_interrupt
**
** This function is executed asynchronously from an APC AST.
** It is to be used for anything that needs to interrupt a running VM.
** This includes any kind of interruption generation (other than timer pop)
** or entering the stopped state.
**
** Inputs:
**		ReturnHandler *rh - the return handler control block as required by the APC.
**		thread_t act - the activation
**
** Outputs:
**		Whatever needed to be done is done.
-----------------------------------------------------------------------*/

void vmm_interrupt(ReturnHandler *rh, thread_t act) {

	vmmCntrlTable		*CTable;
	savearea			*sv;
	boolean_t			inter;

	kfree(rh, sizeof(ReturnHandler));					/* Release the return handler block */

	inter = ml_set_interrupts_enabled(FALSE);			/* Disable interruptions for now */

	act->machine.emPendRupts = 0;						/* Say that there are no more interrupts pending */
	CTable = act->machine.vmmControl;					/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) return;			/* Leave if we aren't doing VMs any more... */

	if(act->machine.vmmCEntry && (act->machine.vmmCEntry->vmmFlags & vmmXStop)) {	/* Do we need to stop the running guy? */
		sv = find_user_regs(act);						/* Get the user state registers */
		if(!sv) {										/* Did we find something? */
			panic("vmm_interrupt: no user context; act = %08X\n", act);
		}
		sv->save_exception = kVmmStopped*4;				/* Set a "stopped" exception */
		vmm_force_exit(act, sv);						/* Intercept a running VM */
	}
	ml_set_interrupts_enabled(inter);					/* Put interrupts back to what they were */

	return;
}