]> git.saurik.com Git - apple/xnu.git/blame - osfmk/ppc/vmachmon.c
xnu-792.10.96.tar.gz
[apple/xnu.git] / osfmk / ppc / vmachmon.c
CommitLineData
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
1c79356b
A
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
37839358
A
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
1c79356b 11 *
37839358
A
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
37839358
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
1c79356b
A
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*-----------------------------------------------------------------------
23** vmachmon.c
24**
25** C routines that we are adding to the MacOS X kernel.
26**
1c79356b
A
27-----------------------------------------------------------------------*/
28
29#include <mach/mach_types.h>
30#include <mach/kern_return.h>
31#include <mach/host_info.h>
32#include <kern/kern_types.h>
91447636 33#include <kern/kalloc.h>
1c79356b
A
34#include <kern/host.h>
35#include <kern/task.h>
36#include <kern/thread.h>
37#include <ppc/exception.h>
38#include <ppc/mappings.h>
91447636 39#include <ppc/thread.h>
1c79356b 40#include <vm/vm_kern.h>
91447636 41#include <vm/vm_fault.h>
1c79356b
A
42
43#include <ppc/vmachmon.h>
91447636 44#include <ppc/lowglobals.h>
1c79356b 45
1c79356b
A
46extern double FloatInit;
47extern unsigned long QNaNbarbarian[4];
48
49/*************************************************************************************
50 Virtual Machine Monitor Internal Routines
51**************************************************************************************/
52
53/*-----------------------------------------------------------------------
54** vmm_get_entry
55**
56** This function verifies and return a vmm context entry index
57**
58** Inputs:
59** act - pointer to current thread activation
60** index - index into vmm control table (this is a "one based" value)
61**
62** Outputs:
63** address of a vmmCntrlEntry or 0 if not found
64-----------------------------------------------------------------------*/
65
91447636
A
66static vmmCntrlEntry *vmm_get_entry(
67 thread_t act,
1c79356b
A
68 vmm_thread_index_t index)
69{
70 vmmCntrlTable *CTable;
71 vmmCntrlEntry *CEntry;
72
55e303ae
A
73 index = index & vmmTInum; /* Clean up the index */
74
91447636 75 if (act->machine.vmmControl == 0) return NULL; /* No control table means no vmm */
55e303ae 76 if ((index - 1) >= kVmmMaxContexts) return NULL; /* Index not in range */
1c79356b 77
91447636 78 CTable = act->machine.vmmControl; /* Make the address a bit more convienient */
1c79356b
A
79 CEntry = &CTable->vmmc[index - 1]; /* Point to the entry */
80
81 if (!(CEntry->vmmFlags & vmmInUse)) return NULL; /* See if the slot is actually in use */
82
83 return CEntry;
84}
85
55e303ae
A
86/*-----------------------------------------------------------------------
87** vmm_get_adsp
88**
89** This function verifies and returns the pmap for an address space.
90** If there is none and the request is valid, a pmap will be created.
91**
92** Inputs:
93** act - pointer to current thread activation
94** index - index into vmm control table (this is a "one based" value)
95**
96** Outputs:
97** address of a pmap or 0 if not found or could no be created
98** Note that if there is no pmap for the address space it will be created.
99-----------------------------------------------------------------------*/
100
91447636 101static pmap_t vmm_get_adsp(thread_t act, vmm_thread_index_t index)
55e303ae
A
102{
103 pmap_t pmap;
104
91447636 105 if (act->machine.vmmControl == 0) return NULL; /* No control table means no vmm */
55e303ae
A
106 if ((index - 1) >= kVmmMaxContexts) return NULL; /* Index not in range */
107
91447636
A
108 pmap = act->machine.vmmControl->vmmAdsp[index - 1]; /* Get the pmap */
109 return (pmap); /* and return it. */
110}
55e303ae 111
91447636
A
/*-----------------------------------------------------------------------
** vmm_build_shadow_hash
**
** Allocate and initialize a shadow hash table.
**
** Allocates a page-sized pmap_vmm_ext block, carves the physical-page
** index, page list, and active-mapping bitmap out of its tail, then
** allocates the (possibly multi-page, non-contiguous) hash table page
** by page and marks every slot as a free guest mapping.
**
** This function assumes that PAGE_SIZE is 4k-bytes.
**
** Returns the new extension block, or NULL if any allocation fails.
-----------------------------------------------------------------------*/
static pmap_vmm_ext *vmm_build_shadow_hash(pmap_t pmap)
{
	pmap_vmm_ext	*ext;						/* VMM pmap extension we're building */
	ppnum_t			extPP;						/* VMM pmap extension physical page number */
	kern_return_t	ret;						/* Return code from various calls */
	uint32_t		pages = GV_HPAGES;			/* Number of pages in the hash table */
	vm_offset_t		free = VMX_HPIDX_OFFSET;	/* Offset into extension page of free area (128-byte aligned) */
	uint32_t		freeSize = PAGE_SIZE - free;	/* Number of free bytes in the extension page */

	/* The physical-page index plus the page list must fit in the free tail of
	   the extension page; if the geometry constants ever violate that, stop hard. */
	if ((pages * sizeof(addr64_t)) + (pages * sizeof(vm_offset_t)) > freeSize) {
		panic("vmm_build_shadow_hash: too little pmap_vmm_ext free space\n");
	}

	ret = kmem_alloc_wired(kernel_map, (vm_offset_t *)&ext, PAGE_SIZE);
												/* Allocate a page-sized extension block */
	if (ret != KERN_SUCCESS) return (NULL);		/* Return NULL for failed allocate */
	bzero((char *)ext, PAGE_SIZE);				/* Zero the entire extension block page */

	extPP = pmap_find_phys(kernel_pmap, (vm_offset_t)ext);
												/* Get extension block's physical page number */
	if (!extPP) {								/* This should not fail, but then again... */
		panic("vmm_build_shadow_hash: could not translate pmap_vmm_ext vaddr %08X\n", ext);
	}

	ext->vmxSalt = (addr64_t)(vm_offset_t)ext ^ ptoa_64(extPP);
												/* Set effective<->physical conversion salt */
	ext->vmxHostPmapPhys = (addr64_t)(vm_offset_t)pmap ^ pmap->pmapvr;
												/* Set host pmap's physical address */
	ext->vmxHostPmap = pmap;					/* Set host pmap's effective address */
	ext->vmxHashPgIdx = (addr64_t *)((vm_offset_t)ext + VMX_HPIDX_OFFSET);
												/* Allocate physical index */
	ext->vmxHashPgList = (vm_offset_t *)((vm_offset_t)ext + VMX_HPLIST_OFFSET);
												/* Allocate page list */
	ext->vmxActiveBitmap = (vm_offset_t *)((vm_offset_t)ext + VMX_ACTMAP_OFFSET);
												/* Allocate active mapping bitmap */

	/* The hash table is typically larger than a single page, but we don't require it to be in a
	   contiguous virtual or physical chunk. So, we allocate it page by page, noting the effective and
	   physical address of each page in vmxHashPgList and vmxHashPgIdx, respectively. */
	uint32_t idx;
	for (idx = 0; idx < pages; idx++) {
		ret = kmem_alloc_wired(kernel_map, &ext->vmxHashPgList[idx], PAGE_SIZE);
												/* Allocate a hash-table page */
		if (ret != KERN_SUCCESS) goto fail;		/* Allocation failed, exit through cleanup */
		bzero((char *)ext->vmxHashPgList[idx], PAGE_SIZE);	/* Zero the page */
		ext->vmxHashPgIdx[idx] = ptoa_64(pmap_find_phys(kernel_pmap, (addr64_t)ext->vmxHashPgList[idx]));
												/* Put page's physical address into index */
		if (!ext->vmxHashPgIdx[idx]) {			/* Hash-table page's LRA failed */
			panic("vmm_build_shadow_hash: could not translate hash-table vaddr %08X\n", ext->vmxHashPgList[idx]);
		}
		mapping_t *map = (mapping_t *)ext->vmxHashPgList[idx];
		uint32_t mapIdx;
		for (mapIdx = 0; mapIdx < GV_SLTS_PPG; mapIdx++) {	/* Iterate over mappings in this page */
			map->mpFlags = (mpGuest | mpgFree);	/* Mark guest type and free */
			map = (mapping_t *)((char *)map + GV_SLOT_SZ);	/* Next slot-sized mapping */
		}
	}

	return (ext);								/* Return newly-minted VMM pmap extension */

fail:
	/* The whole block was zeroed right after allocation, so entries for pages
	   we never got remain 0 and are skipped here. */
	for (idx = 0; idx < pages; idx++) {			/* De-allocate any pages we managed to allocate */
		if (ext->vmxHashPgList[idx]) {
			kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
		}
	}
	kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE);	/* Release the VMM pmap extension page */
	return (NULL);								/* Return NULL for failure */
}
189
190
191/*-----------------------------------------------------------------------
192** vmm_release_shadow_hash
193**
194** Release shadow hash table and VMM extension block
195**
196-----------------------------------------------------------------------*/
197static void vmm_release_shadow_hash(pmap_vmm_ext *ext)
198{
199 uint32_t idx;
200
201 for (idx = 0; idx < GV_HPAGES; idx++) { /* Release the hash table page by page */
202 kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
203 }
204
205 kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE); /* Release the VMM pmap extension page */
206}
207
208/*-----------------------------------------------------------------------
209** vmm_activate_gsa
210**
211** Activate guest shadow assist
212**
213-----------------------------------------------------------------------*/
214static kern_return_t vmm_activate_gsa(
215 thread_t act,
216 vmm_thread_index_t index)
217{
218 vmmCntrlTable *CTable = act->machine.vmmControl; /* Get VMM control table */
219 if (!CTable) { /* Caller guarantees that this will work */
220 panic("vmm_activate_gsa: VMM control table not present; act = %08X, idx = %d\n",
221 act, index);
222 return KERN_FAILURE;
223 }
224 vmmCntrlEntry *CEntry = vmm_get_entry(act, index); /* Get context from index */
225 if (!CEntry) { /* Caller guarantees that this will work */
226 panic("vmm_activate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
227 act, index);
228 return KERN_FAILURE;
229 }
230
231 pmap_t hpmap = act->map->pmap; /* Get host pmap */
232 pmap_t gpmap = vmm_get_adsp(act, index); /* Get guest pmap */
233 if (!gpmap) { /* Caller guarantees that this will work */
234 panic("vmm_activate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
235 act, index);
236 return KERN_FAILURE;
237 }
238
239 if (!hpmap->pmapVmmExt) { /* If there's no VMM extension for this host, create one */
240 hpmap->pmapVmmExt = vmm_build_shadow_hash(hpmap); /* Build VMM extension plus shadow hash and attach */
241 if (hpmap->pmapVmmExt) { /* See if we succeeded */
242 hpmap->pmapVmmExtPhys = (addr64_t)(vm_offset_t)hpmap->pmapVmmExt ^ hpmap->pmapVmmExt->vmxSalt;
243 /* Get VMM extensions block physical address */
244 } else {
245 return KERN_RESOURCE_SHORTAGE; /* Not enough mojo to go */
246 }
247 }
248 gpmap->pmapVmmExt = hpmap->pmapVmmExt; /* Copy VMM extension block virtual address into guest */
249 gpmap->pmapVmmExtPhys = hpmap->pmapVmmExtPhys; /* and its physical address, too */
250 gpmap->pmapFlags |= pmapVMgsaa; /* Enable GSA for this guest */
251 CEntry->vmmXAFlgs |= vmmGSA; /* Show GSA active here, too */
252
253 return KERN_SUCCESS;
254}
255
256
257/*-----------------------------------------------------------------------
258** vmm_deactivate_gsa
259**
260** Deactivate guest shadow assist
261**
262-----------------------------------------------------------------------*/
263static void vmm_deactivate_gsa(
264 thread_t act,
265 vmm_thread_index_t index)
266{
267 vmmCntrlEntry *CEntry = vmm_get_entry(act, index); /* Get context from index */
268 if (!CEntry) { /* Caller guarantees that this will work */
269 panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
270 act, index);
271 return KERN_FAILURE;
272 }
273
274 pmap_t gpmap = vmm_get_adsp(act, index); /* Get guest pmap */
275 if (!gpmap) { /* Caller guarantees that this will work */
276 panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
277 act, index);
278 return KERN_FAILURE;
279 }
280
281 gpmap->pmapFlags &= ~pmapVMgsaa; /* Deactivate GSA for this guest */
282 CEntry->vmmXAFlgs &= ~vmmGSA; /* Show GSA deactivated here, too */
55e303ae
A
283}
284
1c79356b 285
91447636
A
/*-----------------------------------------------------------------------
** vmm_flush_context
**
** Flush specified guest context, purging all guest mappings and clearing
** the context page.
**
** Releases any saved floating-point and vector context, unmaps every
** page mapped for the context, and resets the context entry and the
** kernel-visible state page, preserving only the in-use flag and the
** interface version.
-----------------------------------------------------------------------*/
static void vmm_flush_context(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	vmm_state_page_t	*vks;
	vmm_version_t		version;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (!CEntry) {								/* Caller guarantees that this will work */
		panic("vmm_flush_context: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
			act, index);
		return;
	}

	if(CEntry->vmmFacCtx.FPUsave) {				/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {				/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}

	vmm_unmap_all_pages(act, index);			/* Blow away all mappings for this context */

	CTable = act->machine.vmmControl;			/* Get the control table address */
	CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;	/* Make sure we don't try to automap into this */

	CEntry->vmmFlags &= vmmInUse;				/* Clear out all of the flags for this entry except in use */
	CEntry->vmmFacCtx.FPUsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;				/* Clear facility context control */

	vks = CEntry->vmmContextKern;				/* Get address of the context page */
	version = vks->interface_version;			/* Save the version code */
	bzero((char *)vks, 4096);					/* Clear all; NOTE(review): hard-codes a 4k state page rather than PAGE_SIZE — confirm intended */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = index % vmmTInum;		/* Tell the user the index for this virtual machine */
												/* NOTE(review): vmm_get_entry masks with '& vmmTInum'; the '%' here differs — confirm intended */

	return;										/* Context is now flushed */
}
341
1c79356b
A
342
343/*************************************************************************************
344 Virtual Machine Monitor Exported Functionality
345
346 The following routines are used to implement a quick-switch mechanism for
347 virtual machines that need to execute within their own processor envinroment
348 (including register and MMU state).
349**************************************************************************************/
350
351/*-----------------------------------------------------------------------
352** vmm_get_version
353**
354** This function returns the current version of the virtual machine
355** interface. It is divided into two portions. The top 16 bits
356** represent the major version number, and the bottom 16 bits
357** represent the minor version number. Clients using the Vmm
358** functionality should make sure they are using a verison new
359** enough for them.
360**
361** Inputs:
362** none
363**
364** Outputs:
365** 32-bit number representing major/minor version of
366** the Vmm module
367-----------------------------------------------------------------------*/
368
369int vmm_get_version(struct savearea *save)
370{
371 save->save_r3 = kVmmCurrentVersion; /* Return the version */
372 return 1;
373}
374
375
376/*-----------------------------------------------------------------------
377** Vmm_get_features
378**
379** This function returns a set of flags that represents the functionality
380** supported by the current verison of the Vmm interface. Clients should
381** use this to determine whether they can run on this system.
382**
383** Inputs:
384** none
385**
386** Outputs:
387** 32-bit number representing functionality supported by this
388** version of the Vmm module
389-----------------------------------------------------------------------*/
390
391int vmm_get_features(struct savearea *save)
392{
393 save->save_r3 = kVmmCurrentFeatures; /* Return the features */
91447636 394 if(getPerProc()->pf.Available & pf64Bit) {
55e303ae
A
395 save->save_r3 &= ~kVmmFeature_LittleEndian; /* No little endian here */
396 save->save_r3 |= kVmmFeature_SixtyFourBit; /* Set that we can do 64-bit */
397 }
1c79356b
A
398 return 1;
399}
400
401
55e303ae
A
402/*-----------------------------------------------------------------------
403** vmm_max_addr
404**
405** This function returns the maximum addressable virtual address sported
406**
407** Outputs:
408** Returns max address
409-----------------------------------------------------------------------*/
410
91447636 411addr64_t vmm_max_addr(thread_t act)
55e303ae
A
412{
413 return vm_max_address; /* Return the maximum address */
414}
415
416/*-----------------------------------------------------------------------
417** vmm_get_XA
418**
419** This function retrieves the eXtended Architecture flags for the specifed VM.
420**
421** We need to return the result in the return code rather than in the return parameters
422** because we need an architecture independent format so the results are actually
423** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
424** 4 for 32-bit.
425**
426**
427** Inputs:
428** act - pointer to current thread activation structure
429** index - index returned by vmm_init_context
430**
431** Outputs:
432** Return code is set to the XA flags. If the index is invalid or the
433** context has not been created, we return 0.
434-----------------------------------------------------------------------*/
435
436unsigned int vmm_get_XA(
91447636 437 thread_t act,
55e303ae
A
438 vmm_thread_index_t index)
439{
440 vmmCntrlEntry *CEntry;
441
442 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
443 if (CEntry == NULL) return 0; /* Either this isn't a vmm or the index is bogus */
444
445 return CEntry->vmmXAFlgs; /* Return the flags */
446}
447
1c79356b
A
/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**		act - pointer to current thread activation
**		version - requested version of the Vmm interface (allowing
**			future versions of the interface to change, but still
**			support older clients)
**		vmm_user_state - pointer to a logical page within the
**			client's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

int vmm_init_context(struct savearea *save)
{

	thread_t			act;
	vmm_version_t		version;
	vmm_state_page_t *	vmm_user_state;
	vmmCntrlTable		*CTable;
	vm_offset_t			conkern;
	vmm_state_page_t *	vks;
	ppnum_t				conphys;
	kern_return_t		ret;
	int					cvi, i;
	task_t				task;
	thread_t			fact, gact;

	vmm_user_state = CAST_DOWN(vmm_state_page_t *, save->save_r4);	/* Get the user address of the comm area */
	if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {	/* Make sure the comm area is page aligned */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	/* Make sure that the version requested is supported */
	version = save->save_r3;					/* Pick up passed in version */
	if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	if((version & 0xFFFF) > kVmmCurMinorVersion) {	/* Check for valid minor */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	act = current_thread();						/* Pick up our activation */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();						/* Figure out who we are */

	task_lock(task);							/* Lock our task */

	fact = (thread_t)task->threads.next;		/* Get the first activation on task */
	gact = 0;									/* Pretend we didn't find it yet */

	for(i = 0; i < task->thread_count; i++) {	/* All of the activations */
		if(fact->machine.vmmControl) {			/* Is this a virtual machine monitor? */
			gact = fact;						/* Yeah... */
			break;								/* Bail the loop... */
		}
		fact = (thread_t)fact->task_threads.next;	/* Go to the next one */
	}


/*
 *	We only allow one thread per task to be a virtual machine monitor right now.  This solves
 *	a number of potential problems that I can't put my finger on right now.
 *
 *	Utlimately, I think we want to move the controls and make all this task based instead of
 *	thread based.  That would allow an emulator architecture to spawn a kernel thread for each
 *	VM (if they want) rather than hand dispatch contexts.
 */

	if(gact && (gact != act)) {					/* Check if another thread is a vmm or trying to be */
		task_unlock(task);						/* Release task lock */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_FAILURE;			/* We must play alone... */
		return 1;
	}

	/* The sentinel value 1 marks us as "becoming a vmm" while the lock is dropped;
	   it is replaced with a real table (or cleared) below. */
	if(!gact) act->machine.vmmControl = (vmmCntrlTable *)1;	/* Temporarily mark that we are the vmm thread */

	task_unlock(task);							/* Safe to release now (because we've marked ourselves) */

	CTable = act->machine.vmmControl;			/* Get the control table address */
	if ((unsigned int)CTable == 1) {			/* If we are marked, try to allocate a new table, otherwise we have one */
		if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {	/* Get a fresh emulation control table */
			act->machine.vmmControl = 0;		/* Unmark us as vmm 'cause we failed */
			ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
			save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
			return 1;
		}

		bzero((void *)CTable, sizeof(vmmCntrlTable));	/* Clean it up */
		act->machine.vmmControl = CTable;		/* Initialize the table anchor */
	}

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search to find a free slot */
		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;	/* Bail if we find an unused slot */
	}

	if(cvi >= kVmmMaxContexts) {				/* Did we find one? */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No empty slots... */
		return 1;
	}

	ret = vm_map_wire(							/* Wire the virtual machine monitor's context area */
		act->map,
		(vm_offset_t)vmm_user_state,
		(vm_offset_t)vmm_user_state + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if (ret != KERN_SUCCESS)					/* The wire failed, return the code */
		goto return_in_shame;

	/* Map the vmm state into the kernel's address space. */
	conphys = pmap_find_phys(act->map->pmap, (addr64_t)((uintptr_t)vmm_user_state));

	/* Find a virtual address to use. */
	ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {					/* Did we find an address? */
		(void) vm_map_unwire(act->map,			/* No, unwire the context area */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		goto return_in_shame;
	}

	/* Map it into the kernel's address space. */

	pmap_enter(kernel_pmap, conkern, conphys,
		VM_PROT_READ | VM_PROT_WRITE,
		VM_WIMG_USE_DEFAULT, TRUE);

	/* Clear the vmm state structure. */
	vks = (vmm_state_page_t *)conkern;
	bzero((char *)vks, PAGE_SIZE);


	/* We're home free now. Simply fill in the necessary info and return. */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = cvi + 1;				/* Tell the user the index for this virtual machine */

	CTable->vmmc[cvi].vmmFlags = vmmInUse;		/* Mark the slot in use and make sure the rest are clear */
	CTable->vmmc[cvi].vmmContextKern = vks;		/* Remember the kernel address of comm area */
	CTable->vmmc[cvi].vmmContextPhys = conphys;	/* Remember the state page physical addr */
	CTable->vmmc[cvi].vmmContextUser = vmm_user_state;	/* Remember user address of comm area */

	CTable->vmmc[cvi].vmmFacCtx.FPUsave = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUlevel = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXsave = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXlevel = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.facAct = act;	/* Point back to the activation */

	hw_atomic_add((int *)&saveanchor.savetarget, 2);	/* Account for the number of extra saveareas we think we might "need" */

	pmap_t hpmap = act->map->pmap;				/* Get host pmap */
	pmap_t gpmap = pmap_create(0, FALSE);		/* Make a fresh guest pmap */
	if (gpmap) {								/* Did we succeed ? */
		CTable->vmmAdsp[cvi] = gpmap;			/* Remember guest pmap for new context */
		if (lowGlo.lgVMMforcedFeats & vmmGSA) {	/* Forcing on guest shadow assist ? */
			vmm_activate_gsa(act, cvi+1);		/* Activate GSA */
		}
	} else {
		ret = KERN_RESOURCE_SHORTAGE;			/* We've failed to allocate a guest pmap */
		goto return_in_shame;					/* Shame on us. */
	}

	if (!(hpmap->pmapFlags & pmapVMhost)) {		/* Do this stuff if this is our first time hosting */
		hpmap->pmapFlags |= pmapVMhost;			/* We're now hosting */
	}

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, horay... */
	return 1;

return_in_shame:
	/* NOTE(review): CTable is freed only when we allocated it in this call (!gact);
	   if the pmap_create above failed we leak the wired comm page and kernel mapping —
	   confirm whether that is acceptable for this error path. */
	if(!gact) kfree(CTable, sizeof(vmmCntrlTable));	/* Toss the table if we just allocated it */
	act->machine.vmmControl = 0;				/* Unmark us as vmm 'cause we failed */
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = ret;						/* Pass back return code... */
	return 1;

}
646
647
648/*-----------------------------------------------------------------------
649** vmm_tear_down_context
650**
651** This function uninitializes an emulation context. It deallocates
652** internal resources associated with the context block.
653**
654** Inputs:
655** act - pointer to current thread activation structure
656** index - index returned by vmm_init_context
657**
658** Outputs:
659** kernel return code indicating success or failure
55e303ae
A
660**
661** Strangeness note:
662** This call will also trash the address space with the same ID. While this
663** is really not too cool, we have to do it because we need to make
664** sure that old VMM users (not that we really have any) who depend upon
665** the address space going away with the context still work the same.
1c79356b
A
666-----------------------------------------------------------------------*/
667
668kern_return_t vmm_tear_down_context(
91447636 669 thread_t act,
1c79356b
A
670 vmm_thread_index_t index)
671{
672 vmmCntrlEntry *CEntry;
673 vmmCntrlTable *CTable;
674 int cvi;
675 register savearea *sv;
676
91447636
A
677 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
678 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */
1c79356b 679
91447636 680 ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */
1c79356b 681
9bccf70c 682 hw_atomic_sub((int *)&saveanchor.savetarget, 2); /* We don't need these extra saveareas anymore */
1c79356b 683
91447636
A
684 if(CEntry->vmmFacCtx.FPUsave) { /* Is there any floating point context? */
685 toss_live_fpu(&CEntry->vmmFacCtx); /* Get rid of any live context here */
9bccf70c 686 save_release((savearea *)CEntry->vmmFacCtx.FPUsave); /* Release it */
1c79356b
A
687 }
688
91447636
A
689 if(CEntry->vmmFacCtx.VMXsave) { /* Is there any vector context? */
690 toss_live_vec(&CEntry->vmmFacCtx); /* Get rid of any live context here */
9bccf70c 691 save_release((savearea *)CEntry->vmmFacCtx.VMXsave); /* Release it */
1c79356b 692 }
55e303ae 693
91447636
A
694 CEntry->vmmPmap = 0; /* Remove this trace */
695 pmap_t gpmap = act->machine.vmmControl->vmmAdsp[index - 1];
696 /* Get context's guest pmap (if any) */
697 if (gpmap) { /* Check if there is an address space assigned here */
698 if (gpmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist case specially */
699 hw_rem_all_gv(gpmap); /* Remove all guest mappings from shadow hash table */
700 } else {
701 mapping_remove(gpmap, 0xFFFFFFFFFFFFF000LL);/* Remove final page explicitly because we might have mapped it */
702 pmap_remove(gpmap, 0, 0xFFFFFFFFFFFFF000LL);/* Remove all entries from this map */
703 }
704 pmap_destroy(gpmap); /* Toss the pmap for this context */
705 act->machine.vmmControl->vmmAdsp[index - 1] = NULL; /* Clean it up */
55e303ae 706 }
1c79356b
A
707
708 (void) vm_map_unwire( /* Unwire the user comm page */
709 act->map,
710 (vm_offset_t)CEntry->vmmContextUser,
711 (vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
712 FALSE);
713
714 kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE); /* Remove kernel's view of the comm page */
715
91447636 716 CTable = act->machine.vmmControl; /* Get the control table address */
55e303ae
A
717 CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp; /* Make sure we don't try to automap into this */
718
1c79356b 719 CEntry->vmmFlags = 0; /* Clear out all of the flags for this entry including in use */
1c79356b
A
720 CEntry->vmmContextKern = 0; /* Clear the kernel address of comm area */
721 CEntry->vmmContextUser = 0; /* Clear the user address of comm area */
9bccf70c
A
722
723 CEntry->vmmFacCtx.FPUsave = 0; /* Clear facility context control */
724 CEntry->vmmFacCtx.FPUlevel = 0; /* Clear facility context control */
725 CEntry->vmmFacCtx.FPUcpu = 0; /* Clear facility context control */
726 CEntry->vmmFacCtx.VMXsave = 0; /* Clear facility context control */
727 CEntry->vmmFacCtx.VMXlevel = 0; /* Clear facility context control */
728 CEntry->vmmFacCtx.VMXcpu = 0; /* Clear facility context control */
729 CEntry->vmmFacCtx.facAct = 0; /* Clear facility context control */
1c79356b 730
55e303ae 731 for(cvi = 0; cvi < kVmmMaxContexts; cvi++) { /* Search to find a free slot */
1c79356b
A
732 if(CTable->vmmc[cvi].vmmFlags & vmmInUse) { /* Return if there are still some in use */
733 ml_set_interrupts_enabled(FALSE); /* No more interruptions */
734 return KERN_SUCCESS; /* Leave... */
735 }
736 }
737
55e303ae
A
738/*
739 * When we have tossed the last context, toss any address spaces left over before releasing
740 * the VMM control block
741 */
742
743 for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) { /* Look at all slots */
91447636
A
744 if(!act->machine.vmmControl->vmmAdsp[index - 1]) continue; /* Nothing to remove here */
745 mapping_remove(act->machine.vmmControl->vmmAdsp[index - 1], 0xFFFFFFFFFFFFF000LL); /* Remove final page explicitly because we might have mapped it */
746 pmap_remove(act->machine.vmmControl->vmmAdsp[index - 1], 0, 0xFFFFFFFFFFFFF000LL); /* Remove all entries from this map */
747 pmap_destroy(act->machine.vmmControl->vmmAdsp[index - 1]); /* Toss the pmap for this context */
748 act->machine.vmmControl->vmmAdsp[index - 1] = 0; /* Clear just in case */
749 }
750
751 pmap_t pmap = act->map->pmap; /* Get our pmap */
752 if (pmap->pmapVmmExt) { /* Release any VMM pmap extension block and shadow hash table */
753 vmm_release_shadow_hash(pmap->pmapVmmExt); /* Release extension block and shadow hash table */
754 pmap->pmapVmmExt = 0; /* Forget extension block */
755 pmap->pmapVmmExtPhys = 0; /* Forget extension block's physical address, too */
756 }
757 pmap->pmapFlags &= ~pmapVMhost; /* We're no longer hosting */
55e303ae 758
91447636
A
759 kfree(CTable, sizeof(vmmCntrlTable)); /* Toss the table because to tossed the last context */
760 act->machine.vmmControl = 0; /* Unmark us as vmm */
1c79356b
A
761
762 ml_set_interrupts_enabled(FALSE); /* No more interruptions */
763
764 return KERN_SUCCESS;
765}
766
55e303ae
A
767
768/*-----------------------------------------------------------------------
91447636 769** vmm_activate_XA
55e303ae 770**
** This function activates the eXtended Architecture flags for the specified VM.
55e303ae
A
772**
773** We need to return the result in the return code rather than in the return parameters
774** because we need an architecture independent format so the results are actually
775** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
776** 4 for 32-bit.
777**
778** Note that this function does a lot of the same stuff as vmm_tear_down_context
779** and vmm_init_context.
780**
781** Inputs:
782** act - pointer to current thread activation structure
783** index - index returned by vmm_init_context
784** flags - the extended architecture flags
785**
786**
787** Outputs:
788** KERN_SUCCESS if vm is valid and initialized. KERN_FAILURE if not.
789** Also, the internal flags are set and, additionally, the VM is completely reset.
790-----------------------------------------------------------------------*/
91447636
A
791kern_return_t vmm_activate_XA(
792 thread_t act,
55e303ae
A
793 vmm_thread_index_t index,
794 unsigned int xaflags)
795{
796 vmmCntrlEntry *CEntry;
91447636 797 kern_return_t result = KERN_SUCCESS; /* Assume success */
55e303ae 798
91447636
A
799 if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && (!getPerProc()->pf.Available & pf64Bit)))
800 return (KERN_FAILURE); /* Unknown or unsupported feature requested */
801
55e303ae
A
802 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
803 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't a vmm or the index is bogus */
804
805 ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */
806
91447636 807 vmm_flush_context(act, index); /* Flush the context */
55e303ae 808
91447636
A
809 if (xaflags & vmm64Bit) { /* Activating 64-bit mode ? */
810 CEntry->vmmXAFlgs |= vmm64Bit; /* Activate 64-bit mode */
55e303ae 811 }
55e303ae 812
91447636
A
813 if (xaflags & vmmGSA) { /* Activating guest shadow assist ? */
814 result = vmm_activate_gsa(act, index); /* Activate guest shadow assist */
815 }
55e303ae 816
91447636
A
817 ml_set_interrupts_enabled(FALSE); /* No more interruptions */
818
819 return result; /* Return activate result */
820}
55e303ae 821
91447636
A
822/*-----------------------------------------------------------------------
823** vmm_deactivate_XA
824**
825-----------------------------------------------------------------------*/
826kern_return_t vmm_deactivate_XA(
827 thread_t act,
828 vmm_thread_index_t index,
829 unsigned int xaflags)
830{
831 vmmCntrlEntry *CEntry;
832 kern_return_t result = KERN_SUCCESS; /* Assume success */
833
834 if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && (getPerProc()->pf.Available & pf64Bit)))
835 return (KERN_FAILURE); /* Unknown or unsupported feature requested */
836
837 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
838 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't a vmm or the index is bogus */
839
840 ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */
841
842 vmm_flush_context(act, index); /* Flush the context */
843
844 if (xaflags & vmm64Bit) { /* Deactivating 64-bit mode ? */
845 CEntry->vmmXAFlgs &= ~vmm64Bit; /* Deactivate 64-bit mode */
846 }
847
848 if (xaflags & vmmGSA) { /* Deactivating guest shadow assist ? */
849 vmm_deactivate_gsa(act, index); /* Deactivate guest shadow assist */
850 }
55e303ae
A
851
852 ml_set_interrupts_enabled(FALSE); /* No more interruptions */
853
91447636 854 return result; /* Return deactivate result */
55e303ae
A
855}
856
857
1c79356b
A
858/*-----------------------------------------------------------------------
859** vmm_tear_down_all
860**
861** This function uninitializes all emulation contexts. If there are
862** any vmm contexts, it calls vmm_tear_down_context for each one.
863**
864** Note: this can also be called from normal thread termination. Because of
865** that, we will context switch out of an alternate if we are currenty in it.
866** It will be terminated with no valid return code set because we don't expect
867** the activation to ever run again.
868**
869** Inputs:
870** activation to tear down
871**
872** Outputs:
873** All vmm contexts released and VMM shut down
874-----------------------------------------------------------------------*/
91447636 875void vmm_tear_down_all(thread_t act) {
1c79356b
A
876
877 vmmCntrlTable *CTable;
878 int cvi;
879 kern_return_t ret;
880 savearea *save;
881 spl_t s;
882
91447636 883 if(act->machine.specFlags & runningVM) { /* Are we actually in a context right now? */
9bccf70c 884 save = find_user_regs(act); /* Find the user state context */
1c79356b
A
885 if(!save) { /* Did we find it? */
886 panic("vmm_tear_down_all: runningVM marked but no user state context\n");
887 return;
888 }
889
0b4e3aa0 890 save->save_exception = kVmmBogusContext*4; /* Indicate that this context is bogus now */
1c79356b
A
891 s = splhigh(); /* Make sure interrupts are off */
892 vmm_force_exit(act, save); /* Force and exit from VM state */
893 splx(s); /* Restore interrupts */
894 }
895
91447636 896 if(CTable = act->machine.vmmControl) { /* Do we have a vmm control block? */
1c79356b 897
55e303ae
A
898
899 for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) { /* Look at all slots */
1c79356b
A
900 if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) { /* Is this one in use */
901 ret = vmm_tear_down_context(act, cvi); /* Take down the found context */
902 if(ret != KERN_SUCCESS) { /* Did it go away? */
903 panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
904 ret, act, cvi);
905 }
906 }
907 }
55e303ae
A
908
909/*
910 * Note that all address apces should be gone here.
911 */
91447636 912 if(act->machine.vmmControl) { /* Did we find one? */
1c79356b
A
913 panic("vmm_tear_down_all: control table did not get deallocated\n"); /* Table did not go away */
914 }
915 }
916
917 return;
918}
919
920/*-----------------------------------------------------------------------
921** vmm_map_page
922**
923** This function maps a page from within the client's logical
55e303ae 924** address space into the alternate address space.
1c79356b
A
925**
926** The page need not be locked or resident. If not resident, it will be faulted
927** in by this code, which may take some time. Also, if the page is not locked,
928** it, and this mapping may disappear at any time, even before it gets used. Note also
929** that reference and change information is NOT preserved when a page is unmapped, either
930** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
931** space). This means that if RC is needed, the page MUST be wired.
932**
933** Note that if there is already a mapping at the address, it is removed and all
934** information (including RC) is lost BEFORE an attempt is made to map it. Also,
935** if the map call fails, the old address is still unmapped..
936**
937** Inputs:
938** act - pointer to current thread activation
55e303ae 939** index - index of address space to map into
1c79356b 940** va - virtual address within the client's address
0b4e3aa0 941** space
1c79356b 942** ava - virtual address within the alternate address
0b4e3aa0 943** space
1c79356b
A
944** prot - protection flags
945**
946** Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
947** areas are not allowed and will fail. Same with directly mapped I/O areas.
948**
949** Input conditions:
950** Interrupts disabled (from fast trap)
951**
952** Outputs:
953** kernel return code indicating success or failure
954** if success, va resident and alternate mapping made
955-----------------------------------------------------------------------*/
956
kern_return_t vmm_map_page(
	thread_t 			act,
	vmm_adsp_id_t 		index,
	addr64_t 			cva,
	addr64_t 			ava,
	vm_prot_t 			prot)
{
	kern_return_t		ret;
	register mapping_t	*mp;
	vm_map_t 			map;
	addr64_t			ova, nextva;
	pmap_t				pmap;

	pmap = vmm_get_adsp(act, index);			/* Get the guest pmap for this address space */
	if(!pmap) return KERN_FAILURE;				/* Bogus address space, no VMs, or we can't make a pmap, failure... */

	if(ava > vm_max_address) return kVmmInvalidAddress;	/* Does the machine support an address of this size? */

	map = current_thread()->map;				/* Get the host's map */

	if (pmap->pmapFlags & pmapVMgsaa) {			/* Guest shadow assist active ? */
		ret = hw_res_map_gv(map->pmap, pmap, cva, ava, getProtPPC(prot, TRUE));
												/* Attempt to resume an existing gv->phys mapping */
		if (mapRtOK != ret) {					/* Nothing to resume, construct a new mapping */

			while (1) {							/* Find host mapping or fail */
				mp = mapping_find(map->pmap, cva, &nextva, 0);
												/* Attempt to find host mapping and pin it */
				if (mp) break;					/* Got it */

				/* Page not resident in the host: fault it in with interrupts
				   briefly re-enabled, then retry the lookup. */
				ml_set_interrupts_enabled(TRUE);
												/* Open 'rupt window */
				ret = vm_fault(map,				/* Didn't find it, try to fault in host page read/write */
							   vm_map_trunc_page(cva),
							   VM_PROT_READ | VM_PROT_WRITE,
							   FALSE, /* change wiring */
							   THREAD_UNINT,
							   NULL,
							   0);
				ml_set_interrupts_enabled(FALSE);
												/* Close 'rupt window */
				if (ret != KERN_SUCCESS)
					return KERN_FAILURE;		/* Fault failed, return failure */
			}

			if (mpNormal != (mp->mpFlags & mpType)) {
												/* Host mapping must be a vanilla page */
				mapping_drop_busy(mp);			/* Un-pin host mapping */
				return KERN_FAILURE;			/* Return failure */
			}

			/* Partially construct gv->phys mapping */
			unsigned int pindex;
			phys_entry_t *physent = mapping_phys_lookup(mp->mpPAddr, &pindex);
			if (!physent) {
				mapping_drop_busy(mp);
				return KERN_FAILURE;
			}
			/* Derive WIMG cache attributes from the physical entry's I/G bits */
			unsigned int pattr = ((physent->ppLink & (ppI | ppG)) >> 60);
			unsigned int wimg = 0x2;
			if (pattr & mmFlgCInhib) wimg |= 0x4;
			if (pattr & mmFlgGuarded) wimg |= 0x1;
			unsigned int mflags = (pindex << 16) | mpGuest;
			addr64_t gva = ((ava & ~mpHWFlags) | (wimg << 3) | getProtPPC(prot, TRUE));

			hw_add_map_gv(map->pmap, pmap, gva, mflags, mp->mpPAddr);
												/* Construct new guest->phys mapping */

			mapping_drop_busy(mp);				/* Un-pin host mapping */
		}
	} else {
		while(1) {								/* Keep trying until we get it or until we fail */

			mp = mapping_find(map->pmap, cva, &nextva, 0);	/* Find the mapping for this address */

			if(mp) break;						/* We found it */

			ml_set_interrupts_enabled(TRUE);	/* Enable interruptions */
			ret = vm_fault(map,					/* Didn't find it, try to fault it in read/write... */
						   vm_map_trunc_page(cva),
						   VM_PROT_READ | VM_PROT_WRITE,
						   FALSE, /*change wiring */
						   THREAD_UNINT,
						   NULL,
						   0);
			ml_set_interrupts_enabled(FALSE);	/* Disable interruptions */
			if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* There isn't a page there, return... */
		}

		if((mp->mpFlags & mpType) != mpNormal) {	/* If this is a block, a nest, or some other special thing, we can't map it */
			mapping_drop_busy(mp);				/* We have everything we need from the mapping */
			return KERN_FAILURE;				/* Leave in shame */
		}

		while(1) {								/* Keep trying the enter until it goes in */
			ova = mapping_make(pmap, ava, mp->mpPAddr, 0, 1, prot);	/* Enter the mapping into the pmap */
			if(!ova) break;						/* If there were no collisions, we are done... */
			mapping_remove(pmap, ova);			/* Remove the mapping that collided */
		}

		mapping_drop_busy(mp);					/* We have everything we need from the mapping */
	}

	/* Outside of FAM mode, record the last guest mapping made and its address
	   space so the auto-map fast path can reuse them. */
	if (!((getPerProc()->spcFlags) & FamVMmode)) {
		act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;	/* Remember last address space */
	}

	return KERN_SUCCESS;
}
1067
1068
0b4e3aa0
A
1069/*-----------------------------------------------------------------------
1070** vmm_map_execute
1071**
1072** This function maps a page from within the client's logical
1073** address space into the alternate address space of the
1074** Virtual Machine Monitor context and then directly starts executing.
1075**
1076** See description of vmm_map_page for details.
1077**
55e303ae
A
1078** Inputs:
1079** Index is used for both the context and the address space ID.
1080** index[24:31] is the context id and index[16:23] is the address space.
1081** if the address space ID is 0, the context ID is used for it.
1082**
0b4e3aa0
A
1083** Outputs:
1084** Normal exit is to run the VM. Abnormal exit is triggered via a
1085** non-KERN_SUCCESS return from vmm_map_page or later during the
1086** attempt to transition into the VM.
1087-----------------------------------------------------------------------*/
1088
1089vmm_return_code_t vmm_map_execute(
91447636 1090 thread_t act,
0b4e3aa0 1091 vmm_thread_index_t index,
55e303ae
A
1092 addr64_t cva,
1093 addr64_t ava,
0b4e3aa0
A
1094 vm_prot_t prot)
1095{
1096 kern_return_t ret;
1097 vmmCntrlEntry *CEntry;
55e303ae
A
1098 unsigned int adsp;
1099 vmm_thread_index_t cndx;
0b4e3aa0 1100
55e303ae 1101 cndx = index & 0xFF; /* Clean it up */
0b4e3aa0 1102
55e303ae 1103 CEntry = vmm_get_entry(act, cndx); /* Get and validate the index */
0b4e3aa0
A
1104 if (CEntry == NULL) return kVmmBogusContext; /* Return bogus context */
1105
91447636 1106 if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
d7e50217
A
1107 return kVmmBogusContext; /* Yes, invalid index in Fam */
1108
55e303ae
A
1109 adsp = (index >> 8) & 0xFF; /* Get any requested address space */
1110 if(!adsp) adsp = (index & 0xFF); /* If 0, use context ID as address space ID */
1111
1112 ret = vmm_map_page(act, adsp, cva, ava, prot); /* Go try to map the page on in */
1113
0b4e3aa0 1114
d7e50217 1115 if(ret == KERN_SUCCESS) {
91447636
A
1116 act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */
1117 act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx; /* Remember last address space */
55e303ae 1118 vmm_execute_vm(act, cndx); /* Return was ok, launch the VM */
d7e50217
A
1119 }
1120
55e303ae 1121 return ret; /* We had trouble mapping in the page */
0b4e3aa0
A
1122
1123}
1124
9bccf70c
A
1125/*-----------------------------------------------------------------------
1126** vmm_map_list
1127**
55e303ae 1128** This function maps a list of pages into various address spaces
9bccf70c
A
1129**
1130** Inputs:
1131** act - pointer to current thread activation
**		index  - index of default address space (used if not specified in a list entry)
9bccf70c 1133** count - number of pages to release
55e303ae 1134** flavor - 0 if 32-bit version, 1 if 64-bit
9bccf70c
A
1135** vmcpComm in the comm page contains up to kVmmMaxMapPages to map
1136**
1137** Outputs:
1138** kernel return code indicating success or failure
1139** KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
1140** or the vmm_map_page call fails.
55e303ae 1141** We return kVmmInvalidAddress if virtual address size is not supported
9bccf70c
A
1142-----------------------------------------------------------------------*/
1143
1144kern_return_t vmm_map_list(
91447636 1145 thread_t act,
55e303ae
A
1146 vmm_adsp_id_t index,
1147 unsigned int cnt,
1148 unsigned int flavor)
9bccf70c
A
1149{
1150 vmmCntrlEntry *CEntry;
1151 boolean_t ret;
1152 unsigned int i;
55e303ae
A
1153 vmmMList *lst;
1154 vmmMList64 *lstx;
1155 addr64_t cva;
1156 addr64_t ava;
9bccf70c 1157 vm_prot_t prot;
55e303ae 1158 vmm_adsp_id_t adsp;
9bccf70c 1159
55e303ae
A
1160 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
1161 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't a vmm or the index is bogus */
9bccf70c
A
1162
1163 if(cnt > kVmmMaxMapPages) return KERN_FAILURE; /* They tried to map too many */
1164 if(!cnt) return KERN_SUCCESS; /* If they said none, we're done... */
1165
55e303ae
A
1166 lst = (vmmMList *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */
1167 lstx = (vmmMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */
9bccf70c
A
1168
1169 for(i = 0; i < cnt; i++) { /* Step and release all pages in list */
55e303ae
A
1170 if(flavor) { /* Check if 32- or 64-bit addresses */
1171 cva = lstx[i].vmlva; /* Get the 64-bit actual address */
1172 ava = lstx[i].vmlava; /* Get the 64-bit guest address */
1173 }
1174 else {
1175 cva = lst[i].vmlva; /* Get the 32-bit actual address */
1176 ava = lst[i].vmlava; /* Get the 32-bit guest address */
1177 }
1178
1179 prot = ava & vmmlProt; /* Extract the protection bits */
1180 adsp = (ava & vmmlAdID) >> 4; /* Extract an explicit address space request */
1181 if(!adsp) adsp = index - 1; /* If no explicit, use supplied default */
1182 ava = ava &= 0xFFFFFFFFFFFFF000ULL; /* Clean up the address */
1183
9bccf70c 1184 ret = vmm_map_page(act, index, cva, ava, prot); /* Go try to map the page on in */
55e303ae 1185 if(ret != KERN_SUCCESS) return ret; /* Bail if any error */
9bccf70c
A
1186 }
1187
1188 return KERN_SUCCESS ; /* Return... */
1189}
1190
1c79356b
A
1191/*-----------------------------------------------------------------------
1192** vmm_get_page_mapping
1193**
91447636
A
1194** Given a context index and a guest virtual address, convert the address
1195** to its corresponding host virtual address.
1c79356b
A
1196**
1197** Inputs:
1198** act - pointer to current thread activation
91447636
A
1199** index - context index
1200** gva - guest virtual address
1c79356b
A
1201**
1202** Outputs:
91447636 1203** Host virtual address (page aligned) or -1 if not mapped or any failure
1c79356b
A
1204**
1205** Note:
91447636
A
1206** If the host address space contains multiple virtual addresses mapping
1207** to the physical address corresponding to the specified guest virtual
1208** address (i.e., host virtual aliases), it is unpredictable which host
1209** virtual address (alias) will be returned. Moral of the story: No host
1210** virtual aliases.
1c79356b
A
1211-----------------------------------------------------------------------*/
1212
55e303ae 1213addr64_t vmm_get_page_mapping(
91447636 1214 thread_t act,
55e303ae 1215 vmm_adsp_id_t index,
91447636 1216 addr64_t gva)
1c79356b 1217{
91447636 1218 register mapping_t *mp;
1c79356b 1219 pmap_t pmap;
91447636 1220 addr64_t nextva, hva;
55e303ae 1221 ppnum_t pa;
1c79356b 1222
91447636
A
1223 pmap = vmm_get_adsp(act, index); /* Get and validate the index */
1224 if (!pmap)return -1; /* No good, failure... */
55e303ae 1225
91447636
A
1226 if (pmap->pmapFlags & pmapVMgsaa) { /* Guest shadow assist (GSA) active ? */
1227 return (hw_gva_to_hva(pmap, gva)); /* Convert guest to host virtual address */
1228 } else {
1229 mp = mapping_find(pmap, gva, &nextva, 0); /* Find guest mapping for this virtual address */
1230
1231 if(!mp) return -1; /* Not mapped, return -1 */
1c79356b 1232
91447636 1233 pa = mp->mpPAddr; /* Remember the physical page address */
55e303ae 1234
91447636 1235 mapping_drop_busy(mp); /* Go ahead and relase the mapping now */
de355530 1236
91447636
A
1237 pmap = current_thread()->map->pmap; /* Get the host pmap */
1238 hva = mapping_p2v(pmap, pa); /* Now find the source virtual */
1c79356b 1239
91447636 1240 if(hva != 0) return hva; /* We found it... */
1c79356b 1241
91447636
A
1242 panic("vmm_get_page_mapping: could not back-map guest va (%016llX)\n", gva);
1243 /* We are bad wrong if we can't find it */
1c79356b 1244
91447636
A
1245 return -1; /* Never executed, prevents compiler warning */
1246 }
1c79356b
A
1247}
1248
1249/*-----------------------------------------------------------------------
1250** vmm_unmap_page
1251**
91447636 1252** This function unmaps a page from the guest address space.
1c79356b
A
1253**
1254** Inputs:
1255** act - pointer to current thread activation
1256** index - index of vmm state for this page
1257** va - virtual address within the vmm's address
1258** space
1259**
1260** Outputs:
1261** kernel return code indicating success or failure
1262-----------------------------------------------------------------------*/
1263
1264kern_return_t vmm_unmap_page(
91447636 1265 thread_t act,
55e303ae
A
1266 vmm_adsp_id_t index,
1267 addr64_t va)
1c79356b
A
1268{
1269 vmmCntrlEntry *CEntry;
55e303ae
A
1270 addr64_t nadd;
1271 pmap_t pmap;
1c79356b 1272
55e303ae
A
1273 pmap = vmm_get_adsp(act, index); /* Get and validate the index */
1274 if (!pmap)return -1; /* No good, failure... */
1c79356b 1275
91447636
A
1276 if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */
1277 hw_susp_map_gv(act->map->pmap, pmap, va); /* Suspend the mapping */
1278 return (KERN_SUCCESS); /* Always returns success */
1279 } else {
1280 nadd = mapping_remove(pmap, va); /* Toss the mapping */
1281
1282 return ((nadd & 1) ? KERN_FAILURE : KERN_SUCCESS); /* Return... */
1283 }
1c79356b
A
1284}
1285
9bccf70c
A
1286/*-----------------------------------------------------------------------
1287** vmm_unmap_list
1288**
1289** This function unmaps a list of pages from the alternate's logical
1290** address space.
1291**
1292** Inputs:
1293** act - pointer to current thread activation
1294** index - index of vmm state for this page
1295** count - number of pages to release
55e303ae 1296** flavor - 0 if 32-bit, 1 if 64-bit
9bccf70c
A
1297** vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
1298**
1299** Outputs:
1300** kernel return code indicating success or failure
1301** KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
1302-----------------------------------------------------------------------*/
1303
1304kern_return_t vmm_unmap_list(
91447636 1305 thread_t act,
55e303ae
A
1306 vmm_adsp_id_t index,
1307 unsigned int cnt,
1308 unsigned int flavor)
9bccf70c
A
1309{
1310 vmmCntrlEntry *CEntry;
1311 boolean_t ret;
1312 kern_return_t kern_result = KERN_SUCCESS;
1313 unsigned int *pgaddr, i;
55e303ae
A
1314 addr64_t gva;
1315 vmmUMList *lst;
1316 vmmUMList64 *lstx;
1317 pmap_t pmap;
1318 int adsp;
9bccf70c 1319
55e303ae
A
1320 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
1321 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't a vmm or the index is bogus */
9bccf70c 1322
55e303ae
A
1323 if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE; /* They tried to unmap too many */
1324 if(!cnt) return KERN_SUCCESS; /* If they said none, we're done... */
9bccf70c 1325
55e303ae 1326 lst = (vmmUMList *)lstx = (vmmUMList64 *) &((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */
de355530 1327
55e303ae
A
1328 for(i = 0; i < cnt; i++) { /* Step and release all pages in list */
1329 if(flavor) { /* Check if 32- or 64-bit addresses */
1330 gva = lstx[i].vmlava; /* Get the 64-bit guest address */
1331 }
1332 else {
1333 gva = lst[i].vmlava; /* Get the 32-bit guest address */
1334 }
1335
1336 adsp = (gva & vmmlAdID) >> 4; /* Extract an explicit address space request */
1337 if(!adsp) adsp = index - 1; /* If no explicit, use supplied default */
91447636 1338 pmap = act->machine.vmmControl->vmmAdsp[adsp]; /* Get the pmap for this request */
55e303ae
A
1339 if(!pmap) continue; /* Ain't nuthin' mapped here, no durn map... */
1340
91447636
A
1341 gva = gva &= 0xFFFFFFFFFFFFF000ULL; /* Clean up the address */
1342 if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */
1343 hw_susp_map_gv(act->map->pmap, pmap, gva);
1344 /* Suspend the mapping */
1345 } else {
1346 (void)mapping_remove(pmap, gva); /* Toss the mapping */
1347 }
9bccf70c
A
1348 }
1349
55e303ae 1350 return KERN_SUCCESS ; /* Return... */
9bccf70c
A
1351}
1352
1c79356b
A
1353/*-----------------------------------------------------------------------
1354** vmm_unmap_all_pages
1355**
1356** This function unmaps all pages from the alternates's logical
1357** address space.
1358**
1359** Inputs:
1360** act - pointer to current thread activation
1361** index - index of context state
1362**
1363** Outputs:
1364** none
1365**
1366** Note:
1367** All pages are unmapped, but the address space (i.e., pmap) is still alive
1368-----------------------------------------------------------------------*/
1369
1370void vmm_unmap_all_pages(
91447636 1371 thread_t act,
55e303ae 1372 vmm_adsp_id_t index)
1c79356b
A
1373{
1374 vmmCntrlEntry *CEntry;
55e303ae 1375 pmap_t pmap;
1c79356b 1376
55e303ae
A
1377 pmap = vmm_get_adsp(act, index); /* Convert index to entry */
1378 if (!pmap) return; /* Either this isn't vmm thread or the index is bogus */
91447636
A
1379
1380 if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */
1381 hw_rem_all_gv(pmap); /* Remove all guest's mappings from shadow hash table */
1382 } else {
1383 /*
1384 * Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
1385 */
1386 mapping_remove(pmap, 0xFFFFFFFFFFFFF000LL); /* Remove final page explicitly because we might have mapped it */
1387 pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000LL); /* Remove all entries from this map */
1388 }
1c79356b
A
1389 return;
1390}
1391
1392
1393/*-----------------------------------------------------------------------
1394** vmm_get_page_dirty_flag
1395**
1396** This function returns the changed flag of the page
**	and optionally clears the flag.
1398**
1399** Inputs:
1400** act - pointer to current thread activation
1401** index - index of vmm state for this page
1402** va - virtual address within the vmm's address
1403** space
1404** reset - Clears dirty if true, untouched if not
1405**
1406** Outputs:
1407** the dirty bit
1408** clears the dirty bit in the pte if requested
1409**
1410** Note:
1411** The RC bits are merged into the global physical entry
1412-----------------------------------------------------------------------*/
1413
1414boolean_t vmm_get_page_dirty_flag(
91447636 1415 thread_t act,
55e303ae
A
1416 vmm_adsp_id_t index,
1417 addr64_t va,
1c79356b
A
1418 unsigned int reset)
1419{
1420 vmmCntrlEntry *CEntry;
91447636 1421 register mapping_t *mpv, *mp;
1c79356b 1422 unsigned int RC;
55e303ae 1423 pmap_t pmap;
1c79356b 1424
55e303ae
A
1425 pmap = vmm_get_adsp(act, index); /* Convert index to entry */
1426 if (!pmap) return 1; /* Either this isn't vmm thread or the index is bogus */
91447636
A
1427
1428 if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */
1429 RC = hw_test_rc_gv(act->map->pmap, pmap, va, reset);/* Fetch the RC bits and clear if requested */
1430 } else {
1431 RC = hw_test_rc(pmap, (addr64_t)va, reset); /* Fetch the RC bits and clear if requested */
1432 }
1c79356b 1433
55e303ae
A
1434 switch (RC & mapRetCode) { /* Decode return code */
1435
1436 case mapRtOK: /* Changed */
1437 return ((RC & (unsigned int)mpC) == (unsigned int)mpC); /* Return if dirty or not */
1438 break;
1439
1440 case mapRtNotFnd: /* Didn't find it */
1441 return 1; /* Return dirty */
1442 break;
1443
1444 default:
1445 panic("vmm_get_page_dirty_flag: hw_test_rc failed - rc = %d, pmap = %08X, va = %016llX\n", RC, pmap, va);
1446
1447 }
de355530 1448
55e303ae 1449 return 1; /* Return the change bit */
1c79356b
A
1450}
1451
0b4e3aa0
A
1452
1453/*-----------------------------------------------------------------------
1454** vmm_protect_page
1455**
1456** This function sets the protection bits of a mapped page
1457**
1458** Inputs:
1459** act - pointer to current thread activation
1460** index - index of vmm state for this page
1461** va - virtual address within the vmm's address
1462** space
1463** prot - Protection flags
1464**
1465** Outputs:
1466** none
**		Protection bits of the mapping are modified
1468**
1469-----------------------------------------------------------------------*/
1470
1471kern_return_t vmm_protect_page(
91447636 1472 thread_t act,
55e303ae
A
1473 vmm_adsp_id_t index,
1474 addr64_t va,
0b4e3aa0
A
1475 vm_prot_t prot)
1476{
1477 vmmCntrlEntry *CEntry;
55e303ae
A
1478 addr64_t nextva;
1479 int ret;
1480 pmap_t pmap;
0b4e3aa0 1481
55e303ae
A
1482 pmap = vmm_get_adsp(act, index); /* Convert index to entry */
1483 if (!pmap) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */
d7e50217 1484
91447636
A
1485 if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */
1486 ret = hw_protect_gv(pmap, va, prot); /* Try to change protection, GSA varient */
1487 } else {
1488 ret = hw_protect(pmap, va, prot, &nextva); /* Try to change protection */
1489 }
0b4e3aa0 1490
55e303ae
A
1491 switch (ret) { /* Decode return code */
1492
1493 case mapRtOK: /* All ok... */
1494 break; /* Outta here */
1495
1496 case mapRtNotFnd: /* Didn't find it */
1497 return KERN_SUCCESS; /* Ok, return... */
1498 break;
1499
1500 default:
1501 panic("vmm_protect_page: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, (addr64_t)va);
1502
1503 }
de355530 1504
91447636
A
1505 if (!((getPerProc()->spcFlags) & FamVMmode)) {
1506 act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */
1507 act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index; /* Remember last address space */
55e303ae 1508 }
0b4e3aa0
A
1509
1510 return KERN_SUCCESS; /* Return */
1511}
1512
1513
1514/*-----------------------------------------------------------------------
1515** vmm_protect_execute
1516**
1517** This function sets the protection bits of a mapped page
1518** and then directly starts executing.
1519**
55e303ae
A
1520** See description of vmm_protect_page for details
1521**
1522** Inputs:
1523** See vmm_protect_page and vmm_map_execute
0b4e3aa0
A
1524**
1525** Outputs:
1526** Normal exit is to run the VM. Abnormal exit is triggered via a
1527** non-KERN_SUCCESS return from vmm_map_page or later during the
1528** attempt to transition into the VM.
1529-----------------------------------------------------------------------*/
1530
1531vmm_return_code_t vmm_protect_execute(
91447636 1532 thread_t act,
0b4e3aa0 1533 vmm_thread_index_t index,
55e303ae 1534 addr64_t va,
0b4e3aa0
A
1535 vm_prot_t prot)
1536{
1537 kern_return_t ret;
1538 vmmCntrlEntry *CEntry;
55e303ae
A
1539 unsigned int adsp;
1540 vmm_thread_index_t cndx;
0b4e3aa0 1541
55e303ae
A
1542 cndx = index & 0xFF; /* Clean it up */
1543 CEntry = vmm_get_entry(act, cndx); /* Get and validate the index */
1544 if (CEntry == NULL) return kVmmBogusContext; /* Return bogus context */
1545
1546 adsp = (index >> 8) & 0xFF; /* Get any requested address space */
1547 if(!adsp) adsp = (index & 0xFF); /* If 0, use context ID as address space ID */
0b4e3aa0 1548
91447636 1549 if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
d7e50217
A
1550 return kVmmBogusContext; /* Yes, invalid index in Fam */
1551
55e303ae 1552 ret = vmm_protect_page(act, adsp, va, prot); /* Go try to change access */
d7e50217
A
1553
1554 if(ret == KERN_SUCCESS) {
91447636
A
1555 act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */
1556 act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx; /* Remember last address space */
55e303ae 1557 vmm_execute_vm(act, cndx); /* Return was ok, launch the VM */
d7e50217 1558 }
0b4e3aa0 1559
55e303ae 1560 return ret; /* We had trouble of some kind (shouldn't happen) */
0b4e3aa0
A
1561
1562}
1563
1564
1c79356b
A
1565/*-----------------------------------------------------------------------
1566** vmm_get_float_state
1567**
1568** This function causes the current floating point state to
1569** be saved into the shared context area. It also clears the
1570** vmmFloatCngd changed flag.
1571**
1572** Inputs:
1573** act - pointer to current thread activation structure
1574** index - index returned by vmm_init_context
1575**
1576** Outputs:
1577** context saved
1578-----------------------------------------------------------------------*/
1579
1580kern_return_t vmm_get_float_state(
91447636 1581 thread_t act,
1c79356b
A
1582 vmm_thread_index_t index)
1583{
1584 vmmCntrlEntry *CEntry;
1585 vmmCntrlTable *CTable;
1586 int i;
9bccf70c 1587 register struct savearea_fpu *sv;
1c79356b
A
1588
1589 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
1590 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */
1591
91447636 1592 act->machine.specFlags &= ~floatCng; /* Clear the special flag */
1c79356b 1593 CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd; /* Clear the change indication */
9bccf70c
A
1594
1595 fpu_save(&CEntry->vmmFacCtx); /* Save context if live */
1596
9bccf70c
A
1597 if(sv = CEntry->vmmFacCtx.FPUsave) { /* Is there context yet? */
1598 bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8); /* 32 registers */
1c79356b
A
1599 return KERN_SUCCESS;
1600 }
1601
1c79356b 1602
9bccf70c 1603 for(i = 0; i < 32; i++) { /* Initialize floating points */
1c79356b
A
1604 CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit; /* Initial value */
1605 }
1606
1607 return KERN_SUCCESS;
1608}
1609
1610/*-----------------------------------------------------------------------
1611** vmm_get_vector_state
1612**
1613** This function causes the current vector state to
1614** be saved into the shared context area. It also clears the
1615** vmmVectorCngd changed flag.
1616**
1617** Inputs:
1618** act - pointer to current thread activation structure
1619** index - index returned by vmm_init_context
1620**
1621** Outputs:
1622** context saved
1623-----------------------------------------------------------------------*/
1624
1625kern_return_t vmm_get_vector_state(
91447636 1626 thread_t act,
1c79356b
A
1627 vmm_thread_index_t index)
1628{
1629 vmmCntrlEntry *CEntry;
1630 vmmCntrlTable *CTable;
1631 int i, j;
1632 unsigned int vrvalidwrk;
9bccf70c 1633 register struct savearea_vec *sv;
1c79356b
A
1634
1635 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
1636 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */
9bccf70c
A
1637
1638 vec_save(&CEntry->vmmFacCtx); /* Save context if live */
1c79356b 1639
91447636 1640 act->machine.specFlags &= ~vectorCng; /* Clear the special flag */
1c79356b
A
1641 CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd; /* Clear the change indication */
1642
9bccf70c 1643 if(sv = CEntry->vmmFacCtx.VMXsave) { /* Is there context yet? */
1c79356b
A
1644
1645 vrvalidwrk = sv->save_vrvalid; /* Get the valid flags */
1646
1c79356b
A
1647 for(i = 0; i < 32; i++) { /* Copy the saved registers and invalidate the others */
1648 if(vrvalidwrk & 0x80000000) { /* Do we have a valid value here? */
1649 for(j = 0; j < 4; j++) { /* If so, copy it over */
1650 CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
1651 }
1652 }
1653 else {
1654 for(j = 0; j < 4; j++) { /* Otherwise set to empty value */
1655 CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
1656 }
1657 }
1658
1659 vrvalidwrk = vrvalidwrk << 1; /* Shift over to the next */
1660
1661 }
1662
1663 return KERN_SUCCESS;
1664 }
1665
1c79356b
A
1666 for(i = 0; i < 32; i++) { /* Initialize vector registers */
1667 for(j=0; j < 4; j++) { /* Do words */
1668 CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j]; /* Initial value */
1669 }
1670 }
1671
1672 return KERN_SUCCESS;
1673}
1674
1675/*-----------------------------------------------------------------------
1676** vmm_set_timer
1677**
1678** This function causes a timer (in AbsoluteTime) for a specific time
1679** to be set It also clears the vmmTimerPop flag if the timer is actually
1680** set, it is cleared otherwise.
1681**
** A timer is cleared by setting the time to 0. This will clear
1683** the vmmTimerPop bit. Simply setting the timer to earlier than the
1684** current time clears the internal timer request, but leaves the
1685** vmmTimerPop flag set.
1686**
1687**
1688** Inputs:
1689** act - pointer to current thread activation structure
1690** index - index returned by vmm_init_context
1691** timerhi - high order word of AbsoluteTime to pop
1692** timerlo - low order word of AbsoluteTime to pop
1693**
1694** Outputs:
1695** timer set, vmmTimerPop cleared
1696-----------------------------------------------------------------------*/
1697
1698kern_return_t vmm_set_timer(
91447636 1699 thread_t act,
1c79356b
A
1700 vmm_thread_index_t index,
1701 unsigned int timerhi,
1702 unsigned int timerlo)
1703{
1704 vmmCntrlEntry *CEntry;
1705
1706 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
1707 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */
1708
0b4e3aa0 1709 CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo;
1c79356b
A
1710
1711 vmm_timer_pop(act); /* Go adjust all of the timer stuff */
1712 return KERN_SUCCESS; /* Leave now... */
1713}
1714
1715
1716/*-----------------------------------------------------------------------
1717** vmm_get_timer
1718**
1719** This function causes the timer for a specified VM to be
1720** returned in return_params[0] and return_params[1].
55e303ae
A
1721** Note that this is kind of funky for 64-bit VMs because we
1722** split the timer into two parts so that we still set parms 0 and 1.
1723** Obviously, we don't need to do this because the parms are 8 bytes
1724** wide.
1c79356b
A
1725**
1726**
1727** Inputs:
1728** act - pointer to current thread activation structure
1729** index - index returned by vmm_init_context
1730**
1731** Outputs:
1732** Timer value set in return_params[0] and return_params[1].
1733** Set to 0 if timer is not set.
1734-----------------------------------------------------------------------*/
1735
1736kern_return_t vmm_get_timer(
91447636 1737 thread_t act,
1c79356b
A
1738 vmm_thread_index_t index)
1739{
1740 vmmCntrlEntry *CEntry;
1741 vmmCntrlTable *CTable;
1742
1743 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
1744 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */
1745
55e303ae
A
1746 if(CEntry->vmmXAFlgs & vmm64Bit) { /* A 64-bit virtual machine? */
1747 CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32); /* Return the last timer value */
1748 CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[1] = (uint32_t)CEntry->vmmTimer; /* Return the last timer value */
1749 }
1750 else {
1751 CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[0] = (CEntry->vmmTimer >> 32); /* Return the last timer value */
1752 CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[1] = (uint32_t)CEntry->vmmTimer; /* Return the last timer value */
1753 }
1c79356b
A
1754 return KERN_SUCCESS;
1755}
1756
1757
1c79356b
A
1758/*-----------------------------------------------------------------------
1759** vmm_timer_pop
1760**
1761** This function causes all timers in the array of VMs to be updated.
1762** All appropriate flags are set or reset. If a VM is currently
1763** running and its timer expired, it is intercepted.
1764**
1765** The qactTimer value is set to the lowest unexpired timer. It is
1766** zeroed if all timers are expired or have been reset.
1767**
1768** Inputs:
1769** act - pointer to current thread activation structure
1770**
1771** Outputs:
1772** timers set, vmmTimerPop cleared or set
1773-----------------------------------------------------------------------*/
1774
void vmm_timer_pop(
	thread_t 		act)
{
	vmmCntrlEntry 		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi, any;
	uint64_t		now, soonest;
	savearea		*sv;

	if(!((unsigned int)act->machine.vmmControl & 0xFFFFFFFE)) {	/* Are there any virtual machines? */
		panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
	}

	soonest = 0xFFFFFFFFFFFFFFFFULL;			/* Max time */

	clock_get_uptime(&now);					/* What time is it? */

	CTable = act->machine.vmmControl;			/* Make this easier */
	any = 0;						/* Haven't found a running unexpired timer yet */

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {		/* Cycle through all and check time now */

		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;	/* Do not check if the entry is empty */

		if(CTable->vmmc[cvi].vmmTimer == 0) {		/* Is the timer reset? */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;		/* Clear timer popped */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Clear timer popped */
			continue;				/* Check next */
		}

		if (CTable->vmmc[cvi].vmmTimer <= now) {	/* Timer has expired */
			CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;		/* Set timer popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;	/* Set timer popped here */
			if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->machine.vmmCEntry) {	/* Is this the running VM? */
				sv = find_user_regs(act);	/* Get the user state registers */
				if(!sv) {			/* Did we find something? */
					panic("vmm_timer_pop: no user context; act = %08X\n", act);
				}
				sv->save_exception = kVmmReturnNull*4;	/* Indicate that this is a null exception */
				vmm_force_exit(act, sv);	/* Intercept a running VM */
			}
			continue;				/* Expired timers never update "soonest"; check the rest */
		}
		else {						/* It hasn't popped yet */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;		/* Set timer not popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Set timer not popped here */
		}

		any = 1;					/* Show we found an active unexpired timer */

		if (CTable->vmmc[cvi].vmmTimer < soonest)
			soonest = CTable->vmmc[cvi].vmmTimer;	/* Track the earliest pending pop */
	}

	if(any) {
		/* Only tighten the activation's quick-activate timer, never loosen it */
		if (act->machine.qactTimer == 0 || soonest <= act->machine.qactTimer)
			act->machine.qactTimer = soonest;	/* Set lowest timer */
	}

	return;
}
1836
1837
1838
1839/*-----------------------------------------------------------------------
1840** vmm_stop_vm
1841**
** This function prevents the specified VM(s) from running.
1843** If any is currently executing, the execution is intercepted
1844** with a code of kVmmStopped. Note that execution of the VM is
1845** blocked until a vmmExecuteVM is called with the start flag set to 1.
1846** This provides the ability for a thread to stop execution of a VM and
1847** insure that it will not be run until the emulator has processed the
1848** "virtual" interruption.
1849**
1850** Inputs:
1851** vmmask - 32 bit mask corresponding to the VMs to put in stop state
1852** NOTE: if this mask is all 0s, any executing VM is intercepted with
** a kVmmStopped (but not marked stopped), otherwise this is a no-op. Also
** note that there is a potential race here and the VM may not stop.
1855**
1856** Outputs:
1857** kernel return code indicating success
1858** or if no VMs are enabled, an invalid syscall exception.
1859-----------------------------------------------------------------------*/
1860
int vmm_stop_vm(struct savearea *save)
{

	thread_t		act;
	vmmCntrlTable		*CTable;
	int			cvi, i;
	task_t			task;
	thread_t		fact;
	unsigned int		vmmask;
	ReturnHandler		*stopapc;

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();					/* Figure out who we are */

	task_lock(task);					/* Lock our task */

	fact = (thread_t)task->threads.next;			/* Get the first activation on task */
	act = 0;						/* Pretend we didn't find it yet */

	for(i = 0; i < task->thread_count; i++) {		/* All of the activations */
		if(fact->machine.vmmControl) {			/* Is this a virtual machine monitor? */
			act = fact;				/* Yeah... */
			break;					/* Bail the loop... */
		}
		fact = (thread_t)fact->task_threads.next;	/* Go to the next one */
	}

	if(!((unsigned int)act)) {				/* See if we have VMMs yet */
		task_unlock(task);				/* No, unlock the task */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;					/* Go generate a syscall exception */
	}

	thread_reference(act);					/* Hold the activation so it can't go away after we drop the task lock */

	task_unlock(task);					/* Safe to release now */

	thread_mtx_lock(act);					/* Serialize against other operations on this activation */

	CTable = act->machine.vmmControl;			/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {			/* Are there any all the way up yet? */
		thread_mtx_unlock(act);				/* Unlock the activation */
		thread_deallocate(act);				/* Drop our reference */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;					/* Go generate a syscall exception */
	}

	if(!(vmmask = save->save_r3)) {				/* Get the stop mask and check if all zeros */
		thread_mtx_unlock(act);				/* Unlock the activation */
		thread_deallocate(act);				/* Drop our reference */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Set success */
		return 1;					/* Return... */
	}

	/* Bit 31 of the mask corresponds to context slot 0; mark each selected, in-use slot */
	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {		/* Search slots */
		if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) {	/* See if we need to stop and if it is in use */
			hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop);	/* Set this one to stop */
		}
		vmmask = vmmask << 1;				/* Slide mask over */
	}

	if(hw_compare_and_store(0, 1, &act->machine.emPendRupts)) {	/* See if there is already a stop pending and lock out others if not */
		thread_mtx_unlock(act);				/* Already one pending, unlock the activation */
		thread_deallocate(act);				/* Drop our reference */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Say we did it... */
		return 1;					/* Leave */
	}

	if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) {	/* Get a return handler control block */
		act->machine.emPendRupts = 0;			/* No memory, say we have given up request */
		thread_mtx_unlock(act);				/* Unlock the activation */
		thread_deallocate(act);				/* Drop our reference */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No storage... */
		return 1;					/* Return... */
	}

	ml_set_interrupts_enabled(FALSE);			/* Disable interruptions for now */

	stopapc->handler = vmm_interrupt;			/* Set interruption routine */

	stopapc->next = act->handlers;				/* Put our interrupt at the start of the list */
	act->handlers = stopapc;				/* Point to us */

	act_set_apc(act);					/* Set an APC AST so vmm_interrupt runs on the target thread */
	ml_set_interrupts_enabled(TRUE);			/* Enable interruptions now */

	thread_mtx_unlock(act);					/* Unlock the activation */
	thread_deallocate(act);					/* Drop our reference */

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, horay... */
	return 1;
}
1959
1960/*-----------------------------------------------------------------------
1961** vmm_interrupt
1962**
1963** This function is executed asynchronously from an APC AST.
1964** It is to be used for anything that needs to interrupt a running VM.
1965** This include any kind of interruption generation (other than timer pop)
1966** or entering the stopped state.
1967**
1968** Inputs:
1969** ReturnHandler *rh - the return handler control block as required by the APC.
91447636 1970** thread_t act - the activation
0b4e3aa0
A
1971**
1972** Outputs:
1973** Whatever needed to be done is done.
1974-----------------------------------------------------------------------*/
1975
91447636 1976void vmm_interrupt(ReturnHandler *rh, thread_t act) {
0b4e3aa0
A
1977
1978 vmmCntrlTable *CTable;
1979 savearea *sv;
1980 boolean_t inter;
1981
1982
1983
91447636 1984 kfree(rh, sizeof(ReturnHandler)); /* Release the return handler block */
0b4e3aa0
A
1985
1986 inter = ml_set_interrupts_enabled(FALSE); /* Disable interruptions for now */
1987
91447636
A
1988 act->machine.emPendRupts = 0; /* Say that there are no more interrupts pending */
1989 CTable = act->machine.vmmControl; /* Get the pointer to the table */
0b4e3aa0
A
1990
1991 if(!((unsigned int)CTable & -2)) return; /* Leave if we aren't doing VMs any more... */
1992
91447636 1993 if(act->machine.vmmCEntry && (act->machine.vmmCEntry->vmmFlags & vmmXStop)) { /* Do we need to stop the running guy? */
9bccf70c 1994 sv = find_user_regs(act); /* Get the user state registers */
0b4e3aa0
A
1995 if(!sv) { /* Did we find something? */
1996 panic("vmm_interrupt: no user context; act = %08X\n", act);
1c79356b 1997 }
0b4e3aa0
A
1998 sv->save_exception = kVmmStopped*4; /* Set a "stopped" exception */
1999 vmm_force_exit(act, sv); /* Intercept a running VM */
1c79356b 2000 }
0b4e3aa0 2001 ml_set_interrupts_enabled(inter); /* Put interrupts back to what they were */
1c79356b
A
2002
2003 return;
2004}