/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.c
**
** C routines that we are adding to the MacOS X kernel.
**
-----------------------------------------------------------------------*/

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <ppc/vmachmon.h>
#include <ppc/lowglobals.h>

extern double FloatInit;
extern unsigned long QNaNbarbarian[4];

/*************************************************************************************
	Virtual Machine Monitor Internal Routines
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function validates a vmm context index and returns the
** corresponding vmm context entry.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/

static vmmCntrlEntry *vmm_get_entry(
    thread_t           act,
    vmm_thread_index_t index)
{
    vmmCntrlTable *CTable;
    vmmCntrlEntry *CEntry;

    index = index & vmmTInum;                           /* Clean up the index */

    if (act->machine.vmmControl == 0) return NULL;      /* No control table means no vmm */
    if ((index - 1) >= kVmmMaxContexts) return NULL;    /* Index not in range */

    CTable = act->machine.vmmControl;                   /* Make the address a bit more convenient */
    CEntry = &CTable->vmmc[index - 1];                  /* Point to the entry */

    if (!(CEntry->vmmFlags & vmmInUse)) return NULL;    /* See if the slot is actually in use */

    return CEntry;
}
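
/*
 * Illustrative sketch, not part of the original source (compiled out, and the
 * helper name is hypothetical): shows the "one based" index convention that
 * vmm_get_entry implements above.
 */
#if 0
static void vmm_index_convention_example(thread_t act)
{
    vmmCntrlEntry *CEntry;

    CEntry = vmm_get_entry(act, 1);                     /* Index 1 selects slot vmmc[0] */
    if (CEntry == NULL) return;                         /* No table, index out of range, or slot not in use */

    CEntry = vmm_get_entry(act, 0);                     /* Index 0 wraps in the unsigned (index - 1) check... */
                                                        /* ...so it always fails the range test and yields NULL */
}
#endif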

/*-----------------------------------------------------------------------
** vmm_get_adsp
**
** This function verifies and returns the pmap for an address space.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a pmap or 0 if not found
**		Note that this routine does not itself create a missing pmap; the
**		guest pmap is created along with its context (see vmm_init_context).
-----------------------------------------------------------------------*/

static pmap_t vmm_get_adsp(thread_t act, vmm_thread_index_t index)
{
    pmap_t pmap;

    if (act->machine.vmmControl == 0) return NULL;      /* No control table means no vmm */
    if ((index - 1) >= kVmmMaxContexts) return NULL;    /* Index not in range */

    pmap = act->machine.vmmControl->vmmAdsp[index - 1]; /* Get the pmap */
    return (pmap);                                      /* and return it. */
}

/*-----------------------------------------------------------------------
** vmm_build_shadow_hash
**
** Allocate and initialize a shadow hash table.
**
** This function assumes that PAGE_SIZE is 4K bytes.
**
-----------------------------------------------------------------------*/
static pmap_vmm_ext *vmm_build_shadow_hash(pmap_t pmap)
{
    pmap_vmm_ext *ext;                                  /* VMM pmap extension we're building */
    ppnum_t       extPP;                                /* VMM pmap extension physical page number */
    kern_return_t ret;                                  /* Return code from various calls */
    uint32_t      pages = GV_HPAGES;                    /* Number of pages in the hash table */
    vm_offset_t   free = VMX_HPIDX_OFFSET;              /* Offset into extension page of free area (128-byte aligned) */
    uint32_t      freeSize = PAGE_SIZE - free;          /* Number of free bytes in the extension page */

    if ((pages * sizeof(addr64_t)) + (pages * sizeof(vm_offset_t)) > freeSize) {
        panic("vmm_build_shadow_hash: too little pmap_vmm_ext free space\n");
    }

    ret = kmem_alloc_wired(kernel_map, (vm_offset_t *)&ext, PAGE_SIZE);
                                                        /* Allocate a page-sized extension block */
    if (ret != KERN_SUCCESS) return (NULL);             /* Return NULL for failed allocate */
    bzero((char *)ext, PAGE_SIZE);                      /* Zero the entire extension block page */

    extPP = pmap_find_phys(kernel_pmap, (vm_offset_t)ext);
                                                        /* Get extension block's physical page number */
    if (!extPP) {                                       /* This should not fail, but then again... */
        panic("vmm_build_shadow_hash: could not translate pmap_vmm_ext vaddr %08X\n", ext);
    }

    ext->vmxSalt = (addr64_t)(vm_offset_t)ext ^ ptoa_64(extPP);
                                                        /* Set effective<->physical conversion salt */
    ext->vmxHostPmapPhys = (addr64_t)(vm_offset_t)pmap ^ pmap->pmapvr;
                                                        /* Set host pmap's physical address */
    ext->vmxHostPmap = pmap;                            /* Set host pmap's effective address */
    ext->vmxHashPgIdx = (addr64_t *)((vm_offset_t)ext + VMX_HPIDX_OFFSET);
                                                        /* Allocate physical index */
    ext->vmxHashPgList = (vm_offset_t *)((vm_offset_t)ext + VMX_HPLIST_OFFSET);
                                                        /* Allocate page list */
    ext->vmxActiveBitmap = (vm_offset_t *)((vm_offset_t)ext + VMX_ACTMAP_OFFSET);
                                                        /* Allocate active mapping bitmap */

    /* The hash table is typically larger than a single page, but we don't require it to be in a
       contiguous virtual or physical chunk. So, we allocate it page by page, noting the effective
       and physical address of each page in vmxHashPgList and vmxHashPgIdx, respectively. */
    uint32_t idx;
    for (idx = 0; idx < pages; idx++) {
        ret = kmem_alloc_wired(kernel_map, &ext->vmxHashPgList[idx], PAGE_SIZE);
                                                        /* Allocate a hash-table page */
        if (ret != KERN_SUCCESS) goto fail;             /* Allocation failed, exit through cleanup */
        bzero((char *)ext->vmxHashPgList[idx], PAGE_SIZE); /* Zero the page */
        ext->vmxHashPgIdx[idx] = ptoa_64(pmap_find_phys(kernel_pmap, (addr64_t)ext->vmxHashPgList[idx]));
                                                        /* Put page's physical address into index */
        if (!ext->vmxHashPgIdx[idx]) {                  /* Hash-table page's LRA failed */
            panic("vmm_build_shadow_hash: could not translate hash-table vaddr %08X\n", ext->vmxHashPgList[idx]);
        }
        mapping_t *map = (mapping_t *)ext->vmxHashPgList[idx];
        uint32_t mapIdx;
        for (mapIdx = 0; mapIdx < GV_SLTS_PPG; mapIdx++) { /* Iterate over mappings in this page */
            map->mpFlags = (mpGuest | mpgFree);         /* Mark guest type and free */
            map = (mapping_t *)((char *)map + GV_SLOT_SZ); /* Next slot-sized mapping */
        }
    }

    return (ext);                                       /* Return newly-minted VMM pmap extension */

fail:
    for (idx = 0; idx < pages; idx++) {                 /* De-allocate any pages we managed to allocate */
        if (ext->vmxHashPgList[idx]) {
            kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
        }
    }
    kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE); /* Release the VMM pmap extension page */
    return (NULL);                                      /* Return NULL for failure */
}
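
/*
 * Illustrative sketch, not part of the original source (compiled out, helper
 * name hypothetical): because vmxSalt is effective ^ physical, XORing either
 * address with the salt yields the other, so low-level code can convert
 * between the two without a translation walk.
 */
#if 0
static void vmm_salt_example(pmap_vmm_ext *ext)
{
    addr64_t eff  = (addr64_t)(vm_offset_t)ext;         /* Extension block's effective (kernel) address */
    addr64_t phys = eff ^ ext->vmxSalt;                 /* effective ^ salt = physical address */
    addr64_t back = phys ^ ext->vmxSalt;                /* physical ^ salt = effective address again */
    (void)back;                                         /* back == eff */
}
#endif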


/*-----------------------------------------------------------------------
** vmm_release_shadow_hash
**
** Release shadow hash table and VMM extension block
**
-----------------------------------------------------------------------*/
static void vmm_release_shadow_hash(pmap_vmm_ext *ext)
{
    uint32_t idx;

    for (idx = 0; idx < GV_HPAGES; idx++) {             /* Release the hash table page by page */
        kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
    }

    kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE); /* Release the VMM pmap extension page */
}

/*-----------------------------------------------------------------------
** vmm_activate_gsa
**
** Activate guest shadow assist
**
-----------------------------------------------------------------------*/
static kern_return_t vmm_activate_gsa(
    thread_t           act,
    vmm_thread_index_t index)
{
    vmmCntrlTable *CTable = act->machine.vmmControl;    /* Get VMM control table */
    if (!CTable) {                                      /* Caller guarantees that this will work */
        panic("vmm_activate_gsa: VMM control table not present; act = %08X, idx = %d\n",
            act, index);
        return KERN_FAILURE;
    }
    vmmCntrlEntry *CEntry = vmm_get_entry(act, index);  /* Get context from index */
    if (!CEntry) {                                      /* Caller guarantees that this will work */
        panic("vmm_activate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
            act, index);
        return KERN_FAILURE;
    }

    pmap_t hpmap = act->map->pmap;                      /* Get host pmap */
    pmap_t gpmap = vmm_get_adsp(act, index);            /* Get guest pmap */
    if (!gpmap) {                                       /* Caller guarantees that this will work */
        panic("vmm_activate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
            act, index);
        return KERN_FAILURE;
    }

    if (!hpmap->pmapVmmExt) {                           /* If there's no VMM extension for this host, create one */
        hpmap->pmapVmmExt = vmm_build_shadow_hash(hpmap); /* Build VMM extension plus shadow hash and attach */
        if (hpmap->pmapVmmExt) {                        /* See if we succeeded */
            hpmap->pmapVmmExtPhys = (addr64_t)(vm_offset_t)hpmap->pmapVmmExt ^ hpmap->pmapVmmExt->vmxSalt;
                                                        /* Get VMM extension block's physical address */
        } else {
            return KERN_RESOURCE_SHORTAGE;              /* Not enough mojo to go */
        }
    }
    gpmap->pmapVmmExt = hpmap->pmapVmmExt;              /* Copy VMM extension block virtual address into guest */
    gpmap->pmapVmmExtPhys = hpmap->pmapVmmExtPhys;      /* and its physical address, too */
    gpmap->pmapFlags |= pmapVMgsaa;                     /* Enable GSA for this guest */
    CEntry->vmmXAFlgs |= vmmGSA;                        /* Show GSA active here, too */

    return KERN_SUCCESS;
}


/*-----------------------------------------------------------------------
** vmm_deactivate_gsa
**
** Deactivate guest shadow assist
**
-----------------------------------------------------------------------*/
static void vmm_deactivate_gsa(
    thread_t           act,
    vmm_thread_index_t index)
{
    vmmCntrlEntry *CEntry = vmm_get_entry(act, index);  /* Get context from index */
    if (!CEntry) {                                      /* Caller guarantees that this will work */
        panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
            act, index);
        return;                                         /* Void function, so no return code */
    }

    pmap_t gpmap = vmm_get_adsp(act, index);            /* Get guest pmap */
    if (!gpmap) {                                       /* Caller guarantees that this will work */
        panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
            act, index);
        return;                                         /* Void function, so no return code */
    }

    gpmap->pmapFlags &= ~pmapVMgsaa;                    /* Deactivate GSA for this guest */
    CEntry->vmmXAFlgs &= ~vmmGSA;                       /* Show GSA deactivated here, too */
}


/*-----------------------------------------------------------------------
** vmm_flush_context
**
** Flush specified guest context, purging all guest mappings and clearing
** the context page.
**
-----------------------------------------------------------------------*/
static void vmm_flush_context(
    thread_t           act,
    vmm_thread_index_t index)
{
    vmmCntrlEntry    *CEntry;
    vmmCntrlTable    *CTable;
    vmm_state_page_t *vks;
    vmm_version_t     version;

    CEntry = vmm_get_entry(act, index);                 /* Convert index to entry */
    if (!CEntry) {                                      /* Caller guarantees that this will work */
        panic("vmm_flush_context: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
            act, index);
        return;
    }

    if(CEntry->vmmFacCtx.FPUsave) {                     /* Is there any floating point context? */
        toss_live_fpu(&CEntry->vmmFacCtx);              /* Get rid of any live context here */
        save_release((savearea *)CEntry->vmmFacCtx.FPUsave); /* Release it */
    }

    if(CEntry->vmmFacCtx.VMXsave) {                     /* Is there any vector context? */
        toss_live_vec(&CEntry->vmmFacCtx);              /* Get rid of any live context here */
        save_release((savearea *)CEntry->vmmFacCtx.VMXsave); /* Release it */
    }

    vmm_unmap_all_pages(act, index);                    /* Blow away all mappings for this context */

    CTable = act->machine.vmmControl;                   /* Get the control table address */
    CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp; /* Make sure we don't try to automap into this */

    CEntry->vmmFlags &= vmmInUse;                       /* Clear out all of the flags for this entry except in use */
    CEntry->vmmFacCtx.FPUsave = 0;                      /* Clear facility context control */
    CEntry->vmmFacCtx.FPUlevel = 0;                     /* Clear facility context control */
    CEntry->vmmFacCtx.FPUcpu = 0;                       /* Clear facility context control */
    CEntry->vmmFacCtx.VMXsave = 0;                      /* Clear facility context control */
    CEntry->vmmFacCtx.VMXlevel = 0;                     /* Clear facility context control */
    CEntry->vmmFacCtx.VMXcpu = 0;                       /* Clear facility context control */

    vks = CEntry->vmmContextKern;                       /* Get address of the context page */
    version = vks->interface_version;                   /* Save the version code */
    bzero((char *)vks, 4096);                           /* Clear all */

    vks->interface_version = version;                   /* Set our version code */
    vks->thread_index = index & vmmTInum;               /* Tell the user the index for this virtual machine */

    return;                                             /* Context is now flushed */
}


/*************************************************************************************
	Virtual Machine Monitor Exported Functionality

	The following routines are used to implement a quick-switch mechanism for
	virtual machines that need to execute within their own processor environment
	(including register and MMU state).
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for them.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing major/minor version of
**		the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_version(struct savearea *save)
{
    save->save_r3 = kVmmCurrentVersion;                 /* Return the version */
    return 1;
}
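
/*
 * Illustrative sketch, not part of the original source (compiled out, helper
 * name hypothetical): how a client would split and check the packed version
 * word, mirroring the tests vmm_init_context performs below.
 */
#if 0
static boolean_t vmm_version_ok_example(vmm_version_t version)
{
    unsigned int major = version >> 16;                 /* Major version lives in the top 16 bits */
    unsigned int minor = version & 0xFFFF;              /* Minor version lives in the bottom 16 bits */

    if ((major < kVmmMinMajorVersion) || (major > (kVmmCurrentVersion >> 16))) return FALSE;
    if (minor > kVmmCurMinorVersion) return FALSE;      /* Minor must not be newer than this kernel supports */
    return TRUE;
}
#endif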


/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing functionality supported by this
**		version of the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_features(struct savearea *save)
{
    save->save_r3 = kVmmCurrentFeatures;                /* Return the features */
    if(getPerProc()->pf.Available & pf64Bit) {
        save->save_r3 &= ~kVmmFeature_LittleEndian;     /* No little endian here */
        save->save_r3 |= kVmmFeature_SixtyFourBit;      /* Set that we can do 64-bit */
    }
    return 1;
}
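
/*
 * Illustrative sketch, not part of the original source (compiled out, helper
 * name hypothetical): a client tests individual bits of the returned feature
 * word, e.g. for 64-bit capability before requesting vmm64Bit.
 */
#if 0
static boolean_t vmm_host_is_64bit_example(unsigned int features)
{
    return ((features & kVmmFeature_SixtyFourBit) != 0); /* Set above when the host CPU is 64-bit */
}
#endif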


/*-----------------------------------------------------------------------
** vmm_max_addr
**
** This function returns the maximum addressable virtual address supported
**
** Outputs:
**		Returns max address
-----------------------------------------------------------------------*/

addr64_t vmm_max_addr(thread_t act)
{
    return vm_max_address;                              /* Return the maximum address */
}

/*-----------------------------------------------------------------------
** vmm_get_XA
**
** This function retrieves the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Return code is set to the XA flags. If the index is invalid or the
**		context has not been created, we return 0.
-----------------------------------------------------------------------*/

unsigned int vmm_get_XA(
    thread_t           act,
    vmm_thread_index_t index)
{
    vmmCntrlEntry *CEntry;

    CEntry = vmm_get_entry(act, index);                 /* Convert index to entry */
    if (CEntry == NULL) return 0;                       /* Either this isn't a vmm or the index is bogus */

    return CEntry->vmmXAFlgs;                           /* Return the flags */
}

/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**		act            - pointer to current thread activation
**		version        - requested version of the Vmm interface (allowing
**		                 future versions of the interface to change, but still
**		                 support older clients)
**		vmm_user_state - pointer to a logical page within the
**		                 client's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

int vmm_init_context(struct savearea *save)
{

    thread_t          act;
    vmm_version_t     version;
    vmm_state_page_t *vmm_user_state;
    vmmCntrlTable    *CTable;
    vm_offset_t       conkern;
    vmm_state_page_t *vks;
    ppnum_t           conphys;
    kern_return_t     ret;
    int               cvi, i;
    task_t            task;
    thread_t          fact, gact;

    vmm_user_state = CAST_DOWN(vmm_state_page_t *, save->save_r4); /* Get the user address of the comm area */
    if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) { /* Make sure the comm area is page aligned */
        save->save_r3 = KERN_FAILURE;                   /* Return failure */
        return 1;
    }

    /* Make sure that the version requested is supported */
    version = save->save_r3;                            /* Pick up passed in version */
    if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
        save->save_r3 = KERN_FAILURE;                   /* Return failure */
        return 1;
    }

    if((version & 0xFFFF) > kVmmCurMinorVersion) {      /* Check for valid minor */
        save->save_r3 = KERN_FAILURE;                   /* Return failure */
        return 1;
    }

    act = current_thread();                             /* Pick up our activation */

    ml_set_interrupts_enabled(TRUE);                    /* This can take a bit of time so pass interruptions */

    task = current_task();                              /* Figure out who we are */

    task_lock(task);                                    /* Lock our task */

    fact = (thread_t)task->threads.next;                /* Get the first activation on task */
    gact = 0;                                           /* Pretend we didn't find it yet */

    for(i = 0; i < task->thread_count; i++) {           /* All of the activations */
        if(fact->machine.vmmControl) {                  /* Is this a virtual machine monitor? */
            gact = fact;                                /* Yeah... */
            break;                                      /* Bail the loop... */
        }
        fact = (thread_t)fact->task_threads.next;       /* Go to the next one */
    }


    /*
     * We only allow one thread per task to be a virtual machine monitor right now. This solves
     * a number of potential problems that I can't put my finger on right now.
     *
     * Ultimately, I think we want to move the controls and make all this task based instead of
     * thread based. That would allow an emulator architecture to spawn a kernel thread for each
     * VM (if they want) rather than hand dispatch contexts.
     */

    if(gact && (gact != act)) {                         /* Check if another thread is a vmm or trying to be */
        task_unlock(task);                              /* Release task lock */
        ml_set_interrupts_enabled(FALSE);               /* Set back interruptions */
        save->save_r3 = KERN_FAILURE;                   /* We must play alone... */
        return 1;
    }

    if(!gact) act->machine.vmmControl = (vmmCntrlTable *)1; /* Temporarily mark that we are the vmm thread */

    task_unlock(task);                                  /* Safe to release now (because we've marked ourselves) */

    CTable = act->machine.vmmControl;                   /* Get the control table address */
    if ((unsigned int)CTable == 1) {                    /* If we are marked, try to allocate a new table, otherwise we have one */
        if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) { /* Get a fresh emulation control table */
            act->machine.vmmControl = 0;                /* Unmark us as vmm 'cause we failed */
            ml_set_interrupts_enabled(FALSE);           /* Set back interruptions */
            save->save_r3 = KERN_RESOURCE_SHORTAGE;     /* No storage... */
            return 1;
        }

        bzero((void *)CTable, sizeof(vmmCntrlTable));   /* Clean it up */
        act->machine.vmmControl = CTable;               /* Initialize the table anchor */
    }

    for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {        /* Search to find a free slot */
        if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break; /* Bail if we find an unused slot */
    }

    if(cvi >= kVmmMaxContexts) {                        /* Did we find one? */
        ml_set_interrupts_enabled(FALSE);               /* Set back interruptions */
        save->save_r3 = KERN_RESOURCE_SHORTAGE;         /* No empty slots... */
        return 1;
    }

    ret = vm_map_wire(                                  /* Wire the virtual machine monitor's context area */
        act->map,
        (vm_offset_t)vmm_user_state,
        (vm_offset_t)vmm_user_state + PAGE_SIZE,
        VM_PROT_READ | VM_PROT_WRITE,
        FALSE);

    if (ret != KERN_SUCCESS)                            /* The wire failed, return the code */
        goto return_in_shame;

    /* Map the vmm state into the kernel's address space. */
    conphys = pmap_find_phys(act->map->pmap, (addr64_t)((uintptr_t)vmm_user_state));

    /* Find a virtual address to use. */
    ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
    if (ret != KERN_SUCCESS) {                          /* Did we find an address? */
        (void) vm_map_unwire(act->map,                  /* No, unwire the context area */
            (vm_offset_t)vmm_user_state,
            (vm_offset_t)vmm_user_state + PAGE_SIZE,
            TRUE);
        goto return_in_shame;
    }

    /* Map it into the kernel's address space. */

    pmap_enter(kernel_pmap, conkern, conphys,
        VM_PROT_READ | VM_PROT_WRITE,
        VM_WIMG_USE_DEFAULT, TRUE);

    /* Clear the vmm state structure. */
    vks = (vmm_state_page_t *)conkern;
    bzero((char *)vks, PAGE_SIZE);


    /* We're home free now. Simply fill in the necessary info and return. */

    vks->interface_version = version;                   /* Set our version code */
    vks->thread_index = cvi + 1;                        /* Tell the user the index for this virtual machine */

    CTable->vmmc[cvi].vmmFlags = vmmInUse;              /* Mark the slot in use and make sure the rest are clear */
    CTable->vmmc[cvi].vmmContextKern = vks;             /* Remember the kernel address of comm area */
    CTable->vmmc[cvi].vmmContextPhys = conphys;         /* Remember the state page physical addr */
    CTable->vmmc[cvi].vmmContextUser = vmm_user_state;  /* Remember user address of comm area */

    CTable->vmmc[cvi].vmmFacCtx.FPUsave = 0;            /* Clear facility context control */
    CTable->vmmc[cvi].vmmFacCtx.FPUlevel = 0;           /* Clear facility context control */
    CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0;             /* Clear facility context control */
    CTable->vmmc[cvi].vmmFacCtx.VMXsave = 0;            /* Clear facility context control */
    CTable->vmmc[cvi].vmmFacCtx.VMXlevel = 0;           /* Clear facility context control */
    CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0;             /* Clear facility context control */
    CTable->vmmc[cvi].vmmFacCtx.facAct = act;           /* Point back to the activation */

    hw_atomic_add((int *)&saveanchor.savetarget, 2);    /* Account for the number of extra saveareas we think we might "need" */

    pmap_t hpmap = act->map->pmap;                      /* Get host pmap */
    pmap_t gpmap = pmap_create(0);                      /* Make a fresh guest pmap */
    if (gpmap) {                                        /* Did we succeed ? */
        CTable->vmmAdsp[cvi] = gpmap;                   /* Remember guest pmap for new context */
        if (lowGlo.lgVMMforcedFeats & vmmGSA) {         /* Forcing on guest shadow assist ? */
            vmm_activate_gsa(act, cvi+1);               /* Activate GSA */
        }
    } else {
        ret = KERN_RESOURCE_SHORTAGE;                   /* We've failed to allocate a guest pmap */
        goto return_in_shame;                           /* Shame on us. */
    }

    if (!(hpmap->pmapFlags & pmapVMhost)) {             /* Do this stuff if this is our first time hosting */
        hpmap->pmapFlags |= pmapVMhost;                 /* We're now hosting */
    }

    ml_set_interrupts_enabled(FALSE);                   /* Set back interruptions */
    save->save_r3 = KERN_SUCCESS;                       /* Hip, hip, hooray... */
    return 1;

return_in_shame:
    if(!gact) kfree(CTable, sizeof(vmmCntrlTable));     /* Toss the table if we just allocated it */
    act->machine.vmmControl = 0;                        /* Unmark us as vmm 'cause we failed */
    ml_set_interrupts_enabled(FALSE);                   /* Set back interruptions */
    save->save_r3 = ret;                                /* Pass back return code... */
    return 1;

}
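
/*
 * Illustrative sketch, not part of the original source (compiled out, names
 * hypothetical): the register protocol vmm_init_context implements. The
 * requested version rides in r3, the page-aligned comm area address in r4,
 * and the result comes back in r3.
 */
#if 0
static void vmm_init_context_protocol_example(struct savearea *save, addr64_t user_state_page)
{
    /* user_state_page stands in for a page-aligned vmm_state_page_t in the client's map */
    save->save_r3 = kVmmCurrentVersion;                 /* Requested interface version goes in r3 */
    save->save_r4 = user_state_page;                    /* Comm area address goes in r4 */
    (void) vmm_init_context(save);                      /* On return r3 holds the kern_return_t; on success */
                                                        /* the comm page's thread_index is the new one-based index */
}
#endif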


/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		kernel return code indicating success or failure
**
** Strangeness note:
**		This call will also trash the address space with the same ID. While this
**		is really not too cool, we have to do it because we need to make
**		sure that old VMM users (not that we really have any) who depend upon
**		the address space going away with the context still work the same.
-----------------------------------------------------------------------*/

kern_return_t vmm_tear_down_context(
    thread_t           act,
    vmm_thread_index_t index)
{
    vmmCntrlEntry     *CEntry;
    vmmCntrlTable     *CTable;
    int                cvi;
    register savearea *sv;

    CEntry = vmm_get_entry(act, index);                 /* Convert index to entry */
    if (CEntry == NULL) return KERN_FAILURE;            /* Either this isn't a vmm thread or the index is bogus */

    ml_set_interrupts_enabled(TRUE);                    /* This can take a bit of time so pass interruptions */

    hw_atomic_sub((int *)&saveanchor.savetarget, 2);    /* We don't need these extra saveareas anymore */

    if(CEntry->vmmFacCtx.FPUsave) {                     /* Is there any floating point context? */
        toss_live_fpu(&CEntry->vmmFacCtx);              /* Get rid of any live context here */
        save_release((savearea *)CEntry->vmmFacCtx.FPUsave); /* Release it */
    }

    if(CEntry->vmmFacCtx.VMXsave) {                     /* Is there any vector context? */
        toss_live_vec(&CEntry->vmmFacCtx);              /* Get rid of any live context here */
        save_release((savearea *)CEntry->vmmFacCtx.VMXsave); /* Release it */
    }

    CEntry->vmmPmap = 0;                                /* Remove this trace */
    pmap_t gpmap = act->machine.vmmControl->vmmAdsp[index - 1];
                                                        /* Get context's guest pmap (if any) */
    if (gpmap) {                                        /* Check if there is an address space assigned here */
        if (gpmap->pmapFlags & pmapVMgsaa) {            /* Handle guest shadow assist case specially */
            hw_rem_all_gv(gpmap);                       /* Remove all guest mappings from shadow hash table */
        } else {
            mapping_remove(gpmap, 0xFFFFFFFFFFFFF000LL); /* Remove final page explicitly because we might have mapped it */
            pmap_remove(gpmap, 0, 0xFFFFFFFFFFFFF000LL); /* Remove all entries from this map */
        }
        pmap_destroy(gpmap);                            /* Toss the pmap for this context */
        act->machine.vmmControl->vmmAdsp[index - 1] = NULL; /* Clean it up */
    }

    (void) vm_map_unwire(                               /* Unwire the user comm page */
        act->map,
        (vm_offset_t)CEntry->vmmContextUser,
        (vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
        FALSE);

    kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE); /* Remove kernel's view of the comm page */

    CTable = act->machine.vmmControl;                   /* Get the control table address */
    CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp; /* Make sure we don't try to automap into this */

    CEntry->vmmFlags = 0;                               /* Clear out all of the flags for this entry including in use */
    CEntry->vmmContextKern = 0;                         /* Clear the kernel address of comm area */
    CEntry->vmmContextUser = 0;                         /* Clear the user address of comm area */

    CEntry->vmmFacCtx.FPUsave = 0;                      /* Clear facility context control */
    CEntry->vmmFacCtx.FPUlevel = 0;                     /* Clear facility context control */
    CEntry->vmmFacCtx.FPUcpu = 0;                       /* Clear facility context control */
    CEntry->vmmFacCtx.VMXsave = 0;                      /* Clear facility context control */
    CEntry->vmmFacCtx.VMXlevel = 0;                     /* Clear facility context control */
    CEntry->vmmFacCtx.VMXcpu = 0;                       /* Clear facility context control */
    CEntry->vmmFacCtx.facAct = 0;                       /* Clear facility context control */

    for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {        /* Search to find a free slot */
        if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {     /* Return if there are still some in use */
            ml_set_interrupts_enabled(FALSE);           /* No more interruptions */
            return KERN_SUCCESS;                        /* Leave... */
        }
    }

    /*
     * When we have tossed the last context, toss any address spaces left over before releasing
     * the VMM control block
     */

    for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {       /* Look at all slots */
        if(!act->machine.vmmControl->vmmAdsp[cvi - 1]) continue; /* Nothing to remove here */
        mapping_remove(act->machine.vmmControl->vmmAdsp[cvi - 1], 0xFFFFFFFFFFFFF000LL); /* Remove final page explicitly because we might have mapped it */
        pmap_remove(act->machine.vmmControl->vmmAdsp[cvi - 1], 0, 0xFFFFFFFFFFFFF000LL); /* Remove all entries from this map */
        pmap_destroy(act->machine.vmmControl->vmmAdsp[cvi - 1]); /* Toss the pmap for this context */
        act->machine.vmmControl->vmmAdsp[cvi - 1] = 0;  /* Clear just in case */
    }

    pmap_t pmap = act->map->pmap;                       /* Get our pmap */
    if (pmap->pmapVmmExt) {                             /* Release any VMM pmap extension block and shadow hash table */
        vmm_release_shadow_hash(pmap->pmapVmmExt);      /* Release extension block and shadow hash table */
        pmap->pmapVmmExt = 0;                           /* Forget extension block */
        pmap->pmapVmmExtPhys = 0;                       /* Forget extension block's physical address, too */
    }
    pmap->pmapFlags &= ~pmapVMhost;                     /* We're no longer hosting */

    kfree(CTable, sizeof(vmmCntrlTable));               /* Toss the table because we tossed the last context */
    act->machine.vmmControl = 0;                        /* Unmark us as vmm */

    ml_set_interrupts_enabled(FALSE);                   /* No more interruptions */

    return KERN_SUCCESS;
}


/*-----------------------------------------------------------------------
** vmm_activate_XA
**
** This function activates the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
** Note that this function does a lot of the same stuff as vmm_tear_down_context
** and vmm_init_context.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**		flags - the extended architecture flags
**
**
** Outputs:
**		KERN_SUCCESS if vm is valid and initialized. KERN_FAILURE if not.
**		Also, the internal flags are set and, additionally, the VM is completely reset.
-----------------------------------------------------------------------*/
kern_return_t vmm_activate_XA(
    thread_t           act,
    vmm_thread_index_t index,
    unsigned int       xaflags)
{
    vmmCntrlEntry *CEntry;
    kern_return_t  result = KERN_SUCCESS;               /* Assume success */

    if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && (!(getPerProc()->pf.Available & pf64Bit))))
        return (KERN_FAILURE);                          /* Unknown or unsupported feature requested */

    CEntry = vmm_get_entry(act, index);                 /* Convert index to entry */
    if (CEntry == NULL) return KERN_FAILURE;            /* Either this isn't a vmm or the index is bogus */

    ml_set_interrupts_enabled(TRUE);                    /* This can take a bit of time so pass interruptions */

    vmm_flush_context(act, index);                      /* Flush the context */

    if (xaflags & vmm64Bit) {                           /* Activating 64-bit mode ? */
        CEntry->vmmXAFlgs |= vmm64Bit;                  /* Activate 64-bit mode */
    }

    if (xaflags & vmmGSA) {                             /* Activating guest shadow assist ? */
        result = vmm_activate_gsa(act, index);          /* Activate guest shadow assist */
    }

    ml_set_interrupts_enabled(FALSE);                   /* No more interruptions */

    return result;                                      /* Return activate result */
}

/*-----------------------------------------------------------------------
** vmm_deactivate_XA
**
-----------------------------------------------------------------------*/
kern_return_t vmm_deactivate_XA(
    thread_t           act,
    vmm_thread_index_t index,
    unsigned int       xaflags)
{
    vmmCntrlEntry *CEntry;
    kern_return_t  result = KERN_SUCCESS;               /* Assume success */

    if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && (!(getPerProc()->pf.Available & pf64Bit))))
        return (KERN_FAILURE);                          /* Unknown or unsupported feature requested */

    CEntry = vmm_get_entry(act, index);                 /* Convert index to entry */
    if (CEntry == NULL) return KERN_FAILURE;            /* Either this isn't a vmm or the index is bogus */

    ml_set_interrupts_enabled(TRUE);                    /* This can take a bit of time so pass interruptions */

    vmm_flush_context(act, index);                      /* Flush the context */

    if (xaflags & vmm64Bit) {                           /* Deactivating 64-bit mode ? */
        CEntry->vmmXAFlgs &= ~vmm64Bit;                 /* Deactivate 64-bit mode */
    }

    if (xaflags & vmmGSA) {                             /* Deactivating guest shadow assist ? */
        vmm_deactivate_gsa(act, index);                 /* Deactivate guest shadow assist */
    }

    ml_set_interrupts_enabled(FALSE);                   /* No more interruptions */

    return result;                                      /* Return deactivate result */
}


/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**		activation to tear down
**
** Outputs:
**		All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/
void vmm_tear_down_all(thread_t act) {

    vmmCntrlTable *CTable;
    int            cvi;
    kern_return_t  ret;
    savearea      *save;
    spl_t          s;

    if(act->machine.specFlags & runningVM) {            /* Are we actually in a context right now? */
        save = find_user_regs(act);                     /* Find the user state context */
        if(!save) {                                     /* Did we find it? */
            panic("vmm_tear_down_all: runningVM marked but no user state context\n");
            return;
        }

        save->save_exception = kVmmBogusContext*4;      /* Indicate that this context is bogus now */
        s = splhigh();                                  /* Make sure interrupts are off */
        vmm_force_exit(act, save);                      /* Force an exit from VM state */
        splx(s);                                        /* Restore interrupts */
    }

    if(CTable = act->machine.vmmControl) {              /* Do we have a vmm control block? */


        for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {   /* Look at all slots */
            if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) { /* Is this one in use? */
                ret = vmm_tear_down_context(act, cvi);  /* Take down the found context */
                if(ret != KERN_SUCCESS) {               /* Did it go away? */
                    panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
                        ret, act, cvi);
                }
            }
        }

        /*
         * Note that all address spaces should be gone here.
         */
        if(act->machine.vmmControl) {                   /* Did we find one? */
            panic("vmm_tear_down_all: control table did not get deallocated\n"); /* Table did not go away */
        }
    }

    return;
}

/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it, and this mapping, may disappear at any time, even before it gets used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of address space to map into
**		va    - virtual address within the client's address
**		        space
**		ava   - virtual address within the alternate address
**		        space
**		prot  - protection flags
**
** Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
** areas are not allowed and will fail. Same with directly mapped I/O areas.
**
** Input conditions:
**		Interrupts disabled (from fast trap)
**
** Outputs:
**		kernel return code indicating success or failure
**		if success, va resident and alternate mapping made
-----------------------------------------------------------------------*/

kern_return_t vmm_map_page(
    thread_t      act,
    vmm_adsp_id_t index,
    addr64_t      cva,
    addr64_t      ava,
    vm_prot_t     prot)
{
    kern_return_t       ret;
    register mapping_t *mp;
    vm_map_t            map;
    addr64_t            ova, nextva;
    pmap_t              pmap;

    pmap = vmm_get_adsp(act, index);                    /* Get the guest pmap for this address space */
    if(!pmap) return KERN_FAILURE;                      /* Bogus address space, no VMs, or we can't make a pmap, failure... */

    if(ava > vm_max_address) return kVmmInvalidAddress; /* Does the machine support an address of this size? */

    map = current_thread()->map;                        /* Get the host's map */

    if (pmap->pmapFlags & pmapVMgsaa) {                 /* Guest shadow assist active ? */
        ret = hw_res_map_gv(map->pmap, pmap, cva, ava, getProtPPC(prot));
                                                        /* Attempt to resume an existing gv->phys mapping */
        if (mapRtOK != ret) {                           /* Nothing to resume, construct a new mapping */

            while (1) {                                 /* Find host mapping or fail */
                mp = mapping_find(map->pmap, cva, &nextva, 0);
                                                        /* Attempt to find host mapping and pin it */
                if (mp) break;                          /* Got it */

                ml_set_interrupts_enabled(TRUE);
                                                        /* Open 'rupt window */
                ret = vm_fault(map,                     /* Didn't find it, try to fault in host page read/write */
                    vm_map_trunc_page(cva),
                    VM_PROT_READ | VM_PROT_WRITE,
                    FALSE,                              /* change wiring */
                    THREAD_UNINT,
                    NULL,
                    0);
                ml_set_interrupts_enabled(FALSE);
                                                        /* Close 'rupt window */
                if (ret != KERN_SUCCESS)
                    return KERN_FAILURE;                /* Fault failed, return failure */
            }

            if (mpNormal != (mp->mpFlags & mpType)) {
                                                        /* Host mapping must be a vanilla page */
                mapping_drop_busy(mp);                  /* Un-pin host mapping */
                return KERN_FAILURE;                    /* Return failure */
            }

            /* Partially construct gv->phys mapping */
            unsigned int pindex;
            phys_entry_t *physent = mapping_phys_lookup(mp->mpPAddr, &pindex);
            if (!physent) {
                mapping_drop_busy(mp);
                return KERN_FAILURE;
            }
            unsigned int pattr = ((physent->ppLink & (ppI | ppG)) >> 60);
            unsigned int wimg = 0x2;
            if (pattr & mmFlgCInhib) wimg |= 0x4;
            if (pattr & mmFlgGuarded) wimg |= 0x1;
            unsigned int mflags = (pindex << 16) | mpGuest;
            addr64_t gva = ((ava & ~mpHWFlags) | (wimg << 3) | getProtPPC(prot));

            hw_add_map_gv(map->pmap, pmap, gva, mflags, mp->mpPAddr);
                                                        /* Construct new guest->phys mapping */

            mapping_drop_busy(mp);                      /* Un-pin host mapping */
        }
    } else {
        while(1) {                                      /* Keep trying until we get it or until we fail */

            mp = mapping_find(map->pmap, cva, &nextva, 0); /* Find the mapping for this address */

            if(mp) break;                               /* We found it */

            ml_set_interrupts_enabled(TRUE);            /* Enable interruptions */
            ret = vm_fault(map,                         /* Didn't find it, try to fault it in read/write... */
                vm_map_trunc_page(cva),
                VM_PROT_READ | VM_PROT_WRITE,
                FALSE,                                  /* change wiring */
                THREAD_UNINT,
                NULL,
                0);
            ml_set_interrupts_enabled(FALSE);           /* Disable interruptions */
            if (ret != KERN_SUCCESS) return KERN_FAILURE; /* There isn't a page there, return... */
        }

        if((mp->mpFlags & mpType) != mpNormal) {        /* If this is a block, a nest, or some other special thing, we can't map it */
            mapping_drop_busy(mp);                      /* We have everything we need from the mapping */
            return KERN_FAILURE;                        /* Leave in shame */
        }

        while(1) {                                      /* Keep trying the enter until it goes in */
            ova = mapping_make(pmap, ava, mp->mpPAddr, 0, 1, prot); /* Enter the mapping into the pmap */
            if(!ova) break;                             /* If there were no collisions, we are done... */
            mapping_remove(pmap, ova);                  /* Remove the mapping that collided */
        }

        mapping_drop_busy(mp);                          /* We have everything we need from the mapping */
    }

    if (!((getPerProc()->spcFlags) & FamVMmode)) {
        act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */
        act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index; /* Remember last address space */
    }

    return KERN_SUCCESS;
}
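
/*
 * Worked example, not part of the original source (compiled out, helper name
 * hypothetical): the WIMG assembly used above. Bit 0x4 is I (cache-inhibited),
 * 0x2 is M (memory-coherent), 0x1 is G (guarded); vmm_map_page always starts
 * from M and ORs in I and G from the physical entry's attributes.
 */
#if 0
static unsigned int vmm_wimg_example(unsigned int pattr)
{
    unsigned int wimg = 0x2;                            /* Always memory-coherent (M) */
    if (pattr & mmFlgCInhib)  wimg |= 0x4;              /* Cache-inhibited (I) */
    if (pattr & mmFlgGuarded) wimg |= 0x1;              /* Guarded (G) */
    return wimg;                                        /* e.g., an I/O-style page gets 0x2 | 0x4 | 0x1 = 0x7 */
}
#endif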


/*-----------------------------------------------------------------------
** vmm_map_execute
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context and then directly starts executing.
**
** See description of vmm_map_page for details.
**
** Inputs:
**		Index is used for both the context and the address space ID.
**		index[24:31] is the context id and index[16:23] is the address space.
**		If the address space ID is 0, the context ID is used for it.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_map_execute(
    thread_t           act,
    vmm_thread_index_t index,
    addr64_t           cva,
    addr64_t           ava,
    vm_prot_t          prot)
{
    kern_return_t      ret;
    vmmCntrlEntry     *CEntry;
    unsigned int       adsp;
    vmm_thread_index_t cndx;

    cndx = index & 0xFF;                                /* Clean it up */

    CEntry = vmm_get_entry(act, cndx);                  /* Get and validate the index */
    if (CEntry == NULL) return kVmmBogusContext;        /* Return bogus context */

    if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
        return kVmmBogusContext;                        /* Yes, invalid index in Fam */

    adsp = (index >> 8) & 0xFF;                         /* Get any requested address space */
    if(!adsp) adsp = (index & 0xFF);                    /* If 0, use context ID as address space ID */

    ret = vmm_map_page(act, adsp, cva, ava, prot);      /* Go try to map the page on in */


    if(ret == KERN_SUCCESS) {
        act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */
        act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx; /* Remember last address space */
        vmm_execute_vm(act, cndx);                      /* Return was ok, launch the VM */
    }

    return ret;                                         /* We had trouble mapping in the page */

}
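
/*
 * Illustrative sketch, not part of the original source (compiled out, helper
 * name hypothetical): packing the combined index vmm_map_execute decodes
 * above, with the context ID in the low byte and the optional address space
 * ID in the next byte (0 meaning "same as the context ID").
 */
#if 0
static vmm_thread_index_t vmm_pack_exec_index_example(unsigned int context, unsigned int adsp)
{
    return (((adsp & 0xFF) << 8) | (context & 0xFF));   /* adsp == 0 defaults to the context ID */
}
#endif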

/*-----------------------------------------------------------------------
** vmm_map_list
**
** This function maps a list of pages into various address spaces
**
** Inputs:
**		act    - pointer to current thread activation
**		index  - index of default address space (used if not specified in a list entry)
**		count  - number of pages to map
**		flavor - 0 if 32-bit version, 1 if 64-bit
**		vmcpComm in the comm page contains up to kVmmMaxMapPages to map
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxMapPages is exceeded
**		or the vmm_map_page call fails.
**		We return kVmmInvalidAddress if virtual address size is not supported
-----------------------------------------------------------------------*/

kern_return_t vmm_map_list(
    thread_t      act,
    vmm_adsp_id_t index,
    unsigned int  cnt,
    unsigned int  flavor)
{
    vmmCntrlEntry *CEntry;
    boolean_t      ret;
    unsigned int   i;
    vmmMList      *lst;
    vmmMList64    *lstx;
    addr64_t       cva;
    addr64_t       ava;
    vm_prot_t      prot;
    vmm_adsp_id_t  adsp;

    CEntry = vmm_get_entry(act, index);                 /* Convert index to entry */
    if (CEntry == NULL) return KERN_FAILURE;            /* Either this isn't a vmm or the index is bogus */

    if(cnt > kVmmMaxMapPages) return KERN_FAILURE;      /* They tried to map too many */
    if(!cnt) return KERN_SUCCESS;                       /* If they said none, we're done... */

    lst = (vmmMList *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */
    lstx = (vmmMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */

    for(i = 0; i < cnt; i++) {                          /* Step through and map all pages in the list */
        if(flavor) {                                    /* Check if 32- or 64-bit addresses */
            cva = lstx[i].vmlva;                        /* Get the 64-bit actual address */
            ava = lstx[i].vmlava;                       /* Get the 64-bit guest address */
        }
        else {
            cva = lst[i].vmlva;                         /* Get the 32-bit actual address */
            ava = lst[i].vmlava;                        /* Get the 32-bit guest address */
        }

        prot = ava & vmmlProt;                          /* Extract the protection bits */
        adsp = (ava & vmmlAdID) >> 4;                   /* Extract an explicit address space request */
        if(!adsp) adsp = index - 1;                     /* If no explicit, use supplied default */
        ava &= 0xFFFFFFFFFFFFF000ULL;                   /* Clean up the address */

        ret = vmm_map_page(act, adsp + 1, cva, ava, prot); /* Go try to map the page into the requested space */
        if(ret != KERN_SUCCESS) return ret;             /* Bail if any error */
    }

    return KERN_SUCCESS;                                /* Return... */
}
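
/*
 * Illustrative sketch, not part of the original source (compiled out, helper
 * name hypothetical): building one 64-bit map-list entry the way vmm_map_list
 * decodes it, with the protection bits and an optional explicit address space
 * packed into the low bits of the guest address word.
 */
#if 0
static void vmm_pack_mlist_example(vmmMList64 *entry, addr64_t cva, addr64_t ava,
    vm_prot_t prot, unsigned int adsp)
{
    entry->vmlva  = cva;                                /* Host (client) virtual address of the page */
    entry->vmlava = (ava & 0xFFFFFFFFFFFFF000ULL)       /* Page-aligned guest virtual address */
                  | ((adsp << 4) & vmmlAdID)            /* Explicit address space request, 0 = use default */
                  | (prot & vmmlProt);                  /* Protection bits ride in the low nibble */
}
#endif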

/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** Given a context index and a guest virtual address, convert the address
** to its corresponding host virtual address.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - context index
**		gva   - guest virtual address
**
** Outputs:
**		Host virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**		If the host address space contains multiple virtual addresses mapping
**		to the physical address corresponding to the specified guest virtual
**		address (i.e., host virtual aliases), it is unpredictable which host
**		virtual address (alias) will be returned. Moral of the story: No host
**		virtual aliases.
-----------------------------------------------------------------------*/

addr64_t vmm_get_page_mapping(
    thread_t      act,
    vmm_adsp_id_t index,
    addr64_t      gva)
{
    register mapping_t *mp;
    pmap_t              pmap;
    addr64_t            nextva, hva;
    ppnum_t             pa;

    pmap = vmm_get_adsp(act, index);                    /* Get and validate the index */
    if (!pmap) return -1;                               /* No good, failure... */

    if (pmap->pmapFlags & pmapVMgsaa) {                 /* Guest shadow assist (GSA) active ? */
        return (hw_gva_to_hva(pmap, gva));              /* Convert guest to host virtual address */
    } else {
        mp = mapping_find(pmap, gva, &nextva, 0);       /* Find guest mapping for this virtual address */

        if(!mp) return -1;                              /* Not mapped, return -1 */

        pa = mp->mpPAddr;                               /* Remember the physical page address */

        mapping_drop_busy(mp);                          /* Go ahead and release the mapping now */

        pmap = current_thread()->map->pmap;             /* Get the host pmap */
        hva = mapping_p2v(pmap, pa);                    /* Now find the source virtual */

        if(hva != 0) return hva;                        /* We found it... */

        panic("vmm_get_page_mapping: could not back-map guest va (%016llX)\n", gva);
                                                        /* We are bad wrong if we can't find it */

        return -1;                                      /* Never executed, prevents compiler warning */
    }
}

/*-----------------------------------------------------------------------
** vmm_unmap_page
**
** This function unmaps a page from the guest address space.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the vmm's address
**		        space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_page(
    thread_t      act,
    vmm_adsp_id_t index,
    addr64_t      va)
{
    vmmCntrlEntry *CEntry;
    addr64_t       nadd;
    pmap_t         pmap;

    pmap = vmm_get_adsp(act, index);                    /* Get and validate the index */
    if (!pmap) return KERN_FAILURE;                     /* No good, failure... */

    if (pmap->pmapFlags & pmapVMgsaa) {                 /* Handle guest shadow assist specially */
        hw_susp_map_gv(act->map->pmap, pmap, va);       /* Suspend the mapping */
        return (KERN_SUCCESS);                          /* Always returns success */
    } else {
        nadd = mapping_remove(pmap, va);                /* Toss the mapping */

        return ((nadd & 1) ? KERN_FAILURE : KERN_SUCCESS); /* Return... */
    }
}

/*-----------------------------------------------------------------------
** vmm_unmap_list
**
** This function unmaps a list of pages from the alternate's logical
** address space.
**
** Inputs:
**		act    - pointer to current thread activation
**		index  - index of vmm state for this page
**		count  - number of pages to release
**		flavor - 0 if 32-bit, 1 if 64-bit
**		vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_list(
    thread_t      act,
    vmm_adsp_id_t index,
    unsigned int  cnt,
    unsigned int  flavor)
{
    vmmCntrlEntry *CEntry;
    boolean_t      ret;
    kern_return_t  kern_result = KERN_SUCCESS;
    unsigned int  *pgaddr, i;
    addr64_t       gva;
    vmmUMList     *lst;
    vmmUMList64   *lstx;
    pmap_t         pmap;
    int            adsp;

    CEntry = vmm_get_entry(act, index);                 /* Convert index to entry */
    if (CEntry == NULL) return KERN_FAILURE;            /* Either this isn't a vmm or the index is bogus */

    if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE;    /* They tried to unmap too many */
    if(!cnt) return KERN_SUCCESS;                       /* If they said none, we're done... */

    lstx = (vmmUMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */
    lst = (vmmUMList *)lstx;                            /* Same comm area, viewed as 32-bit entries */

    for(i = 0; i < cnt; i++) {                          /* Step through and release all pages in the list */
        if(flavor) {                                    /* Check if 32- or 64-bit addresses */
            gva = lstx[i].vmlava;                       /* Get the 64-bit guest address */
        }
        else {
            gva = lst[i].vmlava;                        /* Get the 32-bit guest address */
        }

        adsp = (gva & vmmlAdID) >> 4;                   /* Extract an explicit address space request */
        if(!adsp) adsp = index - 1;                     /* If no explicit, use supplied default */
        pmap = act->machine.vmmControl->vmmAdsp[adsp];  /* Get the pmap for this request */
        if(!pmap) continue;                             /* Ain't nuthin' mapped here, no durn map... */

        gva &= 0xFFFFFFFFFFFFF000ULL;                   /* Clean up the address */
        if (pmap->pmapFlags & pmapVMgsaa) {             /* Handle guest shadow assist specially */
            hw_susp_map_gv(act->map->pmap, pmap, gva);
                                                        /* Suspend the mapping */
        } else {
            (void)mapping_remove(pmap, gva);            /* Toss the mapping */
        }
    }

    return KERN_SUCCESS;                                /* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_all_pages
**
** This function unmaps all pages from the alternate's logical
** address space.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of context state
**
** Outputs:
**		none
**
** Note:
**		All pages are unmapped, but the address space (i.e., pmap) is still alive
-----------------------------------------------------------------------*/

void vmm_unmap_all_pages(
    thread_t      act,
    vmm_adsp_id_t index)
{
    vmmCntrlEntry *CEntry;
    pmap_t         pmap;

    pmap = vmm_get_adsp(act, index);                    /* Convert index to entry */
    if (!pmap) return;                                  /* Either this isn't a vmm thread or the index is bogus */

    if (pmap->pmapFlags & pmapVMgsaa) {                 /* Handle guest shadow assist specially */
        hw_rem_all_gv(pmap);                            /* Remove all guest's mappings from shadow hash table */
    } else {
        /*
         * Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
         */
        mapping_remove(pmap, 0xFFFFFFFFFFFFF000LL);     /* Remove final page explicitly because we might have mapped it */
        pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000LL);     /* Remove all entries from this map */
    }
    return;
}
1392
1393
1394 /*-----------------------------------------------------------------------
1395 ** vmm_get_page_dirty_flag
1396 **
1397 ** This function returns the changed flag of the page
1398 ** and optionally clears clears the flag.
1399 **
1400 ** Inputs:
1401 ** act - pointer to current thread activation
1402 ** index - index of vmm state for this page
1403 ** va - virtual address within the vmm's address
1404 ** space
1405 ** reset - Clears dirty if true, untouched if not
1406 **
1407 ** Outputs:
1408 ** the dirty bit
1409 ** clears the dirty bit in the pte if requested
1410 **
1411 ** Note:
1412 ** The RC bits are merged into the global physical entry
1413 -----------------------------------------------------------------------*/
1414
1415 boolean_t vmm_get_page_dirty_flag(
1416 thread_t act,
1417 vmm_adsp_id_t index,
1418 addr64_t va,
1419 unsigned int reset)
1420 {
1421 vmmCntrlEntry *CEntry;
1422 register mapping_t *mpv, *mp;
1423 unsigned int RC;
1424 pmap_t pmap;
1425
1426 pmap = vmm_get_adsp(act, index); /* Convert index to entry */
1427 if (!pmap) return 1; /* Either this isn't vmm thread or the index is bogus */
1428
1429 if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */
1430 RC = hw_test_rc_gv(act->map->pmap, pmap, va, reset);/* Fetch the RC bits and clear if requested */
1431 } else {
1432 RC = hw_test_rc(pmap, (addr64_t)va, reset); /* Fetch the RC bits and clear if requested */
1433 }
1434
1435 switch (RC & mapRetCode) { /* Decode return code */
1436
1437 case mapRtOK: /* Changed */
1438 return ((RC & (unsigned int)mpC) == (unsigned int)mpC); /* Return if dirty or not */
1439 break;
1440
1441 case mapRtNotFnd: /* Didn't find it */
1442 return 1; /* Return dirty */
1443 break;
1444
1445 default:
1446 panic("vmm_get_page_dirty_flag: hw_test_rc failed - rc = %d, pmap = %08X, va = %016llX\n", RC, pmap, va);
1447
1448 }
1449
1450 return 1; /* Return the change bit */
1451 }
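
/*
 * Illustrative sketch, not part of the original source: walking a range of
 * guest pages and reporting the ones the VM has modified, e.g. to find the
 * parts of a virtual framebuffer that need redrawing.  Only the routine
 * above is used; the helper name and callback are hypothetical.
 */
static void vmm_scan_dirty_pages(thread_t act, vmm_adsp_id_t index,
	addr64_t start, addr64_t end, void (*dirty)(addr64_t va))
{
	addr64_t va;

	for(va = start & 0xFFFFFFFFFFFFF000ULL; va < end; va += 4096) {	/* One PPC page at a time */
		if(vmm_get_page_dirty_flag(act, index, va, 1))		/* Test and reset the change bit */
			dirty(va);										/* Dirty (or unmapped, which reports dirty) */
	}
}
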
1452
1453
1454 /*-----------------------------------------------------------------------
1455 ** vmm_protect_page
1456 **
1457 ** This function sets the protection bits of a mapped page
1458 **
1459 ** Inputs:
1460 ** act - pointer to current thread activation
1461 ** index - index of vmm state for this page
1462 ** va - virtual address within the vmm's address
1463 ** space
1464 ** prot - Protection flags
1465 **
1466 ** Outputs:
1467 ** none
1468 ** Protection bits of the mapping are modified
1469 **
1470 -----------------------------------------------------------------------*/
1471
1472 kern_return_t vmm_protect_page(
1473 thread_t act,
1474 vmm_adsp_id_t index,
1475 addr64_t va,
1476 vm_prot_t prot)
1477 {
1478 vmmCntrlEntry *CEntry;
1479 addr64_t nextva;
1480 int ret;
1481 pmap_t pmap;
1482
1483 pmap = vmm_get_adsp(act, index); /* Convert index to entry */
1484 if (!pmap) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */
1485
1486 if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */
1487 ret = hw_protect_gv(pmap, va, prot); /* Try to change protection, GSA variant */
1488 } else {
1489 ret = hw_protect(pmap, va, prot, &nextva); /* Try to change protection */
1490 }
1491
1492 switch (ret) { /* Decode return code */
1493
1494 case mapRtOK: /* All ok... */
1495 break; /* Outta here */
1496
1497 case mapRtNotFnd: /* Didn't find it */
1498 return KERN_SUCCESS; /* Ok, return... */
1499 break;
1500
1501 default:
1502 panic("vmm_protect_page: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, (addr64_t)va);
1503
1504 }
1505
1506 if (!((getPerProc()->spcFlags) & FamVMmode)) {
1507 act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */
1508 act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index; /* Remember last address space */
1509 }
1510
1511 return KERN_SUCCESS; /* Return */
1512 }
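
/*
 * Illustrative sketch, not part of the original source: demoting a guest
 * page to read-only so that the VM's next store to it faults back out to
 * the monitor.  Uses only vmm_protect_page above; the wrapper name is
 * hypothetical.
 */
static kern_return_t vmm_write_protect_page(thread_t act, vmm_adsp_id_t index, addr64_t va)
{
	return vmm_protect_page(act, index, va, VM_PROT_READ);	/* Read-only: stores now fault */
}
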
1513
1514
1515 /*-----------------------------------------------------------------------
1516 ** vmm_protect_execute
1517 **
1518 ** This function sets the protection bits of a mapped page
1519 ** and then directly starts executing.
1520 **
1521 ** See description of vmm_protect_page for details
1522 **
1523 ** Inputs:
1524 ** See vmm_protect_page and vmm_map_execute
1525 **
1526 ** Outputs:
1527 ** Normal exit is to run the VM. Abnormal exit is triggered via a
1528 ** non-KERN_SUCCESS return from vmm_protect_page or later during the
1529 ** attempt to transition into the VM.
1530 -----------------------------------------------------------------------*/
1531
1532 vmm_return_code_t vmm_protect_execute(
1533 thread_t act,
1534 vmm_thread_index_t index,
1535 addr64_t va,
1536 vm_prot_t prot)
1537 {
1538 kern_return_t ret;
1539 vmmCntrlEntry *CEntry;
1540 unsigned int adsp;
1541 vmm_thread_index_t cndx;
1542
1543 cndx = index & 0xFF; /* Clean it up */
1544 CEntry = vmm_get_entry(act, cndx); /* Get and validate the index */
1545 if (CEntry == NULL) return kVmmBogusContext; /* Return bogus context */
1546
1547 adsp = (index >> 8) & 0xFF; /* Get any requested address space */
1548 if(!adsp) adsp = (index & 0xFF); /* If 0, use context ID as address space ID */
1549
1550 if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
1551 return kVmmBogusContext; /* Yes, invalid index in Fam */
1552
1553 ret = vmm_protect_page(act, adsp, va, prot); /* Go try to change access */
1554
1555 if(ret == KERN_SUCCESS) {
1556 act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */
1557 act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx; /* Remember last address space */
1558 vmm_execute_vm(act, cndx); /* Return was ok, launch the VM */
1559 }
1560
1561 return ret; /* We had trouble of some kind (shouldn't happen) */
1562
1563 }
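
/*
 * Illustrative sketch, not part of the original source: composing the
 * combined index decoded above.  The low byte carries the one-based
 * context number and the next byte an optional address space ID, where 0
 * means "use the context ID as the space ID".  The helper name is
 * hypothetical.
 */
static vmm_thread_index_t vmm_compose_index(unsigned int cndx, unsigned int adsp)
{
	return (vmm_thread_index_t)(((adsp & 0xFF) << 8) | (cndx & 0xFF));
}
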
1564
1565
1566 /*-----------------------------------------------------------------------
1567 ** vmm_get_float_state
1568 **
1569 ** This function causes the current floating point state to
1570 ** be saved into the shared context area. It also clears the
1571 ** vmmFloatCngd changed flag.
1572 **
1573 ** Inputs:
1574 ** act - pointer to current thread activation structure
1575 ** index - index returned by vmm_init_context
1576 **
1577 ** Outputs:
1578 ** context saved
1579 -----------------------------------------------------------------------*/
1580
1581 kern_return_t vmm_get_float_state(
1582 thread_t act,
1583 vmm_thread_index_t index)
1584 {
1585 vmmCntrlEntry *CEntry;
1586 vmmCntrlTable *CTable;
1587 int i;
1588 register struct savearea_fpu *sv;
1589
1590 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
1591 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */
1592
1593 act->machine.specFlags &= ~floatCng; /* Clear the special flag */
1594 CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd; /* Clear the change indication */
1595
1596 fpu_save(&CEntry->vmmFacCtx); /* Save context if live */
1597
1598 if((sv = CEntry->vmmFacCtx.FPUsave) != NULL) { /* Is there context yet? */
1599 bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8); /* 32 registers */
1600 return KERN_SUCCESS;
1601 }
1602
1603
1604 for(i = 0; i < 32; i++) { /* Initialize floating points */
1605 CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit; /* Initial value */
1606 }
1607
1608 return KERN_SUCCESS;
1609 }
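
/*
 * Illustrative sketch, not part of the original source: once the call
 * above returns, every guest FPR slot in the shared context area is
 * populated (saved state or the FloatInit fill), so the monitor can read
 * them unconditionally.  The helper name is hypothetical.
 */
static double vmm_read_guest_fpr(vmmCntrlEntry *CEntry, int n)
{
	return CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[n].d;	/* n in 0..31 */
}
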
1610
1611 /*-----------------------------------------------------------------------
1612 ** vmm_get_vector_state
1613 **
1614 ** This function causes the current vector state to
1615 ** be saved into the shared context area. It also clears the
1616 ** vmmVectorCngd changed flag.
1617 **
1618 ** Inputs:
1619 ** act - pointer to current thread activation structure
1620 ** index - index returned by vmm_init_context
1621 **
1622 ** Outputs:
1623 ** context saved
1624 -----------------------------------------------------------------------*/
1625
1626 kern_return_t vmm_get_vector_state(
1627 thread_t act,
1628 vmm_thread_index_t index)
1629 {
1630 vmmCntrlEntry *CEntry;
1631 vmmCntrlTable *CTable;
1632 int i, j;
1633 unsigned int vrvalidwrk;
1634 register struct savearea_vec *sv;
1635
1636 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
1637 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */
1638
1639 vec_save(&CEntry->vmmFacCtx); /* Save context if live */
1640
1641 act->machine.specFlags &= ~vectorCng; /* Clear the special flag */
1642 CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd; /* Clear the change indication */
1643
1644 if((sv = CEntry->vmmFacCtx.VMXsave) != NULL) { /* Is there context yet? */
1645
1646 vrvalidwrk = sv->save_vrvalid; /* Get the valid flags */
1647
1648 for(i = 0; i < 32; i++) { /* Copy the saved registers and invalidate the others */
1649 if(vrvalidwrk & 0x80000000) { /* Do we have a valid value here? */
1650 for(j = 0; j < 4; j++) { /* If so, copy it over */
1651 CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
1652 }
1653 }
1654 else {
1655 for(j = 0; j < 4; j++) { /* Otherwise set to empty value */
1656 CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
1657 }
1658 }
1659
1660 vrvalidwrk = vrvalidwrk << 1; /* Shift over to the next */
1661
1662 }
1663
1664 return KERN_SUCCESS;
1665 }
1666
1667 for(i = 0; i < 32; i++) { /* Initialize vector registers */
1668 for(j=0; j < 4; j++) { /* Do words */
1669 CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j]; /* Initial value */
1670 }
1671 }
1672
1673 return KERN_SUCCESS;
1674 }
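
/*
 * Illustrative sketch, not part of the original source: similarly, after
 * the call above each guest VR slot holds either the saved register or the
 * QNaNbarbarian fill, so a word can be read without consulting the
 * vrvalid mask.  The helper name is hypothetical.
 */
static unsigned int vmm_read_guest_vr_word(vmmCntrlEntry *CEntry, int vr, int word)
{
	return CEntry->vmmContextKern->vmm_proc_state.ppcVRs[vr].i[word];	/* vr 0..31, word 0..3 */
}
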
1675
1676 /*-----------------------------------------------------------------------
1677 ** vmm_set_timer
1678 **
1679 ** This function sets a timer (in AbsoluteTime) to pop at a specific time.
1680 ** It also clears the vmmTimerPop flag if the timer is actually set for a
1681 ** future time; the flag is likewise cleared when the timer is reset to 0.
1682 **
1683 ** A timer is cleared by setting the time to 0. This will clear
1684 ** the vmmTimerPop bit. Simply setting the timer to earlier than the
1685 ** current time clears the internal timer request, but leaves the
1686 ** vmmTimerPop flag set.
1687 **
1688 **
1689 ** Inputs:
1690 ** act - pointer to current thread activation structure
1691 ** index - index returned by vmm_init_context
1692 ** timerhi - high order word of AbsoluteTime to pop
1693 ** timerlo - low order word of AbsoluteTime to pop
1694 **
1695 ** Outputs:
1696 ** timer set, vmmTimerPop cleared
1697 -----------------------------------------------------------------------*/
1698
1699 kern_return_t vmm_set_timer(
1700 thread_t act,
1701 vmm_thread_index_t index,
1702 unsigned int timerhi,
1703 unsigned int timerlo)
1704 {
1705 vmmCntrlEntry *CEntry;
1706
1707 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
1708 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */
1709
1710 CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo;
1711
1712 vmm_timer_pop(act); /* Go adjust all of the timer stuff */
1713 return KERN_SUCCESS; /* Leave now... */
1714 }
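
/*
 * Illustrative sketch, not part of the original source: arming a context's
 * timer to pop one second from now.  The 64-bit AbsoluteTime deadline is
 * split into the two 32-bit words the interface takes; the wrapper name is
 * hypothetical, the clock calls are the standard kernel ones.
 */
static kern_return_t vmm_arm_timer_one_sec(thread_t act, vmm_thread_index_t index)
{
	uint64_t deadline, delta;

	clock_get_uptime(&deadline);							/* Current AbsoluteTime */
	nanoseconds_to_absolutetime(1000000000ULL, &delta);		/* One second in AbsoluteTime units */
	deadline += delta;
	return vmm_set_timer(act, index,
		(unsigned int)(deadline >> 32), (unsigned int)deadline);
}
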
1715
1716
1717 /*-----------------------------------------------------------------------
1718 ** vmm_get_timer
1719 **
1720 ** This function causes the timer for a specified VM to be
1721 ** returned in return_params[0] and return_params[1].
1722 ** Note that this is kind of funky for 64-bit VMs: we still
1723 ** split the timer into two 32-bit parts so that parms 0 and 1 are
1724 ** set the same way in both layouts, even though the 64-bit parms
1725 ** are 8 bytes wide and could have held the whole value.
1726 **
1727 **
1728 ** Inputs:
1729 ** act - pointer to current thread activation structure
1730 ** index - index returned by vmm_init_context
1731 **
1732 ** Outputs:
1733 ** Timer value set in return_params[0] and return_params[1].
1734 ** Set to 0 if timer is not set.
1735 -----------------------------------------------------------------------*/
1736
1737 kern_return_t vmm_get_timer(
1738 thread_t act,
1739 vmm_thread_index_t index)
1740 {
1741 vmmCntrlEntry *CEntry;
1742 vmmCntrlTable *CTable;
1743
1744 CEntry = vmm_get_entry(act, index); /* Convert index to entry */
1745 if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */
1746
1747 if(CEntry->vmmXAFlgs & vmm64Bit) { /* A 64-bit virtual machine? */
1748 CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32); /* Return the timer's high word */
1749 CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[1] = (uint32_t)CEntry->vmmTimer; /* Return the timer's low word */
1750 }
1751 else {
1752 CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32); /* Return the timer's high word */
1753 CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[1] = (uint32_t)CEntry->vmmTimer; /* Return the timer's low word */
1754 }
1755 return KERN_SUCCESS;
1756 }
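
/*
 * Illustrative sketch, not part of the original source: the monitor side
 * reassembling the timer delivered above, shown for the 32-bit layout (the
 * 64-bit layout differs only in reading vmmrp64).  The helper name is
 * hypothetical; vmm_state_page_t is assumed to be the shared context type.
 */
static uint64_t vmm_reassemble_timer(vmm_state_page_t *ctx)
{
	return (((uint64_t)ctx->vmmRet.vmmrp32.return_params[0] << 32)
		| (uint64_t)ctx->vmmRet.vmmrp32.return_params[1]);
}
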
1757
1758
1759 /*-----------------------------------------------------------------------
1760 ** vmm_timer_pop
1761 **
1762 ** This function causes all timers in the array of VMs to be updated.
1763 ** All appropriate flags are set or reset. If a VM is currently
1764 ** running and its timer expired, it is intercepted.
1765 **
1766 ** The qactTimer value is set to the lowest unexpired timer. It is
1767 ** zeroed if all timers are expired or have been reset.
1768 **
1769 ** Inputs:
1770 ** act - pointer to current thread activation structure
1771 **
1772 ** Outputs:
1773 ** timers set, vmmTimerPop cleared or set
1774 -----------------------------------------------------------------------*/
1775
1776 void vmm_timer_pop(
1777 thread_t act)
1778 {
1779 vmmCntrlEntry *CEntry;
1780 vmmCntrlTable *CTable;
1781 int cvi, any;
1782 uint64_t now, soonest;
1783 savearea *sv;
1784
1785 if(!((unsigned int)act->machine.vmmControl & 0xFFFFFFFE)) { /* Are there any virtual machines? */
1786 panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
1787 }
1788
1789 soonest = 0xFFFFFFFFFFFFFFFFULL; /* Max time */
1790
1791 clock_get_uptime(&now); /* What time is it? */
1792
1793 CTable = act->machine.vmmControl; /* Make this easier */
1794 any = 0; /* Haven't found a running unexpired timer yet */
1795
1796 for(cvi = 0; cvi < kVmmMaxContexts; cvi++) { /* Cycle through all and check time now */
1797
1798 if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue; /* Do not check if the entry is empty */
1799
1800 if(CTable->vmmc[cvi].vmmTimer == 0) { /* Is the timer reset? */
1801 CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop; /* Clear timer popped */
1802 CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop; /* Clear timer popped */
1803 continue; /* Check next */
1804 }
1805
1806 if (CTable->vmmc[cvi].vmmTimer <= now) {
1807 CTable->vmmc[cvi].vmmFlags |= vmmTimerPop; /* Set timer popped here */
1808 CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop; /* Set timer popped here */
1809 if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->machine.vmmCEntry) { /* Is this the running VM? */
1810 sv = find_user_regs(act); /* Get the user state registers */
1811 if(!sv) { /* Did we find something? */
1812 panic("vmm_timer_pop: no user context; act = %08X\n", act);
1813 }
1814 sv->save_exception = kVmmReturnNull*4; /* Indicate that this is a null exception */
1815 vmm_force_exit(act, sv); /* Intercept a running VM */
1816 }
1817 continue; /* Check the rest */
1818 }
1819 else { /* It hasn't popped yet */
1820 CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop; /* Set timer not popped here */
1821 CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop; /* Set timer not popped here */
1822 }
1823
1824 any = 1; /* Show we found an active unexpired timer */
1825
1826 if (CTable->vmmc[cvi].vmmTimer < soonest)
1827 soonest = CTable->vmmc[cvi].vmmTimer;
1828 }
1829
1830 if(any) {
1831 if (act->machine.qactTimer == 0 || soonest <= act->machine.qactTimer)
1832 act->machine.qactTimer = soonest; /* Set lowest timer */
1833 }
1834
1835 return;
1836 }
1837
1838
1839
1840 /*-----------------------------------------------------------------------
1841 ** vmm_stop_vm
1842 **
1843 ** This function prevents the specified VM(s) from running.
1844 ** If any is currently executing, the execution is intercepted
1845 ** with a code of kVmmStopped. Note that execution of the VM is
1846 ** blocked until a vmmExecuteVM is called with the start flag set to 1.
1847 ** This provides the ability for a thread to stop execution of a VM and
1848 ** ensure that it will not be run until the emulator has processed the
1849 ** "virtual" interruption.
1850 **
1851 ** Inputs:
1852 ** vmmask - 32 bit mask corresponding to the VMs to put in stop state
1853 ** NOTE: if this mask is all 0s, any executing VM is intercepted with
1854 ** a kVmmStopped (but not marked stopped); otherwise this is a no-op.
1855 ** Also note that there is a potential race here and the VM may not stop.
1856 **
1857 ** Outputs:
1858 ** kernel return code indicating success
1859 ** or if no VMs are enabled, an invalid syscall exception.
1860 -----------------------------------------------------------------------*/
1861
1862 int vmm_stop_vm(struct savearea *save)
1863 {
1864
1865 thread_t act;
1866 vmmCntrlTable *CTable;
1867 int cvi, i;
1868 task_t task;
1869 thread_t fact;
1870 unsigned int vmmask;
1871 ReturnHandler *stopapc;
1872
1873 ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */
1874
1875 task = current_task(); /* Figure out who we are */
1876
1877 task_lock(task); /* Lock our task */
1878
1879 fact = (thread_t)task->threads.next; /* Get the first activation on task */
1880 act = 0; /* Pretend we didn't find it yet */
1881
1882 for(i = 0; i < task->thread_count; i++) { /* All of the activations */
1883 if(fact->machine.vmmControl) { /* Is this a virtual machine monitor? */
1884 act = fact; /* Yeah... */
1885 break; /* Bail the loop... */
1886 }
1887 fact = (thread_t)fact->task_threads.next; /* Go to the next one */
1888 }
1889
1890 if(!((unsigned int)act)) { /* See if we have VMMs yet */
1891 task_unlock(task); /* No, unlock the task */
1892 ml_set_interrupts_enabled(FALSE); /* Set back interruptions */
1893 return 0; /* Go generate a syscall exception */
1894 }
1895
1896 thread_reference(act);
1897
1898 task_unlock(task); /* Safe to release now */
1899
1900 thread_mtx_lock(act);
1901
1902 CTable = act->machine.vmmControl; /* Get the pointer to the table */
1903
1904 if(!((unsigned int)CTable & -2)) { /* Is the control table fully set up yet? (0 or 1 says no) */
1905 thread_mtx_unlock(act); /* Unlock the activation */
1906 thread_deallocate(act);
1907 ml_set_interrupts_enabled(FALSE); /* Set back interruptions */
1908 return 0; /* Go generate a syscall exception */
1909 }
1910
1911 if(!(vmmask = save->save_r3)) { /* Get the stop mask and check if all zeros */
1912 thread_mtx_unlock(act); /* Unlock the activation */
1913 thread_deallocate(act);
1914 ml_set_interrupts_enabled(FALSE); /* Set back interruptions */
1915 save->save_r3 = KERN_SUCCESS; /* Set success */
1916 return 1; /* Return... */
1917 }
1918
1919 for(cvi = 0; cvi < kVmmMaxContexts; cvi++) { /* Search slots */
1920 if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) { /* See if we need to stop and if it is in use */
1921 hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop); /* Set this one to stop */
1922 }
1923 vmmask = vmmask << 1; /* Slide mask over */
1924 }
1925
1926 if(hw_compare_and_store(0, 1, &act->machine.emPendRupts)) { /* See if there is already a stop pending and lock out others if not */
1927 thread_mtx_unlock(act); /* Already one pending, unlock the activation */
1928 thread_deallocate(act);
1929 ml_set_interrupts_enabled(FALSE); /* Set back interruptions */
1930 save->save_r3 = KERN_SUCCESS; /* Say we did it... */
1931 return 1; /* Leave */
1932 }
1933
1934 if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) { /* Get a return handler control block */
1935 act->machine.emPendRupts = 0; /* No memory, say we have given up request */
1936 thread_mtx_unlock(act); /* Unlock the activation */
1937 thread_deallocate(act);
1938 ml_set_interrupts_enabled(FALSE); /* Set back interruptions */
1939 save->save_r3 = KERN_RESOURCE_SHORTAGE; /* No storage... */
1940 return 1; /* Return... */
1941 }
1942
1943 ml_set_interrupts_enabled(FALSE); /* Disable interruptions for now */
1944
1945 stopapc->handler = vmm_interrupt; /* Set interruption routine */
1946
1947 stopapc->next = act->handlers; /* Put our interrupt at the start of the list */
1948 act->handlers = stopapc; /* Point to us */
1949
1950 act_set_apc(act); /* Set an APC AST */
1951 ml_set_interrupts_enabled(TRUE); /* Enable interruptions now */
1952
1953 thread_mtx_unlock(act); /* Unlock the activation */
1954 thread_deallocate(act);
1955
1956 ml_set_interrupts_enabled(FALSE); /* Set back interruptions */
1957 save->save_r3 = KERN_SUCCESS; /* Hip, hip, hooray... */
1958 return 1;
1959 }
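
/*
 * Illustrative sketch, not part of the original source: building the stop
 * mask passed in save_r3.  The loop above tests bit 0x80000000 and shifts
 * left, so the high-order bit selects context slot 0 (one-based index 1),
 * the next bit slot 1, and so on.  The helper name is hypothetical.
 */
static unsigned int vmm_stop_mask_for(vmm_thread_index_t index)
{
	return (0x80000000U >> ((index & vmmTInum) - 1));	/* One-based context index to mask bit */
}
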
1960
1961 /*-----------------------------------------------------------------------
1962 ** vmm_interrupt
1963 **
1964 ** This function is executed asynchronously from an APC AST.
1965 ** It is to be used for anything that needs to interrupt a running VM.
1966 ** This includes any kind of interruption generation (other than timer pop)
1967 ** or entering the stopped state.
1968 **
1969 ** Inputs:
1970 ** ReturnHandler *rh - the return handler control block as required by the APC.
1971 ** thread_t act - the activation
1972 **
1973 ** Outputs:
1974 ** Whatever needed to be done is done.
1975 -----------------------------------------------------------------------*/
1976
1977 void vmm_interrupt(ReturnHandler *rh, thread_t act) {
1978
1979 vmmCntrlTable *CTable;
1980 savearea *sv;
1981 boolean_t inter;
1982
1983
1984
1985 kfree(rh, sizeof(ReturnHandler)); /* Release the return handler block */
1986
1987 inter = ml_set_interrupts_enabled(FALSE); /* Disable interruptions for now */
1988
1989 act->machine.emPendRupts = 0; /* Say that there are no more interrupts pending */
1990 CTable = act->machine.vmmControl; /* Get the pointer to the table */
1991
1992 if(!((unsigned int)CTable & -2)) return; /* Leave if we aren't doing VMs any more... */
1993
1994 if(act->machine.vmmCEntry && (act->machine.vmmCEntry->vmmFlags & vmmXStop)) { /* Do we need to stop the running guy? */
1995 sv = find_user_regs(act); /* Get the user state registers */
1996 if(!sv) { /* Did we find something? */
1997 panic("vmm_interrupt: no user context; act = %08X\n", act);
1998 }
1999 sv->save_exception = kVmmStopped*4; /* Set a "stopped" exception */
2000 vmm_force_exit(act, sv); /* Intercept a running VM */
2001 }
2002 ml_set_interrupts_enabled(inter); /* Put interrupts back to what they were */
2003
2004 return;
2005 }