/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.c
**
** C routines that we are adding to the MacOS X kernel.
**
-----------------------------------------------------------------------*/

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <ppc/vmachmon.h>
#include <ppc/lowglobals.h>

extern double FloatInit;
extern unsigned long QNaNbarbarian[4];

/*************************************************************************************
	Virtual Machine Monitor Internal Routines
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function verifies and returns a vmm context entry index
**
** Inputs:
**	act - pointer to current thread activation
**	index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**	address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/

static vmmCntrlEntry *vmm_get_entry(
	thread_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlTable	*CTable;
	vmmCntrlEntry	*CEntry;

	index = index & vmmTInum;	/* Clean up the index */

	if (act->machine.vmmControl == 0) return NULL;	/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContexts) return NULL;	/* Index not in range */

	CTable = act->machine.vmmControl;	/* Make the address a bit more convenient */
	CEntry = &CTable->vmmc[index - 1];	/* Point to the entry */

	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;	/* See if the slot is actually in use */

	return CEntry;
}

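/*
 * Illustrative sketch (not built): how an internal caller uses the one-based
 * index convention above. The wrapper below is hypothetical; only
 * vmm_get_entry() and the vmm/kVmm constants come from this file and
 * vmachmon.h.
 */
#if 0
static kern_return_t example_check_context(thread_t act, vmm_thread_index_t index)
{
	vmmCntrlEntry *entry;

	entry = vmm_get_entry(act, index);	/* index runs 1..kVmmMaxContexts */
	if (entry == NULL) return KERN_FAILURE;	/* No table, bad index, or unused slot */
	return KERN_SUCCESS;	/* Slot is valid and in use */
}
#endif
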
/*-----------------------------------------------------------------------
** vmm_get_adsp
**
** This function verifies and returns the pmap for an address space.
** The pmap is created when the context is initialized, so this routine
** only needs to look it up.
**
** Inputs:
**	act - pointer to current thread activation
**	index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**	address of a pmap or 0 if not found
-----------------------------------------------------------------------*/

static pmap_t vmm_get_adsp(thread_t act, vmm_thread_index_t index)
{
	pmap_t pmap;

	if (act->machine.vmmControl == 0) return NULL;	/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContexts) return NULL;	/* Index not in range */

	pmap = act->machine.vmmControl->vmmAdsp[index - 1];	/* Get the pmap */
	return (pmap);	/* and return it. */
}

/*-----------------------------------------------------------------------
** vmm_build_shadow_hash
**
** Allocate and initialize a shadow hash table.
**
** This function assumes that PAGE_SIZE is 4k-bytes.
**
-----------------------------------------------------------------------*/
static pmap_vmm_ext *vmm_build_shadow_hash(pmap_t pmap)
{
	pmap_vmm_ext	*ext;	/* VMM pmap extension we're building */
	ppnum_t		extPP;	/* VMM pmap extension physical page number */
	kern_return_t	ret;	/* Return code from various calls */
	uint32_t	pages = GV_HPAGES;	/* Number of pages in the hash table */
	vm_offset_t	free = VMX_HPIDX_OFFSET;	/* Offset into extension page of free area (128-byte aligned) */
	uint32_t	freeSize = PAGE_SIZE - free;	/* Number of free bytes in the extension page */

	if ((pages * sizeof(addr64_t)) + (pages * sizeof(vm_offset_t)) > freeSize) {
		panic("vmm_build_shadow_hash: too little pmap_vmm_ext free space\n");
	}

	ret = kmem_alloc_wired(kernel_map, (vm_offset_t *)&ext, PAGE_SIZE);
		/* Allocate a page-sized extension block */
	if (ret != KERN_SUCCESS) return (NULL);	/* Return NULL for failed allocate */
	bzero((char *)ext, PAGE_SIZE);	/* Zero the entire extension block page */

	extPP = pmap_find_phys(kernel_pmap, (vm_offset_t)ext);
		/* Get extension block's physical page number */
	if (!extPP) {	/* This should not fail, but then again... */
		panic("vmm_build_shadow_hash: could not translate pmap_vmm_ext vaddr %08X\n", ext);
	}

	ext->vmxSalt = (addr64_t)(vm_offset_t)ext ^ ptoa_64(extPP);
		/* Set effective<->physical conversion salt */
	ext->vmxHostPmapPhys = (addr64_t)(vm_offset_t)pmap ^ pmap->pmapvr;
		/* Set host pmap's physical address */
	ext->vmxHostPmap = pmap;	/* Set host pmap's effective address */
	ext->vmxHashPgIdx = (addr64_t *)((vm_offset_t)ext + VMX_HPIDX_OFFSET);	/* Allocate physical index */
	ext->vmxHashPgList = (vm_offset_t *)((vm_offset_t)ext + VMX_HPLIST_OFFSET);	/* Allocate page list */
	ext->vmxActiveBitmap = (vm_offset_t *)((vm_offset_t)ext + VMX_ACTMAP_OFFSET);	/* Allocate active mapping bitmap */

	/* The hash table is typically larger than a single page, but we don't require it to be in a
	   contiguous virtual or physical chunk. So, we allocate it page by page, noting the effective and
	   physical address of each page in vmxHashPgList and vmxHashPgIdx, respectively. */
	uint32_t idx;
	for (idx = 0; idx < pages; idx++) {
		ret = kmem_alloc_wired(kernel_map, &ext->vmxHashPgList[idx], PAGE_SIZE);
			/* Allocate a hash-table page */
		if (ret != KERN_SUCCESS) goto fail;	/* Allocation failed, exit through cleanup */
		bzero((char *)ext->vmxHashPgList[idx], PAGE_SIZE);	/* Zero the page */
		ext->vmxHashPgIdx[idx] = ptoa_64(pmap_find_phys(kernel_pmap, (addr64_t)ext->vmxHashPgList[idx]));
			/* Put page's physical address into index */
		if (!ext->vmxHashPgIdx[idx]) {	/* Hash-table page's LRA failed */
			panic("vmm_build_shadow_hash: could not translate hash-table vaddr %08X\n", ext->vmxHashPgList[idx]);
		}
		mapping_t *map = (mapping_t *)ext->vmxHashPgList[idx];
		uint32_t mapIdx;
		for (mapIdx = 0; mapIdx < GV_SLTS_PPG; mapIdx++) {	/* Iterate over mappings in this page */
			map->mpFlags = (mpGuest | mpgFree);	/* Mark guest type and free */
			map = (mapping_t *)((char *)map + GV_SLOT_SZ);	/* Next slot-sized mapping */
		}
	}

	return (ext);	/* Return newly-minted VMM pmap extension */

fail:
	for (idx = 0; idx < pages; idx++) {	/* De-allocate any pages we managed to allocate */
		if (ext->vmxHashPgList[idx]) {
			kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
		}
	}
	kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE);	/* Release the VMM pmap extension page */
	return (NULL);	/* Return NULL for failure */
}

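/*
 * Illustrative sketch (not built): vmxSalt is chosen above so that
 * salt == effective ^ physical, letting low-level code convert between the
 * extension block's effective and physical addresses with one XOR in either
 * direction. The helper below is hypothetical.
 */
#if 0
static void example_salt_roundtrip(pmap_vmm_ext *ext)
{
	addr64_t eff  = (addr64_t)(vm_offset_t)ext;	/* Extension block's effective address */
	addr64_t phys = eff ^ ext->vmxSalt;	/* effective -> physical */
	addr64_t back = phys ^ ext->vmxSalt;	/* physical -> effective again */
	assert(back == eff);	/* XOR with the salt is its own inverse */
}
#endif
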
/*-----------------------------------------------------------------------
** vmm_release_shadow_hash
**
** Release shadow hash table and VMM extension block
**
-----------------------------------------------------------------------*/
static void vmm_release_shadow_hash(pmap_vmm_ext *ext)
{
	uint32_t idx;

	for (idx = 0; idx < GV_HPAGES; idx++) {	/* Release the hash table page by page */
		kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
	}

	kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE);	/* Release the VMM pmap extension page */
}

/*-----------------------------------------------------------------------
** vmm_activate_gsa
**
** Activate guest shadow assist
**
-----------------------------------------------------------------------*/
static kern_return_t vmm_activate_gsa(
	thread_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlTable *CTable = act->machine.vmmControl;	/* Get VMM control table */
	if (!CTable) {	/* Caller guarantees that this will work */
		panic("vmm_activate_gsa: VMM control table not present; act = %08X, idx = %d\n",
			act, index);
		return KERN_FAILURE;
	}
	vmmCntrlEntry *CEntry = vmm_get_entry(act, index);	/* Get context from index */
	if (!CEntry) {	/* Caller guarantees that this will work */
		panic("vmm_activate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
			act, index);
		return KERN_FAILURE;
	}

	pmap_t hpmap = act->map->pmap;	/* Get host pmap */
	pmap_t gpmap = vmm_get_adsp(act, index);	/* Get guest pmap */
	if (!gpmap) {	/* Caller guarantees that this will work */
		panic("vmm_activate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
			act, index);
		return KERN_FAILURE;
	}

	if (!hpmap->pmapVmmExt) {	/* If there's no VMM extension for this host, create one */
		hpmap->pmapVmmExt = vmm_build_shadow_hash(hpmap);	/* Build VMM extension plus shadow hash and attach */
		if (hpmap->pmapVmmExt) {	/* See if we succeeded */
			hpmap->pmapVmmExtPhys = (addr64_t)(vm_offset_t)hpmap->pmapVmmExt ^ hpmap->pmapVmmExt->vmxSalt;
				/* Get VMM extension block's physical address */
		} else {
			return KERN_RESOURCE_SHORTAGE;	/* Not enough mojo to go */
		}
	}
	gpmap->pmapVmmExt = hpmap->pmapVmmExt;	/* Copy VMM extension block virtual address into guest */
	gpmap->pmapVmmExtPhys = hpmap->pmapVmmExtPhys;	/* and its physical address, too */
	gpmap->pmapFlags |= pmapVMgsaa;	/* Enable GSA for this guest */
	CEntry->vmmXAFlgs |= vmmGSA;	/* Show GSA active here, too */

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_deactivate_gsa
**
** Deactivate guest shadow assist
**
-----------------------------------------------------------------------*/
static void vmm_deactivate_gsa(
	thread_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry *CEntry = vmm_get_entry(act, index);	/* Get context from index */
	if (!CEntry) {	/* Caller guarantees that this will work */
		panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
			act, index);
		return;	/* Void function, so no return value */
	}

	pmap_t gpmap = vmm_get_adsp(act, index);	/* Get guest pmap */
	if (!gpmap) {	/* Caller guarantees that this will work */
		panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
			act, index);
		return;	/* Void function, so no return value */
	}

	gpmap->pmapFlags &= ~pmapVMgsaa;	/* Deactivate GSA for this guest */
	CEntry->vmmXAFlgs &= ~vmmGSA;	/* Show GSA deactivated here, too */
}

/*-----------------------------------------------------------------------
** vmm_flush_context
**
** Flush specified guest context, purging all guest mappings and clearing
** the context page.
**
-----------------------------------------------------------------------*/
static void vmm_flush_context(
	thread_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	vmm_state_page_t	*vks;
	vmm_version_t		version;

	CEntry = vmm_get_entry(act, index);	/* Convert index to entry */
	if (!CEntry) {	/* Caller guarantees that this will work */
		panic("vmm_flush_context: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
			act, index);
		return;
	}

	if(CEntry->vmmFacCtx.FPUsave) {	/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);	/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {	/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);	/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}

	vmm_unmap_all_pages(act, index);	/* Blow away all mappings for this context */

	CTable = act->machine.vmmControl;	/* Get the control table address */
	CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;	/* Make sure we don't try to automap into this */

	CEntry->vmmFlags &= vmmInUse;	/* Clear out all of the flags for this entry except in use */
	CEntry->vmmFacCtx.FPUsave = 0;	/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;	/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;	/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;	/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;	/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;	/* Clear facility context control */

	vks = CEntry->vmmContextKern;	/* Get address of the context page */
	version = vks->interface_version;	/* Save the version code */
	bzero((char *)vks, 4096);	/* Clear all */

	vks->interface_version = version;	/* Set our version code */
	vks->thread_index = index % vmmTInum;	/* Tell the user the index for this virtual machine */

	return;	/* Context is now flushed */
}

/*************************************************************************************
	Virtual Machine Monitor Exported Functionality

	The following routines are used to implement a quick-switch mechanism for
	virtual machines that need to execute within their own processor environment
	(including register and MMU state).
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for them.
**
** Inputs:
**	none
**
** Outputs:
**	32-bit number representing major/minor version of
**		the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_version(struct savearea *save)
{
	save->save_r3 = kVmmCurrentVersion;	/* Return the version */
	return 1;
}

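/*
 * Illustrative sketch (not built): how a client might split the version word
 * returned in r3 and decide whether it is new enough. The cutoff values are
 * hypothetical.
 */
#if 0
static boolean_t example_version_ok(unsigned int version)
{
	unsigned int major = version >> 16;	/* Top 16 bits are the major version */
	unsigned int minor = version & 0xFFFF;	/* Bottom 16 bits are the minor version */

	return (major > 1) || ((major == 1) && (minor >= 7));	/* Require at least 1.7 (hypothetical) */
}
#endif
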
/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Inputs:
**	none
**
** Outputs:
**	32-bit number representing functionality supported by this
**		version of the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_features(struct savearea *save)
{
	save->save_r3 = kVmmCurrentFeatures;	/* Return the features */
	if(getPerProc()->pf.Available & pf64Bit) {
		save->save_r3 &= ~kVmmFeature_LittleEndian;	/* No little endian here */
		save->save_r3 |= kVmmFeature_SixtyFourBit;	/* Set that we can do 64-bit */
	}
	return 1;
}

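/*
 * Illustrative sketch (not built): testing the feature word returned in r3.
 * Note the interaction coded above: on 64-bit processors the little-endian
 * bit is cleared and the 64-bit bit is set. The policy below is hypothetical.
 */
#if 0
static boolean_t example_features_usable(unsigned int features)
{
	if (features & kVmmFeature_SixtyFourBit) {	/* 64-bit capable host */
		return TRUE;
	}
	return (features & kVmmFeature_LittleEndian) != 0;	/* Else require little-endian support (hypothetical policy) */
}
#endif
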
/*-----------------------------------------------------------------------
** vmm_max_addr
**
** This function returns the maximum addressable virtual address supported
**
** Outputs:
**	Returns max address
-----------------------------------------------------------------------*/

addr64_t vmm_max_addr(thread_t act)
{
	return vm_max_address;	/* Return the maximum address */
}

/*-----------------------------------------------------------------------
** vmm_get_XA
**
** This function retrieves the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
** Inputs:
**	act - pointer to current thread activation structure
**	index - index returned by vmm_init_context
**
** Outputs:
**	Return code is set to the XA flags. If the index is invalid or the
**	context has not been created, we return 0.
-----------------------------------------------------------------------*/

unsigned int vmm_get_XA(
	thread_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry	*CEntry;

	CEntry = vmm_get_entry(act, index);	/* Convert index to entry */
	if (CEntry == NULL) return 0;	/* Either this isn't a vmm or the index is bogus */

	return CEntry->vmmXAFlgs;	/* Return the flags */
}

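/*
 * Illustrative sketch (not built): because vmm_get_XA() folds the flags into
 * its return value, callers can test them without caring whether return
 * parameters would be 4 or 8 bytes wide. The helper is hypothetical.
 */
#if 0
static boolean_t example_guest_is_64bit(thread_t act, vmm_thread_index_t index)
{
	unsigned int xa = vmm_get_XA(act, index);	/* 0 if the context is invalid */
	return (xa & vmm64Bit) != 0;	/* Test the 64-bit XA flag */
}
#endif
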
/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**	act - pointer to current thread activation
**	version - requested version of the Vmm interface (allowing
**		future versions of the interface to change, but still
**		support older clients)
**	vmm_user_state - pointer to a logical page within the
**		client's address space
**
** Outputs:
**	kernel return code indicating success or failure
-----------------------------------------------------------------------*/

int vmm_init_context(struct savearea *save)
{
	thread_t		act;
	vmm_version_t		version;
	vmm_state_page_t	*vmm_user_state;
	vmmCntrlTable		*CTable;
	vm_offset_t		conkern;
	vmm_state_page_t	*vks;
	ppnum_t			conphys;
	kern_return_t		ret;
	int			cvi, i;
	task_t			task;
	thread_t		fact, gact;

	vmm_user_state = CAST_DOWN(vmm_state_page_t *, save->save_r4);	/* Get the user address of the comm area */
	if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {	/* Make sure the comm area is page aligned */
		save->save_r3 = KERN_FAILURE;	/* Return failure */
		return 1;
	}

	/* Make sure that the version requested is supported */
	version = save->save_r3;	/* Pick up passed in version */
	if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
		save->save_r3 = KERN_FAILURE;	/* Return failure */
		return 1;
	}

	if((version & 0xFFFF) > kVmmCurMinorVersion) {	/* Check for valid minor */
		save->save_r3 = KERN_FAILURE;	/* Return failure */
		return 1;
	}

	act = current_thread();	/* Pick up our activation */

	ml_set_interrupts_enabled(TRUE);	/* This can take a bit of time so pass interruptions */

	task = current_task();	/* Figure out who we are */

	task_lock(task);	/* Lock our task */

	fact = (thread_t)task->threads.next;	/* Get the first activation on task */
	gact = 0;	/* Pretend we didn't find it yet */

	for(i = 0; i < task->thread_count; i++) {	/* All of the activations */
		if(fact->machine.vmmControl) {	/* Is this a virtual machine monitor? */
			gact = fact;	/* Yeah... */
			break;	/* Bail the loop... */
		}
		fact = (thread_t)fact->task_threads.next;	/* Go to the next one */
	}

	/*
	 * We only allow one thread per task to be a virtual machine monitor right now. This solves
	 * a number of potential problems that I can't put my finger on right now.
	 *
	 * Ultimately, I think we want to move the controls and make all this task based instead of
	 * thread based. That would allow an emulator architecture to spawn a kernel thread for each
	 * VM (if they want) rather than hand dispatch contexts.
	 */

	if(gact && (gact != act)) {	/* Check if another thread is a vmm or trying to be */
		task_unlock(task);	/* Release task lock */
		ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
		save->save_r3 = KERN_FAILURE;	/* We must play alone... */
		return 1;
	}

	if(!gact) act->machine.vmmControl = (vmmCntrlTable *)1;	/* Temporarily mark that we are the vmm thread */

	task_unlock(task);	/* Safe to release now (because we've marked ourselves) */

	CTable = act->machine.vmmControl;	/* Get the control table address */
	if ((unsigned int)CTable == 1) {	/* If we are marked, try to allocate a new table, otherwise we have one */
		if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {	/* Get a fresh emulation control table */
			act->machine.vmmControl = 0;	/* Unmark us as vmm 'cause we failed */
			ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
			save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
			return 1;
		}

		bzero((void *)CTable, sizeof(vmmCntrlTable));	/* Clean it up */
		act->machine.vmmControl = CTable;	/* Initialize the table anchor */
	}

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search to find a free slot */
		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;	/* Bail if we find an unused slot */
	}

	if(cvi >= kVmmMaxContexts) {	/* Did we find one? */
		ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No empty slots... */
		return 1;
	}

	ret = vm_map_wire(	/* Wire the virtual machine monitor's context area */
		act->map,
		(vm_offset_t)vmm_user_state,
		(vm_offset_t)vmm_user_state + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if (ret != KERN_SUCCESS)	/* The wire failed, return the code */
		goto return_in_shame;

	/* Map the vmm state into the kernel's address space. */
	conphys = pmap_find_phys(act->map->pmap, (addr64_t)((uintptr_t)vmm_user_state));

	/* Find a virtual address to use. */
	ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {	/* Did we find an address? */
		(void) vm_map_unwire(act->map,	/* No, unwire the context area */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		goto return_in_shame;
	}

	/* Map it into the kernel's address space. */

	pmap_enter(kernel_pmap, conkern, conphys,
		VM_PROT_READ | VM_PROT_WRITE,
		VM_WIMG_USE_DEFAULT, TRUE);

	/* Clear the vmm state structure. */
	vks = (vmm_state_page_t *)conkern;
	bzero((char *)vks, PAGE_SIZE);

	/* We're home free now. Simply fill in the necessary info and return. */

	vks->interface_version = version;	/* Set our version code */
	vks->thread_index = cvi + 1;	/* Tell the user the index for this virtual machine */

	CTable->vmmc[cvi].vmmFlags = vmmInUse;	/* Mark the slot in use and make sure the rest are clear */
	CTable->vmmc[cvi].vmmContextKern = vks;	/* Remember the kernel address of comm area */
	CTable->vmmc[cvi].vmmContextPhys = conphys;	/* Remember the state page physical addr */
	CTable->vmmc[cvi].vmmContextUser = vmm_user_state;	/* Remember user address of comm area */

	CTable->vmmc[cvi].vmmFacCtx.FPUsave = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUlevel = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXsave = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXlevel = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.facAct = act;	/* Point back to the activation */

	hw_atomic_add((int *)&saveanchor.savetarget, 2);	/* Account for the number of extra saveareas we think we might "need" */

	pmap_t hpmap = act->map->pmap;	/* Get host pmap */
	pmap_t gpmap = pmap_create(0);	/* Make a fresh guest pmap */
	if (gpmap) {	/* Did we succeed ? */
		CTable->vmmAdsp[cvi] = gpmap;	/* Remember guest pmap for new context */
		if (lowGlo.lgVMMforcedFeats & vmmGSA) {	/* Forcing on guest shadow assist ? */
			vmm_activate_gsa(act, cvi+1);	/* Activate GSA */
		}
	} else {
		ret = KERN_RESOURCE_SHORTAGE;	/* We've failed to allocate a guest pmap */
		goto return_in_shame;	/* Shame on us. */
	}

	if (!(hpmap->pmapFlags & pmapVMhost)) {	/* Do this stuff if this is our first time hosting */
		hpmap->pmapFlags |= pmapVMhost;	/* We're now hosting */
	}

	ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;	/* Hip, hip, hooray... */
	return 1;

return_in_shame:
	if(!gact) kfree(CTable, sizeof(vmmCntrlTable));	/* Toss the table if we just allocated it */
	act->machine.vmmControl = 0;	/* Unmark us as vmm 'cause we failed */
	ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
	save->save_r3 = ret;	/* Pass back return code... */
	return 1;
}

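/*
 * Illustrative sketch (not built): the caller-side contract of
 * vmm_init_context(). The requested version rides in in r3 and the
 * page-aligned comm-area address in r4; r3 carries the kern_return_t back.
 * vmm_dispatch() stands in for the user-level trap veneer and is
 * hypothetical here.
 */
#if 0
static kern_return_t example_client_init(vmm_state_page_t *state_page)
{
	if ((unsigned int)state_page & (PAGE_SIZE - 1))	/* Kernel rejects unaligned comm areas */
		return KERN_FAILURE;

	return vmm_dispatch(kVmmInitContext, kVmmCurrentVersion, state_page);	/* Hypothetical veneer */
}
#endif
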
/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**	act - pointer to current thread activation structure
**	index - index returned by vmm_init_context
**
** Outputs:
**	kernel return code indicating success or failure
**
** Strangeness note:
**	This call will also trash the address space with the same ID. While this
**	is really not too cool, we have to do it because we need to make
**	sure that old VMM users (not that we really have any) who depend upon
**	the address space going away with the context still work the same.
-----------------------------------------------------------------------*/

kern_return_t vmm_tear_down_context(
	thread_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry	*CEntry;
	vmmCntrlTable	*CTable;
	int		cvi;

	CEntry = vmm_get_entry(act, index);	/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */

	ml_set_interrupts_enabled(TRUE);	/* This can take a bit of time so pass interruptions */

	hw_atomic_sub((int *)&saveanchor.savetarget, 2);	/* We don't need these extra saveareas anymore */

	if(CEntry->vmmFacCtx.FPUsave) {	/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);	/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {	/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);	/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}

	CEntry->vmmPmap = 0;	/* Remove this trace */
	pmap_t gpmap = act->machine.vmmControl->vmmAdsp[index - 1];
		/* Get context's guest pmap (if any) */
	if (gpmap) {	/* Check if there is an address space assigned here */
		if (gpmap->pmapFlags & pmapVMgsaa) {	/* Handle guest shadow assist case specially */
			hw_rem_all_gv(gpmap);	/* Remove all guest mappings from shadow hash table */
		} else {
			mapping_remove(gpmap, 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
			pmap_remove(gpmap, 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
		}
		pmap_destroy(gpmap);	/* Toss the pmap for this context */
		act->machine.vmmControl->vmmAdsp[index - 1] = NULL;	/* Clean it up */
	}

	(void) vm_map_unwire(	/* Unwire the user comm page */
		act->map,
		(vm_offset_t)CEntry->vmmContextUser,
		(vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
		FALSE);

	kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);	/* Remove kernel's view of the comm page */

	CTable = act->machine.vmmControl;	/* Get the control table address */
	CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;	/* Make sure we don't try to automap into this */

	CEntry->vmmFlags = 0;	/* Clear out all of the flags for this entry including in use */
	CEntry->vmmContextKern = 0;	/* Clear the kernel address of comm area */
	CEntry->vmmContextUser = 0;	/* Clear the user address of comm area */

	CEntry->vmmFacCtx.FPUsave = 0;	/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;	/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;	/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;	/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;	/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;	/* Clear facility context control */
	CEntry->vmmFacCtx.facAct = 0;	/* Clear facility context control */

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search to find a free slot */
		if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {	/* Return if there are still some in use */
			ml_set_interrupts_enabled(FALSE);	/* No more interruptions */
			return KERN_SUCCESS;	/* Leave... */
		}
	}

	/*
	 * When we have tossed the last context, toss any address spaces left over before releasing
	 * the VMM control block
	 */

	for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {	/* Look at all slots */
		if(!act->machine.vmmControl->vmmAdsp[cvi - 1]) continue;	/* Nothing to remove here */
		mapping_remove(act->machine.vmmControl->vmmAdsp[cvi - 1], 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
		pmap_remove(act->machine.vmmControl->vmmAdsp[cvi - 1], 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
		pmap_destroy(act->machine.vmmControl->vmmAdsp[cvi - 1]);	/* Toss the pmap for this context */
		act->machine.vmmControl->vmmAdsp[cvi - 1] = 0;	/* Clear just in case */
	}

	pmap_t pmap = act->map->pmap;	/* Get our pmap */
	if (pmap->pmapVmmExt) {	/* Release any VMM pmap extension block and shadow hash table */
		vmm_release_shadow_hash(pmap->pmapVmmExt);	/* Release extension block and shadow hash table */
		pmap->pmapVmmExt = 0;	/* Forget extension block */
		pmap->pmapVmmExtPhys = 0;	/* Forget extension block's physical address, too */
	}
	pmap->pmapFlags &= ~pmapVMhost;	/* We're no longer hosting */

	kfree(CTable, sizeof(vmmCntrlTable));	/* Toss the table because we tossed the last context */
	act->machine.vmmControl = 0;	/* Unmark us as vmm */

	ml_set_interrupts_enabled(FALSE);	/* No more interruptions */

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_activate_XA
**
** This function activates the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
** Note that this function does a lot of the same stuff as vmm_tear_down_context
** and vmm_init_context.
**
** Inputs:
**	act - pointer to current thread activation structure
**	index - index returned by vmm_init_context
**	flags - the extended architecture flags
**
** Outputs:
**	KERN_SUCCESS if vm is valid and initialized. KERN_FAILURE if not.
**	Also, the internal flags are set and, additionally, the VM is completely reset.
-----------------------------------------------------------------------*/
kern_return_t vmm_activate_XA(
	thread_t		act,
	vmm_thread_index_t	index,
	unsigned int		xaflags)
{
	vmmCntrlEntry	*CEntry;
	kern_return_t	result = KERN_SUCCESS;	/* Assume success */

	if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && !(getPerProc()->pf.Available & pf64Bit)))
		return (KERN_FAILURE);	/* Unknown or unsupported feature requested */

	CEntry = vmm_get_entry(act, index);	/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	ml_set_interrupts_enabled(TRUE);	/* This can take a bit of time so pass interruptions */

	vmm_flush_context(act, index);	/* Flush the context */

	if (xaflags & vmm64Bit) {	/* Activating 64-bit mode ? */
		CEntry->vmmXAFlgs |= vmm64Bit;	/* Activate 64-bit mode */
	}

	if (xaflags & vmmGSA) {	/* Activating guest shadow assist ? */
		result = vmm_activate_gsa(act, index);	/* Activate guest shadow assist */
	}

	ml_set_interrupts_enabled(FALSE);	/* No more interruptions */

	return result;	/* Return activate result */
}

/*-----------------------------------------------------------------------
** vmm_deactivate_XA
**
-----------------------------------------------------------------------*/
kern_return_t vmm_deactivate_XA(
	thread_t		act,
	vmm_thread_index_t	index,
	unsigned int		xaflags)
{
	vmmCntrlEntry	*CEntry;
	kern_return_t	result = KERN_SUCCESS;	/* Assume success */

	if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && !(getPerProc()->pf.Available & pf64Bit)))
		return (KERN_FAILURE);	/* Unknown or unsupported feature requested */

	CEntry = vmm_get_entry(act, index);	/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	ml_set_interrupts_enabled(TRUE);	/* This can take a bit of time so pass interruptions */

	vmm_flush_context(act, index);	/* Flush the context */

	if (xaflags & vmm64Bit) {	/* Deactivating 64-bit mode ? */
		CEntry->vmmXAFlgs &= ~vmm64Bit;	/* Deactivate 64-bit mode */
	}

	if (xaflags & vmmGSA) {	/* Deactivating guest shadow assist ? */
		vmm_deactivate_gsa(act, index);	/* Deactivate guest shadow assist */
	}

	ml_set_interrupts_enabled(FALSE);	/* No more interruptions */

	return result;	/* Return deactivate result */
}

/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**	activation to tear down
**
** Outputs:
**	All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/
void vmm_tear_down_all(thread_t act) {

	vmmCntrlTable	*CTable;
	int		cvi;
	kern_return_t	ret;
	savearea	*save;
	spl_t		s;

	if(act->machine.specFlags & runningVM) {	/* Are we actually in a context right now? */
		save = find_user_regs(act);	/* Find the user state context */
		if(!save) {	/* Did we find it? */
			panic("vmm_tear_down_all: runningVM marked but no user state context\n");
			return;
		}

		save->save_exception = kVmmBogusContext*4;	/* Indicate that this context is bogus now */
		s = splhigh();	/* Make sure interrupts are off */
		vmm_force_exit(act, save);	/* Force an exit from VM state */
		splx(s);	/* Restore interrupts */
	}

	if((CTable = act->machine.vmmControl)) {	/* Do we have a vmm control block? */

		for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {	/* Look at all slots */
			if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) {	/* Is this one in use */
				ret = vmm_tear_down_context(act, cvi);	/* Take down the found context */
				if(ret != KERN_SUCCESS) {	/* Did it go away? */
					panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
						ret, act, cvi);
				}
			}
		}

		/*
		 * Note that all address spaces should be gone here.
		 */
		if(act->machine.vmmControl) {	/* Did we find one? */
			panic("vmm_tear_down_all: control table did not get deallocated\n");	/* Table did not go away */
		}
	}

	return;
}

/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it and this mapping may disappear at any time, even before they get used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**	act - pointer to current thread activation
**	index - index of address space to map into
**	va - virtual address within the client's address
**		space
**	ava - virtual address within the alternate address
**		space
**	prot - protection flags
**
** Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
** areas are not allowed and will fail. Same with directly mapped I/O areas.
**
** Input conditions:
**	Interrupts disabled (from fast trap)
**
** Outputs:
**	kernel return code indicating success or failure
**	if success, va resident and alternate mapping made
-----------------------------------------------------------------------*/

kern_return_t vmm_map_page(
	thread_t		act,
	vmm_adsp_id_t		index,
	addr64_t		cva,
	addr64_t		ava,
	vm_prot_t		prot)
{
	kern_return_t	ret;
	register mapping_t *mp;
	vm_map_t	map;
	addr64_t	ova, nextva;
	pmap_t		pmap;

	pmap = vmm_get_adsp(act, index);	/* Get the guest pmap for this address space */
	if(!pmap) return KERN_FAILURE;	/* Bogus address space, no VMs, or we can't make a pmap, failure... */

	if(ava > vm_max_address) return kVmmInvalidAddress;	/* Does the machine support an address of this size? */

	map = current_thread()->map;	/* Get the host's map */

	if (pmap->pmapFlags & pmapVMgsaa) {	/* Guest shadow assist active ? */
		ret = hw_res_map_gv(map->pmap, pmap, cva, ava, getProtPPC(prot));
			/* Attempt to resume an existing gv->phys mapping */
		if (mapRtOK != ret) {	/* Nothing to resume, construct a new mapping */

			while (1) {	/* Find host mapping or fail */
				mp = mapping_find(map->pmap, cva, &nextva, 0);
					/* Attempt to find host mapping and pin it */
				if (mp) break;	/* Got it */

				ml_set_interrupts_enabled(TRUE);	/* Open 'rupt window */
				ret = vm_fault(map,	/* Didn't find it, try to fault in host page read/write */
					vm_map_trunc_page(cva),
					VM_PROT_READ | VM_PROT_WRITE,
					FALSE,	/* change wiring */
					THREAD_UNINT,
					NULL,
					0);
				ml_set_interrupts_enabled(FALSE);	/* Close 'rupt window */
				if (ret != KERN_SUCCESS)
					return KERN_FAILURE;	/* Fault failed, return failure */
			}

			if (mpNormal != (mp->mpFlags & mpType)) {	/* Host mapping must be a vanilla page */
				mapping_drop_busy(mp);	/* Un-pin host mapping */
				return KERN_FAILURE;	/* Return failure */
			}

			/* Partially construct gv->phys mapping */
			unsigned int pindex;
			phys_entry_t *physent = mapping_phys_lookup(mp->mpPAddr, &pindex);
			if (!physent) {
				mapping_drop_busy(mp);
				return KERN_FAILURE;
			}
			unsigned int pattr = ((physent->ppLink & (ppI | ppG)) >> 60);
			unsigned int wimg = 0x2;
			if (pattr & mmFlgCInhib) wimg |= 0x4;
			if (pattr & mmFlgGuarded) wimg |= 0x1;
			unsigned int mflags = (pindex << 16) | mpGuest;
			addr64_t gva = ((ava & ~mpHWFlags) | (wimg << 3) | getProtPPC(prot));

			hw_add_map_gv(map->pmap, pmap, gva, mflags, mp->mpPAddr);
				/* Construct new guest->phys mapping */

			mapping_drop_busy(mp);	/* Un-pin host mapping */
		}
	} else {
		while(1) {	/* Keep trying until we get it or until we fail */

			mp = mapping_find(map->pmap, cva, &nextva, 0);	/* Find the mapping for this address */

			if(mp) break;	/* We found it */

			ml_set_interrupts_enabled(TRUE);	/* Enable interruptions */
			ret = vm_fault(map,	/* Didn't find it, try to fault it in read/write... */
				vm_map_trunc_page(cva),
				VM_PROT_READ | VM_PROT_WRITE,
				FALSE,	/* change wiring */
				THREAD_UNINT,
				NULL,
				0);
			ml_set_interrupts_enabled(FALSE);	/* Disable interruptions */
			if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* There isn't a page there, return... */
		}

		if((mp->mpFlags & mpType) != mpNormal) {	/* If this is a block, a nest, or some other special thing, we can't map it */
			mapping_drop_busy(mp);	/* We have everything we need from the mapping */
			return KERN_FAILURE;	/* Leave in shame */
		}

		while(1) {	/* Keep trying the enter until it goes in */
			ova = mapping_make(pmap, ava, mp->mpPAddr, 0, 1, prot);	/* Enter the mapping into the pmap */
			if(!ova) break;	/* If there were no collisions, we are done... */
			mapping_remove(pmap, ova);	/* Remove the mapping that collided */
		}

		mapping_drop_busy(mp);	/* We have everything we need from the mapping */
	}

	if (!((getPerProc()->spcFlags) & FamVMmode)) {
		act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;	/* Remember last address space */
	}

	return KERN_SUCCESS;
}

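/*
 * Illustrative sketch (not built): how the GSA path above folds the WIMG
 * storage attributes and PPC protection bits into the low (non-address) bits
 * of the guest virtual address handed to hw_add_map_gv(). The helper is
 * hypothetical.
 */
#if 0
static addr64_t example_encode_gva(addr64_t ava, unsigned int wimg, vm_prot_t prot)
{
	return (ava & ~mpHWFlags)	/* Keep the page-aligned guest address */
	     | (wimg << 3)	/* Merge in the WIMG attribute nibble */
	     | getProtPPC(prot);	/* Merge in the PPC protection bits */
}
#endif
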
/*-----------------------------------------------------------------------
** vmm_map_execute
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context and then directly starts executing.
**
** See description of vmm_map_page for details.
**
** Inputs:
**	Index is used for both the context and the address space ID.
**	index[24:31] is the context id and index[16:23] is the address space.
**	If the address space ID is 0, the context ID is used for it.
**
** Outputs:
**	Normal exit is to run the VM. Abnormal exit is triggered via a
**	non-KERN_SUCCESS return from vmm_map_page or later during the
**	attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_map_execute(
	thread_t		act,
	vmm_thread_index_t	index,
	addr64_t		cva,
	addr64_t		ava,
	vm_prot_t		prot)
{
	kern_return_t	ret;
	vmmCntrlEntry	*CEntry;
	unsigned int	adsp;
	vmm_thread_index_t cndx;

	cndx = index & 0xFF;	/* Clean it up */

	CEntry = vmm_get_entry(act, cndx);	/* Get and validate the index */
	if (CEntry == NULL) return kVmmBogusContext;	/* Return bogus context */

	if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
		return kVmmBogusContext;	/* Yes, invalid index in Fam */

	adsp = (index >> 8) & 0xFF;	/* Get any requested address space */
	if(!adsp) adsp = (index & 0xFF);	/* If 0, use context ID as address space ID */

	ret = vmm_map_page(act, adsp, cva, ava, prot);	/* Go try to map the page on in */

	if(ret == KERN_SUCCESS) {
		act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;	/* Remember last address space */
		vmm_execute_vm(act, cndx);	/* Return was ok, launch the VM */
	}

	return ret;	/* We had trouble mapping in the page */
}

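/*
 * Illustrative sketch (not built): packing the combined index that
 * vmm_map_execute() decodes above. The low byte selects the context; the
 * next byte optionally selects a different address space, with 0 meaning
 * "use the context ID". The helper is hypothetical.
 */
#if 0
static vmm_thread_index_t example_pack_index(unsigned int context_id, unsigned int adsp_id)
{
	return (vmm_thread_index_t)(((adsp_id & 0xFF) << 8) | (context_id & 0xFF));
}
#endif
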
/*-----------------------------------------------------------------------
** vmm_map_list
**
** This function maps a list of pages into various address spaces
**
** Inputs:
**	act - pointer to current thread activation
**	index - index of default address space (used if not specified in a list entry)
**	count - number of pages to map
**	flavor - 0 if 32-bit version, 1 if 64-bit
**	vmcpComm in the comm page contains up to kVmmMaxMapPages to map
**
** Outputs:
**	kernel return code indicating success or failure
**	KERN_FAILURE is returned if kVmmMaxMapPages is exceeded
**	or the vmm_map_page call fails.
**	We return kVmmInvalidAddress if virtual address size is not supported
-----------------------------------------------------------------------*/

kern_return_t vmm_map_list(
	thread_t		act,
	vmm_adsp_id_t		index,
	unsigned int		cnt,
	unsigned int		flavor)
{
	vmmCntrlEntry	*CEntry;
	kern_return_t	ret;
	unsigned int	i;
	vmmMList	*lst;
	vmmMList64	*lstx;
	addr64_t	cva;
	addr64_t	ava;
	vm_prot_t	prot;
	vmm_adsp_id_t	adsp;

	CEntry = vmm_get_entry(act, index);	/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	if(cnt > kVmmMaxMapPages) return KERN_FAILURE;	/* They tried to map too many */
	if(!cnt) return KERN_SUCCESS;	/* If they said none, we're done... */

	lst = (vmmMList *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */
	lstx = (vmmMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */

	for(i = 0; i < cnt; i++) {	/* Step through and map all pages in list */
		if(flavor) {	/* Check if 32- or 64-bit addresses */
			cva = lstx[i].vmlva;	/* Get the 64-bit actual address */
			ava = lstx[i].vmlava;	/* Get the 64-bit guest address */
		}
		else {
			cva = lst[i].vmlva;	/* Get the 32-bit actual address */
			ava = lst[i].vmlava;	/* Get the 32-bit guest address */
		}

		prot = ava & vmmlProt;	/* Extract the protection bits */
		adsp = (ava & vmmlAdID) >> 4;	/* Extract an explicit address space request */
		if(!adsp) adsp = index - 1;	/* If no explicit, use supplied default */
		ava &= 0xFFFFFFFFFFFFF000ULL;	/* Clean up the address */

		ret = vmm_map_page(act, adsp + 1, cva, ava, prot);	/* Go try to map the page on in (adsp is zero-based; vmm_map_page wants one-based) */
		if(ret != KERN_SUCCESS) return ret;	/* Bail if any error */
	}

	return KERN_SUCCESS;	/* Return... */
}

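/*
 * Illustrative sketch (not built): how a 64-bit client might pack one
 * map-list entry in vmcpComm. The guest-address word carries the protection
 * in vmmlProt and an optional explicit address-space ID in vmmlAdID, exactly
 * as decoded above. The helper is hypothetical.
 */
#if 0
static void example_pack_map_entry(vmmMList64 *entry, addr64_t cva, addr64_t ava,
	vm_prot_t prot, unsigned int adsp)
{
	entry->vmlva = cva;	/* Client (host) virtual address to map from */
	entry->vmlava = (ava & 0xFFFFFFFFFFFFF000ULL)	/* Page-aligned guest address */
		| ((adsp << 4) & vmmlAdID)	/* Optional explicit address space (0 = default) */
		| (prot & vmmlProt);	/* Protection bits */
}
#endif
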
/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** Given a context index and a guest virtual address, convert the address
** to its corresponding host virtual address.
**
** Inputs:
**	act - pointer to current thread activation
**	index - context index
**	gva - guest virtual address
**
** Outputs:
**	Host virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**	If the host address space contains multiple virtual addresses mapping
**	to the physical address corresponding to the specified guest virtual
**	address (i.e., host virtual aliases), it is unpredictable which host
**	virtual address (alias) will be returned. Moral of the story: No host
**	virtual aliases.
-----------------------------------------------------------------------*/

addr64_t vmm_get_page_mapping(
	thread_t		act,
	vmm_adsp_id_t		index,
	addr64_t		gva)
{
	register mapping_t *mp;
	pmap_t		pmap;
	addr64_t	nextva, hva;
	ppnum_t		pa;

	pmap = vmm_get_adsp(act, index);	/* Get and validate the index */
	if (!pmap) return -1;	/* No good, failure... */

	if (pmap->pmapFlags & pmapVMgsaa) {	/* Guest shadow assist (GSA) active ? */
		return (hw_gva_to_hva(pmap, gva));	/* Convert guest to host virtual address */
	} else {
		mp = mapping_find(pmap, gva, &nextva, 0);	/* Find guest mapping for this virtual address */

		if(!mp) return -1;	/* Not mapped, return -1 */

		pa = mp->mpPAddr;	/* Remember the physical page address */

		mapping_drop_busy(mp);	/* Go ahead and release the mapping now */

		pmap = current_thread()->map->pmap;	/* Get the host pmap */
		hva = mapping_p2v(pmap, pa);	/* Now find the source virtual */

		if(hva != 0) return hva;	/* We found it... */

		panic("vmm_get_page_mapping: could not back-map guest va (%016llX)\n", gva);
			/* We are bad wrong if we can't find it */

		return -1;	/* Never executed, prevents compiler warning */
	}
}

/*-----------------------------------------------------------------------
** vmm_unmap_page
**
** This function unmaps a page from the guest address space.
**
** Inputs:
**	act - pointer to current thread activation
**	index - index of vmm state for this page
**	va - virtual address within the vmm's address
**		space
**
** Outputs:
**	kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_page(
	thread_t		act,
	vmm_adsp_id_t		index,
	addr64_t		va)
{
	addr64_t	nadd;
	pmap_t		pmap;

	pmap = vmm_get_adsp(act, index);	/* Get and validate the index */
	if (!pmap) return KERN_FAILURE;	/* No good, failure... */

	if (pmap->pmapFlags & pmapVMgsaa) {	/* Handle guest shadow assist specially */
		hw_susp_map_gv(act->map->pmap, pmap, va);	/* Suspend the mapping */
		return (KERN_SUCCESS);	/* Always returns success */
	} else {
		nadd = mapping_remove(pmap, va);	/* Toss the mapping */

		return ((nadd & 1) ? KERN_FAILURE : KERN_SUCCESS);	/* Return... */
	}
}

/*-----------------------------------------------------------------------
** vmm_unmap_list
**
** This function unmaps a list of pages from the alternate's logical
** address space.
**
** Inputs:
**	act - pointer to current thread activation
**	index - index of vmm state for this page
**	count - number of pages to release
**	flavor - 0 if 32-bit, 1 if 64-bit
**	vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
**
** Outputs:
**	kernel return code indicating success or failure
**	KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_list(
	thread_t		act,
	vmm_adsp_id_t		index,
	unsigned int		cnt,
	unsigned int		flavor)
{
	vmmCntrlEntry	*CEntry;
	unsigned int	i;
	addr64_t	gva;
	vmmUMList	*lst;
	vmmUMList64	*lstx;
	pmap_t		pmap;
	int		adsp;

	CEntry = vmm_get_entry(act, index);	/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE;	/* They tried to unmap too many */
	if(!cnt) return KERN_SUCCESS;	/* If they said none, we're done... */

	lstx = (vmmUMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */
	lst = (vmmUMList *)lstx;	/* Same comm area, 32-bit layout */

	for(i = 0; i < cnt; i++) {	/* Step and release all pages in list */
		if(flavor) {	/* Check if 32- or 64-bit addresses */
			gva = lstx[i].vmlava;	/* Get the 64-bit guest address */
		}
		else {
			gva = lst[i].vmlava;	/* Get the 32-bit guest address */
		}

		adsp = (gva & vmmlAdID) >> 4;	/* Extract an explicit address space request */
		if(!adsp) adsp = index - 1;	/* If no explicit, use supplied default */
		pmap = act->machine.vmmControl->vmmAdsp[adsp];	/* Get the pmap for this request */
		if(!pmap) continue;	/* Ain't nuthin' mapped here, no durn map... */

		gva &= 0xFFFFFFFFFFFFF000ULL;	/* Clean up the address */
		if (pmap->pmapFlags & pmapVMgsaa) {	/* Handle guest shadow assist specially */
			hw_susp_map_gv(act->map->pmap, pmap, gva);	/* Suspend the mapping */
		} else {
			(void)mapping_remove(pmap, gva);	/* Toss the mapping */
		}
	}

	return KERN_SUCCESS;	/* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_all_pages
**
** This function unmaps all pages from the alternate's logical
** address space.
**
** Inputs:
**	act - pointer to current thread activation
**	index - index of context state
**
** Outputs:
**	none
**
** Note:
**	All pages are unmapped, but the address space (i.e., pmap) is still alive
-----------------------------------------------------------------------*/

void vmm_unmap_all_pages(
	thread_t		act,
	vmm_adsp_id_t		index)
{
	pmap_t		pmap;

	pmap = vmm_get_adsp(act, index);	/* Convert index to entry */
	if (!pmap) return;	/* Either this isn't vmm thread or the index is bogus */

	if (pmap->pmapFlags & pmapVMgsaa) {	/* Handle guest shadow assist specially */
		hw_rem_all_gv(pmap);	/* Remove all guest's mappings from shadow hash table */
	} else {
		/*
		 * Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
		 */
		mapping_remove(pmap, 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
		pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
	}
	return;
}

1401 /*-----------------------------------------------------------------------
1402 ** vmm_get_page_dirty_flag
1403 **
1404 ** This function returns the changed flag of the page
1405 ** and optionally clears clears the flag.
1406 **
1407 ** Inputs:
1408 ** act - pointer to current thread activation
1409 ** index - index of vmm state for this page
1410 ** va - virtual address within the vmm's address
1411 ** space
1412 ** reset - Clears dirty if true, untouched if not
1413 **
1414 ** Outputs:
1415 ** the dirty bit
1416 ** clears the dirty bit in the pte if requested
1417 **
1418 ** Note:
1419 ** The RC bits are merged into the global physical entry
1420 -----------------------------------------------------------------------*/
1421
1422 boolean_t vmm_get_page_dirty_flag(
1423 thread_t act,
1424 vmm_adsp_id_t index,
1425 addr64_t va,
1426 unsigned int reset)
1427 {
1428 vmmCntrlEntry *CEntry;
1429 register mapping_t *mpv, *mp;
1430 unsigned int RC;
1431 pmap_t pmap;
1432
1433 pmap = vmm_get_adsp(act, index); /* Convert index to entry */
1434 if (!pmap) return 1; /* Either this isn't vmm thread or the index is bogus */
1435
1436 if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */
1437 RC = hw_test_rc_gv(act->map->pmap, pmap, va, reset);/* Fetch the RC bits and clear if requested */
1438 } else {
1439 RC = hw_test_rc(pmap, (addr64_t)va, reset); /* Fetch the RC bits and clear if requested */
1440 }
1441
1442 switch (RC & mapRetCode) { /* Decode return code */
1443
1444 case mapRtOK: /* Changed */
1445 return ((RC & (unsigned int)mpC) == (unsigned int)mpC); /* Return if dirty or not */
1446 break;
1447
1448 case mapRtNotFnd: /* Didn't find it */
1449 return 1; /* Return dirty */
1450 break;
1451
1452 default:
1453 panic("vmm_get_page_dirty_flag: hw_test_rc failed - rc = %d, pmap = %08X, va = %016llX\n", RC, pmap, va);
1454
1455 }
1456
1457 return 1; /* Return the change bit */
1458 }
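
/*
 * Sketch (added for exposition, not compiled): scanning a hypothetical
 * guest region for dirty pages, clearing each flag as it is read. Note
 * that a page whose mapping is not found also reports dirty (the
 * mapRtNotFnd case above returns 1), so this count is conservative.
 */
#if 0
static unsigned int count_dirty_pages_example(
	thread_t			act,
	vmm_adsp_id_t		index,
	addr64_t			base,
	unsigned int		npages)
{
	unsigned int i, dirty = 0;

	for(i = 0; i < npages; i++) {					/* Walk the region a 4K page at a time */
		if(vmm_get_page_dirty_flag(act, index, base + ((addr64_t)i << 12), 1)) dirty++;
	}
	return dirty;
}
#endif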


/*-----------------------------------------------------------------------
** vmm_protect_page
**
** This function sets the protection bits of a mapped page
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the vmm's address space
**		prot  - protection flags
**
** Outputs:
**		none
**		Protection bits of the mapping are modified
**
-----------------------------------------------------------------------*/

kern_return_t vmm_protect_page(
	thread_t 			act,
	vmm_adsp_id_t 		index,
	addr64_t			va,
	vm_prot_t			prot)
{
	vmmCntrlEntry 		*CEntry;
	addr64_t			nextva;
	int					ret;
	pmap_t				pmap;

	pmap = vmm_get_adsp(act, index);				/* Convert index to entry */
	if (!pmap) return KERN_FAILURE;					/* Either this isn't vmm thread or the index is bogus */

	if (pmap->pmapFlags & pmapVMgsaa) {				/* Handle guest shadow assist specially */
		ret = hw_protect_gv(pmap, va, prot);		/* Try to change protection, GSA variant */
	} else {
		ret = hw_protect(pmap, va, prot, &nextva);	/* Try to change protection */
	}

	switch (ret) {									/* Decode return code */

		case mapRtOK:								/* All ok... */
			break;									/* Outta here */

		case mapRtNotFnd:							/* Didn't find it */
			return KERN_SUCCESS;					/* Ok, return... */

		default:
			panic("vmm_protect_page: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, (addr64_t)va);

	}

	if (!((getPerProc()->spcFlags) & FamVMmode)) {
		act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;	/* Remember last address space */
	}

	return KERN_SUCCESS;							/* Return */
}
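
/*
 * Sketch (added for exposition, not compiled): write-protecting a guest
 * page so that the next store intercepts into the monitor. VM_PROT_READ is
 * the standard Mach protection value; the unmapped case still returns
 * KERN_SUCCESS, as the mapRtNotFnd arm above shows.
 */
#if 0
static kern_return_t write_protect_example(
	thread_t			act,
	vmm_adsp_id_t		index,
	addr64_t			va)
{
	return vmm_protect_page(act, index, va, VM_PROT_READ);	/* Make it read-only */
}
#endif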


/*-----------------------------------------------------------------------
** vmm_protect_execute
**
** This function sets the protection bits of a mapped page
** and then directly starts executing.
**
** See the description of vmm_protect_page for details.
**
** Inputs:
**		See vmm_protect_page and vmm_map_execute
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_protect_execute(
	thread_t 			act,
	vmm_thread_index_t 	index,
	addr64_t			va,
	vm_prot_t			prot)
{
	kern_return_t		ret;
	vmmCntrlEntry 		*CEntry;
	unsigned int		adsp;
	vmm_thread_index_t 	cndx;

	cndx = index & 0xFF;							/* Clean it up */
	CEntry = vmm_get_entry(act, cndx);				/* Get and validate the index */
	if (CEntry == NULL) return kVmmBogusContext;	/* Return bogus context */

	adsp = (index >> 8) & 0xFF;						/* Get any requested address space */
	if(!adsp) adsp = (index & 0xFF);				/* If 0, use context ID as address space ID */

	if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
		return kVmmBogusContext;					/* Yes, invalid index in FAM */

	ret = vmm_protect_page(act, adsp, va, prot);	/* Go try to change access */

	if(ret == KERN_SUCCESS) {
		act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;	/* Remember last address space */
		vmm_execute_vm(act, cndx);					/* Return was ok, launch the VM */
	}

	return ret;										/* We had trouble of some kind (shouldn't happen) */

}
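
/*
 * Sketch (added for exposition, not compiled): how the composite index that
 * vmm_protect_execute decodes is put together. The low byte is the context
 * index; the next byte, when non-zero, requests an explicit address space.
 */
#if 0
static vmm_thread_index_t make_composite_index_example(
	unsigned int		adsp,
	unsigned int		cndx)
{
	return (vmm_thread_index_t)(((adsp & 0xFF) << 8) | (cndx & 0xFF));
}
#endif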


/*-----------------------------------------------------------------------
** vmm_get_float_state
**
** This function causes the current floating point state to
** be saved into the shared context area. It also clears the
** vmmFloatCngd changed flag.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_float_state(
	thread_t 			act,
	vmm_thread_index_t 	index)
{
	vmmCntrlEntry 		*CEntry;
	vmmCntrlTable		*CTable;
	int					i;
	register struct savearea_fpu *sv;

	CEntry = vmm_get_entry(act, index);				/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	act->machine.specFlags &= ~floatCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;	/* Clear the change indication */

	fpu_save(&CEntry->vmmFacCtx);					/* Save context if live */

	if((sv = CEntry->vmmFacCtx.FPUsave)) {			/* Is there context yet? */
		bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8);	/* 32 registers */
		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {						/* Initialize floating points */
		CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit;	/* Initial value */
	}

	return KERN_SUCCESS;
}
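
/*
 * Sketch (added for exposition, not compiled): reading one guest FPR out of
 * the shared context area after vmm_get_float_state has refreshed it. The
 * register number is a caller choice; FloatInit is the same "never touched"
 * pattern the initialization loop above uses.
 */
#if 0
static double read_guest_fpr_example(
	thread_t			act,
	vmm_thread_index_t	index,
	vmmCntrlEntry		*CEntry,
	int					fpr)
{
	if(vmm_get_float_state(act, index) != KERN_SUCCESS) return FloatInit;
	return CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[fpr].d;	/* Freshly saved value */
}
#endif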

/*-----------------------------------------------------------------------
** vmm_get_vector_state
**
** This function causes the current vector state to
** be saved into the shared context area. It also clears the
** vmmVectorCngd changed flag.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_vector_state(
	thread_t 			act,
	vmm_thread_index_t 	index)
{
	vmmCntrlEntry 		*CEntry;
	vmmCntrlTable		*CTable;
	int					i, j;
	unsigned int 		vrvalidwrk;
	register struct savearea_vec *sv;

	CEntry = vmm_get_entry(act, index);				/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	vec_save(&CEntry->vmmFacCtx);					/* Save context if live */

	act->machine.specFlags &= ~vectorCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;	/* Clear the change indication */

	if((sv = CEntry->vmmFacCtx.VMXsave)) {			/* Is there context yet? */

		vrvalidwrk = sv->save_vrvalid;				/* Get the valid flags */

		for(i = 0; i < 32; i++) {					/* Copy the saved registers and invalidate the others */
			if(vrvalidwrk & 0x80000000) {			/* Do we have a valid value here? */
				for(j = 0; j < 4; j++) {			/* If so, copy it over */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
				}
			}
			else {
				for(j = 0; j < 4; j++) {			/* Otherwise set to empty value */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
				}
			}

			vrvalidwrk = vrvalidwrk << 1;			/* Shift over to the next */

		}

		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {						/* Initialize vector registers */
		for(j = 0; j < 4; j++) {					/* Do words */
			CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];	/* Initial value */
		}
	}

	return KERN_SUCCESS;
}
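
/*
 * Sketch (added for exposition, not compiled): testing whether vector
 * register n held live guest state in the save area. Bit 31 of save_vrvalid
 * corresponds to VR0, which is why the copy loop above shifts the work
 * mask left once per register.
 */
#if 0
static boolean_t vr_is_valid_example(struct savearea_vec *sv, int n)
{
	return (sv->save_vrvalid & (0x80000000U >> n)) != 0;
}
#endif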

/*-----------------------------------------------------------------------
** vmm_set_timer
**
** This function sets a timer (in AbsoluteTime) for a specific pop time.
** If the timer is actually set, the vmmTimerPop flag is cleared.
**
** A timer is cleared by setting the time to 0; this also clears
** the vmmTimerPop bit. Simply setting the timer to earlier than the
** current time clears the internal timer request, but leaves the
** vmmTimerPop flag set.
**
** Inputs:
**		act     - pointer to current thread activation structure
**		index   - index returned by vmm_init_context
**		timerhi - high order word of AbsoluteTime to pop
**		timerlo - low order word of AbsoluteTime to pop
**
** Outputs:
**		timer set, vmmTimerPop cleared
-----------------------------------------------------------------------*/

kern_return_t vmm_set_timer(
	thread_t 			act,
	vmm_thread_index_t 	index,
	unsigned int 		timerhi,
	unsigned int 		timerlo)
{
	vmmCntrlEntry 		*CEntry;

	CEntry = vmm_get_entry(act, index);				/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo;

	vmm_timer_pop(act);								/* Go adjust all of the timer stuff */
	return KERN_SUCCESS;							/* Leave now... */
}
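
/*
 * Sketch (added for exposition, not compiled): splitting a 64-bit
 * AbsoluteTime deadline into the two 32-bit words vmm_set_timer expects.
 * The delta added to "now" is a hypothetical raw AbsoluteTime value; a
 * real caller would derive it from a nanosecond interval first.
 */
#if 0
static kern_return_t arm_timer_example(
	thread_t			act,
	vmm_thread_index_t	index,
	uint64_t			delta)
{
	uint64_t deadline;

	clock_get_uptime(&deadline);					/* Now, in AbsoluteTime */
	deadline += delta;								/* Interval, already in AbsoluteTime units */

	return vmm_set_timer(act, index,
		(unsigned int)(deadline >> 32),				/* timerhi */
		(unsigned int)deadline);					/* timerlo */
}
#endif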


/*-----------------------------------------------------------------------
** vmm_get_timer
**
** This function causes the timer for a specified VM to be
** returned in return_params[0] and return_params[1].
** Note that this is kind of funky for 64-bit VMs: we still
** split the timer into two parts so that parms 0 and 1 are both set,
** even though strictly speaking we wouldn't need to, because those
** parms are 8 bytes wide.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Timer value set in return_params[0] and return_params[1].
**		Set to 0 if timer is not set.
-----------------------------------------------------------------------*/

kern_return_t vmm_get_timer(
	thread_t 			act,
	vmm_thread_index_t 	index)
{
	vmmCntrlEntry 		*CEntry;
	vmmCntrlTable		*CTable;

	CEntry = vmm_get_entry(act, index);				/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	if(CEntry->vmmXAFlgs & vmm64Bit) {				/* A 64-bit virtual machine? */
		CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32);	/* Return the high half of the timer value */
		CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the low half of the timer value */
	}
	else {
		CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32);	/* Return the high half of the timer value */
		CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the low half of the timer value */
	}
	return KERN_SUCCESS;
}
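
/*
 * Sketch (added for exposition, not compiled): reassembling the 64-bit
 * timer value from the two 32-bit words vmm_get_timer stores, assuming the
 * 32-bit return area. A zero result means the timer is not set.
 */
#if 0
static uint64_t read_timer_example(
	thread_t			act,
	vmm_thread_index_t	index,
	vmmCntrlEntry		*CEntry)
{
	unsigned int hi, lo;

	if(vmm_get_timer(act, index) != KERN_SUCCESS) return 0;

	hi = CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[0];	/* High word */
	lo = CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[1];	/* Low word */
	return ((uint64_t)hi << 32) | lo;
}
#endif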


/*-----------------------------------------------------------------------
** vmm_timer_pop
**
** This function causes all timers in the array of VMs to be updated.
** All appropriate flags are set or reset. If a VM is currently
** running and its timer expired, it is intercepted.
**
** The qactTimer value is set to the lowest unexpired timer. It is
** zeroed if all timers are expired or have been reset.
**
** Inputs:
**		act - pointer to current thread activation structure
**
** Outputs:
**		timers set, vmmTimerPop cleared or set
-----------------------------------------------------------------------*/

void vmm_timer_pop(
	thread_t 			act)
{
	vmmCntrlEntry 		*CEntry;
	vmmCntrlTable		*CTable;
	int					cvi, any;
	uint64_t			now, soonest;
	savearea			*sv;

	if(!((unsigned int)act->machine.vmmControl & 0xFFFFFFFE)) {	/* Are there any virtual machines? */
		panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
	}

	soonest = 0xFFFFFFFFFFFFFFFFULL;				/* Max time */

	clock_get_uptime(&now);							/* What time is it? */

	CTable = act->machine.vmmControl;				/* Make this easier */
	any = 0;										/* Haven't found a running unexpired timer yet */

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Cycle through all and check time now */

		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;	/* Do not check if the entry is empty */

		if(CTable->vmmc[cvi].vmmTimer == 0) {		/* Is the timer reset? */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Clear timer popped */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Clear timer popped */
			continue;								/* Check next */
		}

		if (CTable->vmmc[cvi].vmmTimer <= now) {
			CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;	/* Set timer popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;	/* Set timer popped here */
			if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->machine.vmmCEntry) {	/* Is this the running VM? */
				sv = find_user_regs(act);			/* Get the user state registers */
				if(!sv) {							/* Did we find something? */
					panic("vmm_timer_pop: no user context; act = %08X\n", act);
				}
				sv->save_exception = kVmmReturnNull*4;	/* Indicate that this is a null exception */
				vmm_force_exit(act, sv);			/* Intercept a running VM */
			}
			continue;								/* Check the rest */
		}
		else {										/* It hasn't popped yet */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Set timer not popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Set timer not popped here */
		}

		any = 1;									/* Show we found an active unexpired timer */

		if (CTable->vmmc[cvi].vmmTimer < soonest)
			soonest = CTable->vmmc[cvi].vmmTimer;
	}

	if(any) {
		if (act->machine.qactTimer == 0 || soonest <= act->machine.qactTimer)
			act->machine.qactTimer = soonest;		/* Set lowest timer */
	}

	return;
}


/*-----------------------------------------------------------------------
** vmm_stop_vm
**
** This function prevents the specified VM(s) from running.
** If any is currently executing, the execution is intercepted
** with a code of kVmmStopped. Note that execution of the VM is
** blocked until a vmmExecuteVM is called with the start flag set to 1.
** This provides the ability for a thread to stop execution of a VM and
** ensure that it will not be run until the emulator has processed the
** "virtual" interruption.
**
** Inputs:
**		vmmask - 32-bit mask corresponding to the VMs to put in the stop state
**		NOTE: if this mask is all 0s, any executing VM is intercepted with
**		a kVmmStopped (but not marked stopped); otherwise this is a no-op.
**		Also note that there is a potential race here and the VM may not stop.
**
** Outputs:
**		kernel return code indicating success,
**		or if no VMs are enabled, an invalid syscall exception.
-----------------------------------------------------------------------*/

int vmm_stop_vm(struct savearea *save)
{
	thread_t 			act;
	vmmCntrlTable 		*CTable;
	int					cvi, i;
	task_t				task;
	thread_t			fact;
	unsigned int		vmmask;
	ReturnHandler		*stopapc;

	ml_set_interrupts_enabled(TRUE);				/* This can take a bit of time so pass interruptions */

	task = current_task();							/* Figure out who we are */

	task_lock(task);								/* Lock our task */

	fact = (thread_t)task->threads.next;			/* Get the first activation on task */
	act = 0;										/* Pretend we didn't find it yet */

	for(i = 0; i < task->thread_count; i++) {		/* All of the activations */
		if(fact->machine.vmmControl) {				/* Is this a virtual machine monitor? */
			act = fact;								/* Yeah... */
			break;									/* Bail the loop... */
		}
		fact = (thread_t)fact->task_threads.next;	/* Go to the next one */
	}

	if(!((unsigned int)act)) {						/* See if we have VMMs yet */
		task_unlock(task);							/* No, unlock the task */
		ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
		return 0;									/* Go generate a syscall exception */
	}

	thread_reference(act);

	task_unlock(task);								/* Safe to release now */

	thread_mtx_lock(act);

	CTable = act->machine.vmmControl;				/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {				/* Are there any all the way up yet? */
		thread_mtx_unlock(act);						/* Unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
		return 0;									/* Go generate a syscall exception */
	}

	if(!(vmmask = save->save_r3)) {					/* Get the stop mask and check if all zeros */
		thread_mtx_unlock(act);						/* Unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;				/* Set success */
		return 1;									/* Return... */
	}

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search slots */
		if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) {	/* See if we need to stop and if it is in use */
			hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop);	/* Set this one to stop */
		}
		vmmask = vmmask << 1;						/* Slide mask over */
	}

	if(hw_compare_and_store(0, 1, &act->machine.emPendRupts)) {	/* See if there is already a stop pending and lock out others if not */
		thread_mtx_unlock(act);						/* Already one pending, unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;				/* Say we did it... */
		return 1;									/* Leave */
	}

	if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) {	/* Get a return handler control block */
		act->machine.emPendRupts = 0;				/* No memory, say we have given up request */
		thread_mtx_unlock(act);						/* Unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No storage... */
		return 1;									/* Return... */
	}

	ml_set_interrupts_enabled(FALSE);				/* Disable interruptions for now */

	stopapc->handler = vmm_interrupt;				/* Set interruption routine */

	stopapc->next = act->handlers;					/* Put our interrupt at the start of the list */
	act->handlers = stopapc;						/* Point to us */

	act_set_apc(act);								/* Set an APC AST */
	ml_set_interrupts_enabled(TRUE);				/* Enable interruptions now */

	thread_mtx_unlock(act);							/* Unlock the activation */
	thread_deallocate(act);

	ml_set_interrupts_enabled(FALSE);				/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;					/* Hip, hip, hooray... */
	return 1;
}
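
/*
 * Sketch (added for exposition, not compiled): building the r3 stop mask.
 * The slot-search loop above tests 0x80000000 first, so bit 31 maps to
 * context slot 0, bit 30 to slot 1, and so on.
 */
#if 0
static unsigned int stop_mask_for_slot_example(int cvi)
{
	return 0x80000000U >> cvi;						/* One-hot stop mask for context slot cvi */
}
#endif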

/*-----------------------------------------------------------------------
** vmm_interrupt
**
** This function is executed asynchronously from an APC AST.
** It is to be used for anything that needs to interrupt a running VM.
** This includes any kind of interruption generation (other than timer pop)
** or entering the stopped state.
**
** Inputs:
**		ReturnHandler *rh - the return handler control block as required by the APC
**		thread_t act      - the activation
**
** Outputs:
**		Whatever needed to be done is done.
-----------------------------------------------------------------------*/

void vmm_interrupt(ReturnHandler *rh, thread_t act) {

	vmmCntrlTable		*CTable;
	savearea			*sv;
	boolean_t			inter;

	kfree(rh, sizeof(ReturnHandler));				/* Release the return handler block */

	inter = ml_set_interrupts_enabled(FALSE);		/* Disable interruptions for now */

	act->machine.emPendRupts = 0;					/* Say that there are no more interrupts pending */
	CTable = act->machine.vmmControl;				/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {				/* Are we still doing VMs? */
		ml_set_interrupts_enabled(inter);			/* Restore the interrupt state before bailing */
		return;										/* Leave if we aren't doing VMs any more... */
	}

	if(act->machine.vmmCEntry && (act->machine.vmmCEntry->vmmFlags & vmmXStop)) {	/* Do we need to stop the running guy? */
		sv = find_user_regs(act);					/* Get the user state registers */
		if(!sv) {									/* Did we find something? */
			panic("vmm_interrupt: no user context; act = %08X\n", act);
		}
		sv->save_exception = kVmmStopped*4;			/* Set a "stopped" exception */
		vmm_force_exit(act, sv);					/* Intercept a running VM */
	}
	ml_set_interrupts_enabled(inter);				/* Put interrupts back to what they were */

	return;
}