/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.c
**
** C routines that we are adding to the MacOS X kernel.
**
-----------------------------------------------------------------------*/

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread.h>
#include <ppc/savearea.h>
#include <ppc/misc_protos.h>
#include <ppc/fpu_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <ppc/vmachmon.h>
#include <ppc/lowglobals.h>

extern double FloatInit;
extern unsigned long QNaNbarbarian[4];

/*************************************************************************************
    Virtual Machine Monitor Internal Routines
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function verifies an index and returns the corresponding vmm context entry.
**
** Inputs:
**      act   - pointer to current thread activation
**      index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**      address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/

static vmmCntrlEntry *vmm_get_entry(
    thread_t            act,
    vmm_thread_index_t  index)
{
    vmmCntrlTable   *CTable;
    vmmCntrlEntry   *CEntry;

    index = index & vmmTInum;                           /* Clean up the index */

    if (act->machine.vmmControl == 0) return NULL;      /* No control table means no vmm */
    if ((index - 1) >= kVmmMaxContexts) return NULL;    /* Index not in range */
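    /* Note: index is an unsigned type, so an index of 0 wraps to a very large value
       in the test above and is rejected along with out-of-range indices */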

    CTable = act->machine.vmmControl;                   /* Make the address a bit more convenient */
    CEntry = &CTable->vmmc[index - 1];                  /* Point to the entry */

    if (!(CEntry->vmmFlags & vmmInUse)) return NULL;    /* See if the slot is actually in use */

    return CEntry;
}

/*-----------------------------------------------------------------------
** vmm_get_adsp
**
** This function verifies an index and returns the pmap for the corresponding
** address space.
**
** Inputs:
**      act   - pointer to current thread activation
**      index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**      address of a pmap, or 0 if not found or not yet created
-----------------------------------------------------------------------*/

static pmap_t vmm_get_adsp(thread_t act, vmm_thread_index_t index)
{
    pmap_t pmap;

    if (act->machine.vmmControl == 0) return NULL;      /* No control table means no vmm */
    if ((index - 1) >= kVmmMaxContexts) return NULL;    /* Index not in range */

    pmap = act->machine.vmmControl->vmmAdsp[index - 1]; /* Get the pmap */
    return (pmap);                                      /* and return it. */
}

/*-----------------------------------------------------------------------
** vmm_build_shadow_hash
**
** Allocate and initialize a shadow hash table.
**
** This function assumes that PAGE_SIZE is 4K bytes.
**
-----------------------------------------------------------------------*/
static pmap_vmm_ext *vmm_build_shadow_hash(pmap_t pmap)
{
    pmap_vmm_ext   *ext;                            /* VMM pmap extension we're building */
    ppnum_t         extPP;                          /* VMM pmap extension physical page number */
    kern_return_t   ret;                            /* Return code from various calls */
    uint32_t        pages = GV_HPAGES;              /* Number of pages in the hash table */
    vm_offset_t     free = VMX_HPIDX_OFFSET;        /* Offset into extension page of free area (128-byte aligned) */
    uint32_t        freeSize = PAGE_SIZE - free;    /* Number of free bytes in the extension page */
    uint32_t        idx;

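    /* The hash-table page index and page list arrays are carved out of the free
       tail of the extension page, starting at VMX_HPIDX_OFFSET; the check below
       verifies that both arrays actually fit there */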
    if ((pages * sizeof(addr64_t)) + (pages * sizeof(vm_offset_t)) > freeSize) {
        panic("vmm_build_shadow_hash: too little pmap_vmm_ext free space\n");
    }

    ret = kmem_alloc_wired(kernel_map, (vm_offset_t *)&ext, PAGE_SIZE);
                                                    /* Allocate a page-sized extension block */
    if (ret != KERN_SUCCESS) return (NULL);         /* Return NULL for failed allocate */
    bzero((char *)ext, PAGE_SIZE);                  /* Zero the entire extension block page */

    extPP = pmap_find_phys(kernel_pmap, (vm_offset_t)ext);
                                                    /* Get extension block's physical page number */
    if (!extPP) {                                   /* This should not fail, but then again... */
        panic("vmm_build_shadow_hash: could not translate pmap_vmm_ext vaddr %p\n", ext);
    }

    ext->vmxSalt = (addr64_t)(vm_offset_t)ext ^ ptoa_64(extPP);
                                                    /* Set effective<->physical conversion salt */
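    /* Because the salt is the XOR of the block's effective and physical addresses,
       XORing either address with the salt recovers the other */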
    ext->vmxHostPmapPhys = (addr64_t)(vm_offset_t)pmap ^ pmap->pmapvr;
                                                    /* Set host pmap's physical address */
    ext->vmxHostPmap = pmap;                        /* Set host pmap's effective address */
    ext->vmxHashPgIdx = (addr64_t *)((vm_offset_t)ext + VMX_HPIDX_OFFSET);
                                                    /* Allocate physical index */
    ext->vmxHashPgList = (vm_offset_t *)((vm_offset_t)ext + VMX_HPLIST_OFFSET);
                                                    /* Allocate page list */
    ext->vmxActiveBitmap = (vm_offset_t *)((vm_offset_t)ext + VMX_ACTMAP_OFFSET);
                                                    /* Allocate active mapping bitmap */

    /* The hash table is typically larger than a single page, but we don't require it to be in a
       contiguous virtual or physical chunk. So, we allocate it page by page, noting the effective and
       physical address of each page in vmxHashPgList and vmxHashPgIdx, respectively. */
    for (idx = 0; idx < pages; idx++) {
        mapping_t *map;
        uint32_t mapIdx;
        ret = kmem_alloc_wired(kernel_map, &ext->vmxHashPgList[idx], PAGE_SIZE);
                                                    /* Allocate a hash-table page */
        if (ret != KERN_SUCCESS) goto fail;         /* Allocation failed, exit through cleanup */
        bzero((char *)ext->vmxHashPgList[idx], PAGE_SIZE);  /* Zero the page */
        ext->vmxHashPgIdx[idx] = ptoa_64(pmap_find_phys(kernel_pmap, (addr64_t)ext->vmxHashPgList[idx]));
                                                    /* Put page's physical address into index */
        if (!ext->vmxHashPgIdx[idx]) {              /* Hash-table page's LRA failed */
            panic("vmm_build_shadow_hash: could not translate hash-table vaddr %08X\n", ext->vmxHashPgList[idx]);
        }
        map = (mapping_t *)ext->vmxHashPgList[idx];
        for (mapIdx = 0; mapIdx < GV_SLTS_PPG; mapIdx++) {  /* Iterate over mappings in this page */
            map->mpFlags = (mpGuest | mpgFree);     /* Mark guest type and free */
            map = (mapping_t *)((char *)map + GV_SLOT_SZ);  /* Next slot-sized mapping */
        }
    }

    return (ext);                                   /* Return newly-minted VMM pmap extension */

fail:
    for (idx = 0; idx < pages; idx++) {             /* De-allocate any pages we managed to allocate */
        if (ext->vmxHashPgList[idx]) {
            kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
        }
    }
    kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE); /* Release the VMM pmap extension page */
    return (NULL);                                  /* Return NULL for failure */
}


/*-----------------------------------------------------------------------
** vmm_release_shadow_hash
**
** Release shadow hash table and VMM extension block
**
-----------------------------------------------------------------------*/
static void vmm_release_shadow_hash(pmap_vmm_ext *ext)
{
    uint32_t idx;

    for (idx = 0; idx < GV_HPAGES; idx++) {         /* Release the hash table page by page */
        kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
    }

    kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE); /* Release the VMM pmap extension page */
}

/*-----------------------------------------------------------------------
** vmm_activate_gsa
**
** Activate guest shadow assist
**
-----------------------------------------------------------------------*/
static kern_return_t vmm_activate_gsa(
    thread_t            act,
    vmm_thread_index_t  index)
{
    vmmCntrlTable *CTable = act->machine.vmmControl;    /* Get VMM control table */
    vmmCntrlEntry *CEntry;
    pmap_t hpmap;
    pmap_t gpmap;
    if (!CTable) {                                  /* Caller guarantees that this will work */
        panic("vmm_activate_gsa: VMM control table not present; act = %p, idx = %lu\n",
            act, index);
        return KERN_FAILURE;
    }
    CEntry = vmm_get_entry(act, index);             /* Get context from index */
    if (!CEntry) {                                  /* Caller guarantees that this will work */
        panic("vmm_activate_gsa: Unexpected failure of vmm_get_entry; act = %p, idx = %lu\n",
            act, index);
        return KERN_FAILURE;
    }

    hpmap = act->map->pmap;                         /* Get host pmap */
    gpmap = vmm_get_adsp(act, index);               /* Get guest pmap */
    if (!gpmap) {                                   /* Caller guarantees that this will work */
        panic("vmm_activate_gsa: Unexpected failure of vmm_get_adsp; act = %p, idx = %lu\n",
            act, index);
        return KERN_FAILURE;
    }

    if (!hpmap->pmapVmmExt) {                       /* If there's no VMM extension for this host, create one */
        hpmap->pmapVmmExt = vmm_build_shadow_hash(hpmap);   /* Build VMM extension plus shadow hash and attach */
        if (hpmap->pmapVmmExt) {                    /* See if we succeeded */
            hpmap->pmapVmmExtPhys = (addr64_t)(vm_offset_t)hpmap->pmapVmmExt ^ hpmap->pmapVmmExt->vmxSalt;
                                                    /* Get VMM extension block's physical address */
        } else {
            return KERN_RESOURCE_SHORTAGE;          /* Not enough mojo to go */
        }
    }
    gpmap->pmapVmmExt = hpmap->pmapVmmExt;          /* Copy VMM extension block virtual address into guest */
    gpmap->pmapVmmExtPhys = hpmap->pmapVmmExtPhys;  /* and its physical address, too */
    gpmap->pmapFlags |= pmapVMgsaa;                 /* Enable GSA for this guest */
    CEntry->vmmXAFlgs |= vmmGSA;                    /* Show GSA active here, too */

    return KERN_SUCCESS;
}


/*-----------------------------------------------------------------------
** vmm_deactivate_gsa
**
** Deactivate guest shadow assist
**
-----------------------------------------------------------------------*/
static void
vmm_deactivate_gsa(
    thread_t            act,
    vmm_thread_index_t  index)
{
    vmmCntrlEntry *CEntry = vmm_get_entry(act, index);  /* Get context from index */
    pmap_t gpmap;
    if (!CEntry) {                                  /* Caller guarantees that this will work */
        panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_entry; act = %p, idx = %lu\n",
            act, index);
    }

    gpmap = vmm_get_adsp(act, index);               /* Get guest pmap */
    if (!gpmap) {                                   /* Caller guarantees that this will work */
        panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_adsp; act = %p, idx = %lu\n",
            act, index);
    }

    gpmap->pmapFlags &= ~pmapVMgsaa;                /* Deactivate GSA for this guest */
    CEntry->vmmXAFlgs &= ~vmmGSA;                   /* Show GSA deactivated here, too */
}


/*-----------------------------------------------------------------------
** vmm_flush_context
**
** Flush specified guest context, purging all guest mappings and clearing
** the context page.
**
-----------------------------------------------------------------------*/
static void vmm_flush_context(
    thread_t            act,
    vmm_thread_index_t  index)
{
    vmmCntrlEntry       *CEntry;
    vmmCntrlTable       *CTable;
    vmm_state_page_t    *vks;
    vmm_version_t       version;

    CEntry = vmm_get_entry(act, index);             /* Convert index to entry */
    if (!CEntry) {                                  /* Caller guarantees that this will work */
        panic("vmm_flush_context: Unexpected failure of vmm_get_entry; act = %p, idx = %lu\n",
            act, index);
        return;
    }

    if(CEntry->vmmFacCtx.FPUsave) {                 /* Is there any floating point context? */
        toss_live_fpu(&CEntry->vmmFacCtx);          /* Get rid of any live context here */
        save_release((struct savearea *)CEntry->vmmFacCtx.FPUsave);    /* Release it */
    }

    if(CEntry->vmmFacCtx.VMXsave) {                 /* Is there any vector context? */
        toss_live_vec(&CEntry->vmmFacCtx);          /* Get rid of any live context here */
        save_release((struct savearea *)CEntry->vmmFacCtx.VMXsave);    /* Release it */
    }

    vmm_unmap_all_pages(act, index);                /* Blow away all mappings for this context */

    CTable = act->machine.vmmControl;               /* Get the control table address */
    CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;   /* Make sure we don't try to automap into this */

    CEntry->vmmFlags &= vmmInUse;                   /* Clear out all of the flags for this entry except in use */
    CEntry->vmmFacCtx.FPUsave = NULL;               /* Clear facility context control */
    CEntry->vmmFacCtx.FPUlevel = NULL;              /* Clear facility context control */
    CEntry->vmmFacCtx.FPUcpu = 0;                   /* Clear facility context control */
    CEntry->vmmFacCtx.VMXsave = NULL;               /* Clear facility context control */
    CEntry->vmmFacCtx.VMXlevel = NULL;              /* Clear facility context control */
    CEntry->vmmFacCtx.VMXcpu = 0;                   /* Clear facility context control */

    vks = CEntry->vmmContextKern;                   /* Get address of the context page */
    version = vks->interface_version;               /* Save the version code */
    bzero((char *)vks, 4096);                       /* Clear all */

    vks->interface_version = version;               /* Set our version code */
    vks->thread_index = index % vmmTInum;           /* Tell the user the index for this virtual machine */

    /* Context is now flushed */
}


/*************************************************************************************
    Virtual Machine Monitor Exported Functionality

    The following routines are used to implement a quick-switch mechanism for
    virtual machines that need to execute within their own processor environment
    (including register and MMU state).
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for them.
**
** Inputs:
**      none
**
** Outputs:
**      32-bit number representing major/minor version of
**      the Vmm module
-----------------------------------------------------------------------*/
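/* For example, a version value of 0x00010006 would denote major version 1, minor
   version 6 (values illustrative); a client compares (version >> 16) against the
   major version it was built for and (version & 0xFFFF) against the minor it needs. */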

int vmm_get_version(struct savearea *save)
{
    save->save_r3 = kVmmCurrentVersion;             /* Return the version */
    return 1;
}


/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Inputs:
**      none
**
** Outputs:
**      32-bit number representing functionality supported by this
**      version of the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_features(struct savearea *save)
{
    save->save_r3 = kVmmCurrentFeatures;            /* Return the features */
    if(getPerProc()->pf.Available & pf64Bit) {
        save->save_r3 &= ~kVmmFeature_LittleEndian; /* No little endian here */
        save->save_r3 |= kVmmFeature_SixtyFourBit;  /* Set that we can do 64-bit */
    }
    return 1;
}


/*-----------------------------------------------------------------------
** vmm_max_addr
**
** This function returns the maximum addressable virtual address supported
**
** Outputs:
**      Returns max address
-----------------------------------------------------------------------*/

addr64_t
vmm_max_addr(__unused thread_t act)
{
    return vm_max_address;                          /* Return the maximum address */
}

/*-----------------------------------------------------------------------
** vmm_get_XA
**
** This function retrieves the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
**
** Inputs:
**      act   - pointer to current thread activation structure
**      index - index returned by vmm_init_context
**
** Outputs:
**      Return code is set to the XA flags. If the index is invalid or the
**      context has not been created, we return 0.
-----------------------------------------------------------------------*/

unsigned int vmm_get_XA(
    thread_t            act,
    vmm_thread_index_t  index)
{
    vmmCntrlEntry   *CEntry;

    CEntry = vmm_get_entry(act, index);             /* Convert index to entry */
    if (CEntry == NULL) return 0;                   /* Either this isn't a vmm or the index is bogus */

    return CEntry->vmmXAFlgs;                       /* Return the flags */
}

/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**      act            - pointer to current thread activation
**      version        - requested version of the Vmm interface (allowing
**                       future versions of the interface to change, but still
**                       support older clients)
**      vmm_user_state - pointer to a logical page within the
**                       client's address space
**
** Outputs:
**      kernel return code indicating success or failure
-----------------------------------------------------------------------*/

int vmm_init_context(struct savearea *save)
{
    thread_t            act;
    vmm_version_t       version;
    vmm_state_page_t    *vmm_user_state;
    vmmCntrlTable       *CTable;
    vm_offset_t         conkern;
    vmm_state_page_t    *vks;
    ppnum_t             conphys;
    kern_return_t       ret;
    int                 cvi, i;
    task_t              task;
    thread_t            fact, gact;
    pmap_t              hpmap;
    pmap_t              gpmap;

    vmm_user_state = CAST_DOWN(vmm_state_page_t *, save->save_r4); /* Get the user address of the comm area */
    if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {  /* Make sure the comm area is page aligned */
        save->save_r3 = KERN_FAILURE;               /* Return failure */
        return 1;
    }

    /* Make sure that the version requested is supported */
    version = save->save_r3;                        /* Pick up passed in version */
    if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
        save->save_r3 = KERN_FAILURE;               /* Return failure */
        return 1;
    }

    if((version & 0xFFFF) > kVmmCurMinorVersion) {  /* Check for valid minor */
        save->save_r3 = KERN_FAILURE;               /* Return failure */
        return 1;
    }

    act = current_thread();                         /* Pick up our activation */

    ml_set_interrupts_enabled(TRUE);                /* This can take a bit of time so pass interruptions */

    task = current_task();                          /* Figure out who we are */

    task_lock(task);                                /* Lock our task */

    fact = (thread_t)task->threads.next;            /* Get the first activation on task */
    gact = NULL;                                    /* Pretend we didn't find it yet */

    for(i = 0; i < task->thread_count; i++) {       /* All of the activations */
        if(fact->machine.vmmControl) {              /* Is this a virtual machine monitor? */
            gact = fact;                            /* Yeah... */
            break;                                  /* Bail the loop... */
        }
        fact = (thread_t)fact->task_threads.next;   /* Go to the next one */
    }


    /*
     * We only allow one thread per task to be a virtual machine monitor right now. This solves
     * a number of potential problems that I can't put my finger on right now.
     *
     * Ultimately, I think we want to move the controls and make all this task based instead of
     * thread based. That would allow an emulator architecture to spawn a kernel thread for each
     * VM (if they want) rather than hand dispatch contexts.
     */

    if(gact && (gact != act)) {                     /* Check if another thread is a vmm or trying to be */
        task_unlock(task);                          /* Release task lock */
        ml_set_interrupts_enabled(FALSE);           /* Set back interruptions */
        save->save_r3 = KERN_FAILURE;               /* We must play alone... */
        return 1;
    }

    if(!gact) act->machine.vmmControl = (vmmCntrlTable *)1;    /* Temporarily mark that we are the vmm thread */
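    /* The literal 1 is a sentinel: it is non-NULL, so once the task lock is dropped any
       other thread scanning for a vmm thread sees this activation as claimed, yet it is
       distinguishable from a real control table pointer, which we test for below */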

    task_unlock(task);                              /* Safe to release now (because we've marked ourselves) */

    CTable = act->machine.vmmControl;               /* Get the control table address */
    if ((unsigned int)CTable == 1) {                /* If we are marked, try to allocate a new table, otherwise we have one */
        if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {    /* Get a fresh emulation control table */
            act->machine.vmmControl = NULL;         /* Unmark us as vmm 'cause we failed */
            ml_set_interrupts_enabled(FALSE);       /* Set back interruptions */
            save->save_r3 = KERN_RESOURCE_SHORTAGE; /* No storage... */
            return 1;
        }

        bzero((void *)CTable, sizeof(vmmCntrlTable));   /* Clean it up */
        act->machine.vmmControl = CTable;           /* Initialize the table anchor */
    }

    for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {    /* Search to find a free slot */
        if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break; /* Bail if we find an unused slot */
    }

    if(cvi >= kVmmMaxContexts) {                    /* Did we find one? */
        ml_set_interrupts_enabled(FALSE);           /* Set back interruptions */
        save->save_r3 = KERN_RESOURCE_SHORTAGE;     /* No empty slots... */
        return 1;
    }

    ret = vm_map_wire(                              /* Wire the virtual machine monitor's context area */
        act->map,
        (vm_offset_t)vmm_user_state,
        (vm_offset_t)vmm_user_state + PAGE_SIZE,
        VM_PROT_READ | VM_PROT_WRITE,
        FALSE);

    if (ret != KERN_SUCCESS)                        /* The wire failed, return the code */
        goto return_in_shame;

    /* Map the vmm state into the kernel's address space. */
    conphys = pmap_find_phys(act->map->pmap, (addr64_t)((uintptr_t)vmm_user_state));

    /* Find a virtual address to use. */
    ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
    if (ret != KERN_SUCCESS) {                      /* Did we find an address? */
        (void) vm_map_unwire(act->map,              /* No, unwire the context area */
            (vm_offset_t)vmm_user_state,
            (vm_offset_t)vmm_user_state + PAGE_SIZE,
            TRUE);
        goto return_in_shame;
    }

    /* Map it into the kernel's address space. */

    pmap_enter(kernel_pmap, conkern, conphys,
        VM_PROT_READ | VM_PROT_WRITE,
        VM_WIMG_USE_DEFAULT, TRUE);

    /* Clear the vmm state structure. */
    vks = (vmm_state_page_t *)conkern;
    bzero((char *)vks, PAGE_SIZE);


    /* We're home free now. Simply fill in the necessary info and return. */

    vks->interface_version = version;               /* Set our version code */
    vks->thread_index = cvi + 1;                    /* Tell the user the index for this virtual machine */

    CTable->vmmc[cvi].vmmFlags = vmmInUse;          /* Mark the slot in use and make sure the rest are clear */
    CTable->vmmc[cvi].vmmContextKern = vks;         /* Remember the kernel address of comm area */
    CTable->vmmc[cvi].vmmContextPhys = conphys;     /* Remember the state page physical addr */
    CTable->vmmc[cvi].vmmContextUser = vmm_user_state;  /* Remember user address of comm area */

    CTable->vmmc[cvi].vmmFacCtx.FPUsave = NULL;     /* Clear facility context control */
    CTable->vmmc[cvi].vmmFacCtx.FPUlevel = NULL;    /* Clear facility context control */
    CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0;         /* Clear facility context control */
    CTable->vmmc[cvi].vmmFacCtx.VMXsave = NULL;     /* Clear facility context control */
    CTable->vmmc[cvi].vmmFacCtx.VMXlevel = NULL;    /* Clear facility context control */
    CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0;         /* Clear facility context control */
    CTable->vmmc[cvi].vmmFacCtx.facAct = act;       /* Point back to the activation */

    (void)hw_atomic_add(&saveanchor.savetarget, 2); /* Account for the number of extra saveareas we think we might "need" */

    hpmap = act->map->pmap;                         /* Get host pmap */
    gpmap = pmap_create(0, FALSE);                  /* Make a fresh guest pmap */
    if (gpmap) {                                    /* Did we succeed ? */
        CTable->vmmAdsp[cvi] = gpmap;               /* Remember guest pmap for new context */
        if (lowGlo.lgVMMforcedFeats & vmmGSA) {     /* Forcing on guest shadow assist ? */
            vmm_activate_gsa(act, cvi+1);           /* Activate GSA */
        }
    } else {
        ret = KERN_RESOURCE_SHORTAGE;               /* We've failed to allocate a guest pmap */
        goto return_in_shame;                       /* Shame on us. */
    }

    if (!(hpmap->pmapFlags & pmapVMhost)) {         /* Do this stuff if this is our first time hosting */
        hpmap->pmapFlags |= pmapVMhost;             /* We're now hosting */
    }

    ml_set_interrupts_enabled(FALSE);               /* Set back interruptions */
    save->save_r3 = KERN_SUCCESS;                   /* Hip, hip, hooray... */
    return 1;

return_in_shame:
    if(!gact) kfree(CTable, sizeof(vmmCntrlTable)); /* Toss the table if we just allocated it */
    act->machine.vmmControl = NULL;                 /* Unmark us as vmm 'cause we failed */
    ml_set_interrupts_enabled(FALSE);               /* Set back interruptions */
    save->save_r3 = ret;                            /* Pass back return code... */
    return 1;

}


/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**      act   - pointer to current thread activation structure
**      index - index returned by vmm_init_context
**
** Outputs:
**      kernel return code indicating success or failure
**
** Strangeness note:
**      This call will also trash the address space with the same ID. While this
**      is really not too cool, we have to do it because we need to make
**      sure that old VMM users (not that we really have any) who depend upon
**      the address space going away with the context still work the same.
-----------------------------------------------------------------------*/

kern_return_t vmm_tear_down_context(
    thread_t            act,
    vmm_thread_index_t  index)
{
    vmmCntrlEntry   *CEntry;
    vmmCntrlTable   *CTable;
    int             cvi;
    pmap_t          gpmap;
    pmap_t          pmap;

    CEntry = vmm_get_entry(act, index);             /* Convert index to entry */
    if (CEntry == NULL) return KERN_FAILURE;        /* Either this isn't a vmm thread or the index is bogus */

    ml_set_interrupts_enabled(TRUE);                /* This can take a bit of time so pass interruptions */

    (void)hw_atomic_sub(&saveanchor.savetarget, 2); /* We don't need these extra saveareas anymore */

    if(CEntry->vmmFacCtx.FPUsave) {                 /* Is there any floating point context? */
        toss_live_fpu(&CEntry->vmmFacCtx);          /* Get rid of any live context here */
        save_release((struct savearea *)CEntry->vmmFacCtx.FPUsave);    /* Release it */
    }

    if(CEntry->vmmFacCtx.VMXsave) {                 /* Is there any vector context? */
        toss_live_vec(&CEntry->vmmFacCtx);          /* Get rid of any live context here */
        save_release((struct savearea *)CEntry->vmmFacCtx.VMXsave);    /* Release it */
    }

    CEntry->vmmPmap = NULL;                         /* Remove this trace */
    gpmap = act->machine.vmmControl->vmmAdsp[index - 1];
                                                    /* Get context's guest pmap (if any) */
    if (gpmap) {                                    /* Check if there is an address space assigned here */
        if (gpmap->pmapFlags & pmapVMgsaa) {        /* Handle guest shadow assist case specially */
            hw_rem_all_gv(gpmap);                   /* Remove all guest mappings from shadow hash table */
        } else {
            mapping_remove(gpmap, 0xFFFFFFFFFFFFF000LL);    /* Remove final page explicitly because we might have mapped it */
            pmap_remove(gpmap, 0, 0xFFFFFFFFFFFFF000LL);    /* Remove all entries from this map */
        }
        pmap_destroy(gpmap);                        /* Toss the pmap for this context */
        act->machine.vmmControl->vmmAdsp[index - 1] = NULL; /* Clean it up */
    }

    (void) vm_map_unwire(                           /* Unwire the user comm page */
        act->map,
        (vm_offset_t)CEntry->vmmContextUser,
        (vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
        FALSE);

    kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);  /* Remove kernel's view of the comm page */

    CTable = act->machine.vmmControl;               /* Get the control table address */
    CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;   /* Make sure we don't try to automap into this */

    CEntry->vmmFlags = 0;                           /* Clear out all of the flags for this entry including in use */
    CEntry->vmmContextKern = NULL;                  /* Clear the kernel address of comm area */
    CEntry->vmmContextUser = NULL;                  /* Clear the user address of comm area */

    CEntry->vmmFacCtx.FPUsave = NULL;               /* Clear facility context control */
    CEntry->vmmFacCtx.FPUlevel = NULL;              /* Clear facility context control */
    CEntry->vmmFacCtx.FPUcpu = 0;                   /* Clear facility context control */
    CEntry->vmmFacCtx.VMXsave = NULL;               /* Clear facility context control */
    CEntry->vmmFacCtx.VMXlevel = NULL;              /* Clear facility context control */
    CEntry->vmmFacCtx.VMXcpu = 0;                   /* Clear facility context control */
    CEntry->vmmFacCtx.facAct = NULL;                /* Clear facility context control */

    for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {    /* Scan to see if any contexts are still in use */
        if(CTable->vmmc[cvi].vmmFlags & vmmInUse) { /* Return if there are still some in use */
            ml_set_interrupts_enabled(FALSE);       /* No more interruptions */
            return KERN_SUCCESS;                    /* Leave... */
        }
    }

    /*
     * When we have tossed the last context, toss any address spaces left over before releasing
     * the VMM control block
     */

    for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {   /* Look at all slots */
        if(!act->machine.vmmControl->vmmAdsp[cvi - 1]) continue;    /* Nothing to remove here */
        mapping_remove(act->machine.vmmControl->vmmAdsp[cvi - 1], 0xFFFFFFFFFFFFF000LL);    /* Remove final page explicitly because we might have mapped it */
        pmap_remove(act->machine.vmmControl->vmmAdsp[cvi - 1], 0, 0xFFFFFFFFFFFFF000LL);    /* Remove all entries from this map */
        pmap_destroy(act->machine.vmmControl->vmmAdsp[cvi - 1]);    /* Toss the pmap for this context */
        act->machine.vmmControl->vmmAdsp[cvi - 1] = NULL;   /* Clear just in case */
    }

    pmap = act->map->pmap;                          /* Get our pmap */
    if (pmap->pmapVmmExt) {                         /* Release any VMM pmap extension block and shadow hash table */
        vmm_release_shadow_hash(pmap->pmapVmmExt);  /* Release extension block and shadow hash table */
        pmap->pmapVmmExt = NULL;                    /* Forget extension block */
        pmap->pmapVmmExtPhys = 0;                   /* Forget extension block's physical address, too */
    }
    pmap->pmapFlags &= ~pmapVMhost;                 /* We're no longer hosting */

    kfree(CTable, sizeof(vmmCntrlTable));           /* Toss the table because we tossed the last context */
    act->machine.vmmControl = NULL;                 /* Unmark us as vmm */

    ml_set_interrupts_enabled(FALSE);               /* No more interruptions */

    return KERN_SUCCESS;
}


/*-----------------------------------------------------------------------
** vmm_activate_XA
**
** This function activates the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
** Note that this function does a lot of the same stuff as vmm_tear_down_context
** and vmm_init_context.
**
** Inputs:
**      act   - pointer to current thread activation structure
**      index - index returned by vmm_init_context
**      flags - the extended architecture flags
**
**
** Outputs:
**      KERN_SUCCESS if vm is valid and initialized. KERN_FAILURE if not.
**      Also, the internal flags are set and, additionally, the VM is completely reset.
-----------------------------------------------------------------------*/
kern_return_t vmm_activate_XA(
    thread_t            act,
    vmm_thread_index_t  index,
    unsigned int        xaflags)
{
    vmmCntrlEntry   *CEntry;
    kern_return_t   result = KERN_SUCCESS;          /* Assume success */

    if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && !(getPerProc()->pf.Available & pf64Bit)))
        return (KERN_FAILURE);                      /* Unknown or unsupported feature requested */

    CEntry = vmm_get_entry(act, index);             /* Convert index to entry */
    if (CEntry == NULL) return KERN_FAILURE;        /* Either this isn't a vmm or the index is bogus */

    ml_set_interrupts_enabled(TRUE);                /* This can take a bit of time so pass interruptions */

    vmm_flush_context(act, index);                  /* Flush the context */

    if (xaflags & vmm64Bit) {                       /* Activating 64-bit mode ? */
        CEntry->vmmXAFlgs |= vmm64Bit;              /* Activate 64-bit mode */
    }

    if (xaflags & vmmGSA) {                         /* Activating guest shadow assist ? */
        result = vmm_activate_gsa(act, index);      /* Activate guest shadow assist */
    }

    ml_set_interrupts_enabled(FALSE);               /* No more interruptions */

    return result;                                  /* Return activate result */
}

/*-----------------------------------------------------------------------
** vmm_deactivate_XA
**
-----------------------------------------------------------------------*/
kern_return_t vmm_deactivate_XA(
    thread_t            act,
    vmm_thread_index_t  index,
    unsigned int        xaflags)
{
    vmmCntrlEntry   *CEntry;
    kern_return_t   result = KERN_SUCCESS;          /* Assume success */

    if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && (getPerProc()->pf.Available & pf64Bit)))
        return (KERN_FAILURE);                      /* Unknown or unsupported feature requested */

    CEntry = vmm_get_entry(act, index);             /* Convert index to entry */
    if (CEntry == NULL) return KERN_FAILURE;        /* Either this isn't a vmm or the index is bogus */

    ml_set_interrupts_enabled(TRUE);                /* This can take a bit of time so pass interruptions */

    vmm_flush_context(act, index);                  /* Flush the context */

    if (xaflags & vmm64Bit) {                       /* Deactivating 64-bit mode ? */
        CEntry->vmmXAFlgs &= ~vmm64Bit;             /* Deactivate 64-bit mode */
    }

    if (xaflags & vmmGSA) {                         /* Deactivating guest shadow assist ? */
        vmm_deactivate_gsa(act, index);             /* Deactivate guest shadow assist */
    }

    ml_set_interrupts_enabled(FALSE);               /* No more interruptions */

    return result;                                  /* Return deactivate result */
}


/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**      activation to tear down
**
** Outputs:
**      All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/
void vmm_tear_down_all(thread_t act) {

    vmmCntrlTable   *CTable;
    int             cvi;
    kern_return_t   ret;
    struct savearea *save;
    spl_t           s;

    if(act->machine.specFlags & runningVM) {        /* Are we actually in a context right now? */
        save = find_user_regs(act);                 /* Find the user state context */
        if(!save) {                                 /* Did we find it? */
            panic("vmm_tear_down_all: runningVM marked but no user state context\n");
            return;
        }

        save->save_exception = kVmmBogusContext*4;  /* Indicate that this context is bogus now */
        s = splhigh();                              /* Make sure interrupts are off */
        vmm_force_exit(act, save);                  /* Force an exit from VM state */
        splx(s);                                    /* Restore interrupts */
    }

    if(act->machine.vmmControl) {                   /* Do we have a vmm control block? */
        CTable = act->machine.vmmControl;
        for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {   /* Look at all slots */
            if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) { /* Is this one in use? */
                ret = vmm_tear_down_context(act, cvi);  /* Take down the found context */
                if(ret != KERN_SUCCESS) {           /* Did it go away? */
                    panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %p, cvi = %d\n",
                        ret, act, cvi);
                }
            }
        }

        /*
         * Note that all address spaces should be gone here.
         */
        if(act->machine.vmmControl) {               /* Did we find one? */
            panic("vmm_tear_down_all: control table did not get deallocated\n"); /* Table did not go away */
        }
    }
}

/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it, and this mapping, may disappear at any time, even before it gets used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**      act   - pointer to current thread activation
**      index - index of address space to map into
**      va    - virtual address within the client's address
**              space
**      ava   - virtual address within the alternate address
**              space
**      prot  - protection flags
**
** Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
** areas are not allowed and will fail. Same with directly mapped I/O areas.
**
** Input conditions:
**      Interrupts disabled (from fast trap)
**
** Outputs:
**      kernel return code indicating success or failure
**      if success, va resident and alternate mapping made
-----------------------------------------------------------------------*/

kern_return_t vmm_map_page(
    thread_t        act,
    vmm_adsp_id_t   index,
    addr64_t        cva,
    addr64_t        ava,
    vm_prot_t       prot)
{
    kern_return_t       ret;
    register mapping_t  *mp;
    vm_map_t            map;
    addr64_t            ova, nextva;
    pmap_t              pmap;

    pmap = vmm_get_adsp(act, index);                /* Get the guest pmap for this address space */
    if(!pmap) return KERN_FAILURE;                  /* Bogus address space, no VMs, or we can't make a pmap, failure... */

    if(ava > vm_max_address) return kVmmInvalidAddress; /* Does the machine support an address of this size? */

    map = current_thread()->map;                    /* Get the host's map */

    if (pmap->pmapFlags & pmapVMgsaa) {             /* Guest shadow assist active ? */
        ret = hw_res_map_gv(map->pmap, pmap, cva, ava, getProtPPC(prot, TRUE));
                                                    /* Attempt to resume an existing gv->phys mapping */
        if (mapRtOK != ret) {                       /* Nothing to resume, construct a new mapping */
            unsigned int    pindex;
            phys_entry_t    *physent;
            unsigned int    pattr;
            unsigned int    wimg;
            unsigned int    mflags;
            addr64_t        gva;

            while (1) {                             /* Find host mapping or fail */
                mp = mapping_find(map->pmap, cva, &nextva, 0);
                                                    /* Attempt to find host mapping and pin it */
                if (mp) break;                      /* Got it */

                ml_set_interrupts_enabled(TRUE);
                                                    /* Open 'rupt window */
                ret = vm_fault(map,                 /* Didn't find it, try to fault in host page read/write */
                    vm_map_trunc_page(cva),
                    VM_PROT_READ | VM_PROT_WRITE,
                    FALSE,                          /* change wiring */
                    THREAD_UNINT,
                    NULL,
                    0);
                ml_set_interrupts_enabled(FALSE);
                                                    /* Close 'rupt window */
                if (ret != KERN_SUCCESS)
                    return KERN_FAILURE;            /* Fault failed, return failure */
            }

            if (mpNormal != (mp->mpFlags & mpType)) {
                                                    /* Host mapping must be a vanilla page */
                mapping_drop_busy(mp);              /* Un-pin host mapping */
                return KERN_FAILURE;                /* Return failure */
            }

            /* Partially construct gv->phys mapping */
            physent = mapping_phys_lookup(mp->mpPAddr, &pindex);
            if (!physent) {
                mapping_drop_busy(mp);
                return KERN_FAILURE;
            }
            pattr = ((physent->ppLink & (ppI | ppG)) >> 60);
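            /* Build the PPC WIMG storage attributes for the new mapping. In the 4-bit
               field, W (write-through) = 0x8, I (caching-inhibited) = 0x4, M (memory
               coherence) = 0x2, and G (guarded) = 0x1: start with M set, then add I
               and/or G from the physical page's attributes */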
            wimg = 0x2;
            if (pattr & mmFlgCInhib) wimg |= 0x4;
            if (pattr & mmFlgGuarded) wimg |= 0x1;
            mflags = (pindex << 16) | mpGuest;
            gva = ((ava & ~mpHWFlags) | (wimg << 3) | getProtPPC(prot, TRUE));

            hw_add_map_gv(map->pmap, pmap, gva, mflags, mp->mpPAddr);
                                                    /* Construct new guest->phys mapping */

            mapping_drop_busy(mp);                  /* Un-pin host mapping */
        }
    } else {
        while(1) {                                  /* Keep trying until we get it or until we fail */

            mp = mapping_find(map->pmap, cva, &nextva, 0);  /* Find the mapping for this address */

            if(mp) break;                           /* We found it */

            ml_set_interrupts_enabled(TRUE);        /* Enable interruptions */
            ret = vm_fault(map,                     /* Didn't find it, try to fault it in read/write... */
                vm_map_trunc_page(cva),
                VM_PROT_READ | VM_PROT_WRITE,
                FALSE,                              /* change wiring */
                THREAD_UNINT,
                NULL,
                0);
            ml_set_interrupts_enabled(FALSE);       /* Disable interruptions */
            if (ret != KERN_SUCCESS) return KERN_FAILURE;   /* There isn't a page there, return... */
        }

        if((mp->mpFlags & mpType) != mpNormal) {    /* If this is a block, a nest, or some other special thing, we can't map it */
            mapping_drop_busy(mp);                  /* We have everything we need from the mapping */
            return KERN_FAILURE;                    /* Leave in shame */
        }

        while(1) {                                  /* Keep trying the enter until it goes in */
            ova = mapping_make(pmap, ava, mp->mpPAddr, 0, 1, prot); /* Enter the mapping into the pmap */
            if(!ova) break;                         /* If there were no collisions, we are done... */
            mapping_remove(pmap, ova);              /* Remove the mapping that collided */
        }

        mapping_drop_busy(mp);                      /* We have everything we need from the mapping */
    }

    if (!((getPerProc()->spcFlags) & FamVMmode)) {
        act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;  /* Remember the last mapping we made */
        act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;   /* Remember last address space */
    }

    return KERN_SUCCESS;
}


/*-----------------------------------------------------------------------
** vmm_map_execute
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context and then directly starts executing.
**
** See description of vmm_map_page for details.
**
** Inputs:
**      Index is used for both the context and the address space ID.
**      index[24:31] is the context id and index[16:23] is the address space.
**      If the address space ID is 0, the context ID is used for it.
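**      (In shift terms, per the code below: the context id is the low-order byte,
**      index & 0xFF, and the address space id is the next byte, (index >> 8) & 0xFF.)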
**
** Outputs:
**      Normal exit is to run the VM. Abnormal exit is triggered via a
**      non-KERN_SUCCESS return from vmm_map_page or later during the
**      attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_map_execute(
    thread_t            act,
    vmm_thread_index_t  index,
    addr64_t            cva,
    addr64_t            ava,
    vm_prot_t           prot)
{
    kern_return_t       ret;
    vmmCntrlEntry       *CEntry;
    unsigned int        adsp;
    vmm_thread_index_t  cndx;

    cndx = index & 0xFF;                            /* Clean it up */

    CEntry = vmm_get_entry(act, cndx);              /* Get and validate the index */
    if (CEntry == NULL) return kVmmBogusContext;    /* Return bogus context */

    if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
        return kVmmBogusContext;                    /* Yes, invalid index in Fam */

    adsp = (index >> 8) & 0xFF;                     /* Get any requested address space */
    if(!adsp) adsp = (index & 0xFF);                /* If 0, use context ID as address space ID */

    ret = vmm_map_page(act, adsp, cva, ava, prot);  /* Go try to map the page on in */

    if(ret == KERN_SUCCESS) {
        act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;  /* Remember the last mapping we made */
        act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;    /* Remember last address space */
        vmm_execute_vm(act, cndx);                  /* Return was ok, launch the VM */
    }

    return ret;                                     /* We had trouble mapping in the page */

}

/*-----------------------------------------------------------------------
** vmm_map_list
**
** This function maps a list of pages into various address spaces
**
** Inputs:
**      act    - pointer to current thread activation
**      index  - index of default address space (used if not specified in list entry)
**      count  - number of pages to map
**      flavor - 0 if 32-bit version, 1 if 64-bit
**      vmcpComm in the comm page contains up to kVmmMaxMapPages to map
**
** Outputs:
**      kernel return code indicating success or failure
**      KERN_FAILURE is returned if kVmmMaxMapPages is exceeded
**      or the vmm_map_page call fails.
**      We return kVmmInvalidAddress if virtual address size is not supported
-----------------------------------------------------------------------*/

kern_return_t vmm_map_list(
    thread_t        act,
    vmm_adsp_id_t   index,
    unsigned int    cnt,
    unsigned int    flavor)
{
    vmmCntrlEntry   *CEntry;
    kern_return_t   ret;
    unsigned int    i;
    vmmMList        *lst;
    vmmMList64      *lstx;
    addr64_t        cva;
    addr64_t        ava;
    vm_prot_t       prot;
    vmm_adsp_id_t   adsp;

    CEntry = vmm_get_entry(act, index);             /* Convert index to entry */
    if (CEntry == NULL) return KERN_FAILURE;        /* Either this isn't a vmm or the index is bogus */

    if(cnt > kVmmMaxMapPages) return KERN_FAILURE;  /* They tried to map too many */
    if(!cnt) return KERN_SUCCESS;                   /* If they said none, we're done... */

    lst = (vmmMList *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];    /* Point to the first entry */
    lstx = (vmmMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */

    for(i = 0; i < cnt; i++) {                      /* Step through the list and map each page */
        if(flavor) {                                /* Check if 32- or 64-bit addresses */
            cva = lstx[i].vmlva;                    /* Get the 64-bit actual address */
            ava = lstx[i].vmlava;                   /* Get the 64-bit guest address */
        }
        else {
            cva = lst[i].vmlva;                     /* Get the 32-bit actual address */
            ava = lst[i].vmlava;                    /* Get the 32-bit guest address */
        }

        prot = ava & vmmlProt;                      /* Extract the protection bits */
        adsp = (ava & vmmlAdID) >> 4;               /* Extract an explicit address space request */
        if(!adsp)                                   /* If no explicit, use supplied default */
            adsp = index - 1;
        ava &= 0xFFFFFFFFFFFFF000ULL;               /* Clean up the address */

        ret = vmm_map_page(act, index, cva, ava, prot); /* Go try to map the page on in */
        if(ret != KERN_SUCCESS)                     /* Bail if any error */
            return ret;
    }

    return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** Given a context index and a guest virtual address, convert the address
** to its corresponding host virtual address.
**
** Inputs:
**      act   - pointer to current thread activation
**      index - context index
**      gva   - guest virtual address
**
** Outputs:
**      Host virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**      If the host address space contains multiple virtual addresses mapping
**      to the physical address corresponding to the specified guest virtual
**      address (i.e., host virtual aliases), it is unpredictable which host
**      virtual address (alias) will be returned. Moral of the story: No host
**      virtual aliases.
-----------------------------------------------------------------------*/
1233 | ||
1234 | addr64_t vmm_get_page_mapping( | |
1235 | thread_t act, | |
1236 | vmm_adsp_id_t index, | |
1237 | addr64_t gva) | |
1238 | { | |
1239 | register mapping_t *mp; | |
1240 | pmap_t pmap; | |
1241 | addr64_t nextva, hva; | |
1242 | ppnum_t pa; | |
1243 | ||
1244 | pmap = vmm_get_adsp(act, index); /* Get and validate the index */ | |
1245 | if (!pmap) return -1; /* No good, failure... */ | |
1246 | ||
1247 | if (pmap->pmapFlags & pmapVMgsaa) { /* Guest shadow assist (GSA) active ? */ | |
1248 | return (hw_gva_to_hva(pmap, gva)); /* Convert guest to host virtual address */ | |
1249 | } else { | |
1250 | mp = mapping_find(pmap, gva, &nextva, 0); /* Find guest mapping for this virtual address */ | |
1251 | ||
1252 | if(!mp) return -1; /* Not mapped, return -1 */ | |
1253 | ||
1254 | pa = mp->mpPAddr; /* Remember the physical page address */ | |
1255 | ||
1256 | mapping_drop_busy(mp); /* Go ahead and release the mapping now */ | |
1257 | ||
1258 | pmap = current_thread()->map->pmap; /* Get the host pmap */ | |
1259 | hva = mapping_p2v(pmap, pa); /* Now find the source virtual */ | |
1260 | ||
1261 | if(hva != 0) return hva; /* We found it... */ | |
1262 | ||
1263 | panic("vmm_get_page_mapping: could not back-map guest va (%016llX)\n", gva); | |
1264 | /* We are bad wrong if we can't find it */ | |
1265 | ||
1266 | return -1; /* Never executed, prevents compiler warning */ | |
1267 | } | |
1268 | } | |
1269 | ||
1270 | /*----------------------------------------------------------------------- | |
1271 | ** vmm_unmap_page | |
1272 | ** | |
1273 | ** This function unmaps a page from the guest address space. | |
1274 | ** | |
1275 | ** Inputs: | |
1276 | ** act - pointer to current thread activation | |
1277 | ** index - index of vmm state for this page | |
1278 | ** va - virtual address within the vmm's address | |
1279 | ** space | |
1280 | ** | |
1281 | ** Outputs: | |
1282 | ** kernel return code indicating success or failure | |
1283 | -----------------------------------------------------------------------*/ | |
1284 | ||
1285 | kern_return_t vmm_unmap_page( | |
1286 | thread_t act, | |
1287 | vmm_adsp_id_t index, | |
1288 | addr64_t va) | |
1289 | { | |
1290 | addr64_t nadd; | |
1291 | pmap_t pmap; | |
1292 | ||
1293 | pmap = vmm_get_adsp(act, index); /* Get and validate the index */ | |
1294 | if (!pmap) return KERN_FAILURE; /* No good, failure... */ | |
1295 | ||
1296 | if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */ | |
1297 | hw_susp_map_gv(act->map->pmap, pmap, va); /* Suspend the mapping */ | |
1298 | return (KERN_SUCCESS); /* Always returns success */ | |
1299 | } else { | |
1300 | nadd = mapping_remove(pmap, va); /* Toss the mapping */ | |
1301 | ||
1302 | return ((nadd & 1) ? KERN_FAILURE : KERN_SUCCESS); /* Return... */ | |
1303 | } | |
1304 | } | |
1305 | ||
1306 | /*----------------------------------------------------------------------- | |
1307 | ** vmm_unmap_list | |
1308 | ** | |
1309 | ** This function unmaps a list of pages from the alternate's logical | |
1310 | ** address space. | |
1311 | ** | |
1312 | ** Inputs: | |
1313 | ** act - pointer to current thread activation | |
1314 | ** index - index of vmm state for this page | |
1315 | ** count - number of pages to release | |
1316 | ** flavor - 0 if 32-bit, 1 if 64-bit | |
1317 | ** vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap | |
1318 | ** | |
1319 | ** Outputs: | |
1320 | ** kernel return code indicating success or failure | |
1321 | ** KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded | |
1322 | -----------------------------------------------------------------------*/ | |
1323 | ||
1324 | kern_return_t vmm_unmap_list( | |
1325 | thread_t act, | |
1326 | vmm_adsp_id_t index, | |
1327 | unsigned int cnt, | |
1328 | unsigned int flavor) | |
1329 | { | |
1330 | vmmCntrlEntry *CEntry; | |
1331 | kern_return_t kern_result = KERN_SUCCESS; | |
1332 | unsigned int i; | |
1333 | addr64_t gva; | |
1334 | vmmUMList *lst; | |
1335 | vmmUMList64 *lstx; | |
1336 | pmap_t pmap; | |
1337 | int adsp; | |
1338 | ||
1339 | CEntry = vmm_get_entry(act, index); /* Convert index to entry */ | |
1340 | if (CEntry == NULL) { /* Either this isn't a vmm or the index is bogus */ | |
1341 | kern_result = KERN_FAILURE; | |
1342 | goto out; | |
1343 | } | |
1344 | ||
1345 | if(cnt > kVmmMaxUnmapPages) { /* They tried to unmap too many */ | |
1346 | kern_result = KERN_FAILURE; | |
1347 | goto out; | |
1348 | } | |
1349 | if(!cnt) { /* If they said none, we're done... */ | |
1350 | kern_result = KERN_SUCCESS; | |
1351 | goto out; | |
1352 | } | |
1353 | ||
1354 | lstx = (vmmUMList64 *) &((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */ | |
1355 | lst = (vmmUMList *)lstx; | |
1356 | ||
1357 | for(i = 0; i < cnt; i++) { /* Step and release all pages in list */ | |
1358 | if(flavor) { /* Check if 32- or 64-bit addresses */ | |
1359 | gva = lstx[i].vmlava; /* Get the 64-bit guest address */ | |
1360 | } | |
1361 | else { | |
1362 | gva = lst[i].vmlava; /* Get the 32-bit guest address */ | |
1363 | } | |
1364 | ||
1365 | adsp = (gva & vmmlAdID) >> 4; /* Extract an explicit address space request */ | |
1366 | if(!adsp) /* If no explicit, use supplied default */ | |
1367 | adsp = index - 1; | |
1368 | pmap = act->machine.vmmControl->vmmAdsp[adsp]; /* Get the pmap for this request */ | |
1369 | if(!pmap) | |
1370 | continue; /* Ain't nuthin' mapped here, no durn map... */ | |
1371 | ||
1372 | gva &= 0xFFFFFFFFFFFFF000ULL; /* Clean up the address */ | |
1373 | if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */ | |
1374 | hw_susp_map_gv(act->map->pmap, pmap, gva); | |
1375 | /* Suspend the mapping */ | |
1376 | } else { | |
1377 | (void)mapping_remove(pmap, gva); /* Toss the mapping */ | |
1378 | } | |
1379 | } | |
1380 | ||
1381 | out: | |
1382 | return kern_result; | |
1383 | } | |
1384 | ||
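| /* | |
|  * Illustrative sketch only, not part of the original file: filling the | |
|  * comm area with 64-bit unmap-list entries and handing them to | |
|  * vmm_unmap_list. The helper name and the gvas array are ours; the | |
|  * structures are the ones used in the loop above. | |
|  */ | |
| static kern_return_t vmm_unmap_batch( | |
| 	thread_t act, /* Current thread activation */ | |
| 	vmm_adsp_id_t index, /* Default address space */ | |
| 	addr64_t *gvas, /* Guest addresses to unmap */ | |
| 	unsigned int cnt) /* How many of them */ | |
| { | |
| 	vmmCntrlEntry *CEntry; | |
| 	vmmUMList64 *lstx; | |
| 	unsigned int i; | |
| 	| |
| 	CEntry = vmm_get_entry(act, index); /* Validate the context */ | |
| 	if ((CEntry == NULL) || (cnt > kVmmMaxUnmapPages)) return KERN_FAILURE; | |
| 	| |
| 	lstx = (vmmUMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; | |
| 	for (i = 0; i < cnt; i++) | |
| 		lstx[i].vmlava = gvas[i] & 0xFFFFFFFFFFFFF000ULL; /* No explicit adsp, use the default */ | |
| 	| |
| 	return vmm_unmap_list(act, index, cnt, 1); /* Flavor 1 = 64-bit entries */ | |
| } | |
| ||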
1385 | /*----------------------------------------------------------------------- | |
1386 | ** vmm_unmap_all_pages | |
1387 | ** | |
1388 | ** This function unmaps all pages from the alternate's logical | |
1389 | ** address space. | |
1390 | ** | |
1391 | ** Inputs: | |
1392 | ** act - pointer to current thread activation | |
1393 | ** index - index of context state | |
1394 | ** | |
1395 | ** Outputs: | |
1396 | ** none | |
1397 | ** | |
1398 | ** Note: | |
1399 | ** All pages are unmapped, but the address space (i.e., pmap) is still alive | |
1400 | -----------------------------------------------------------------------*/ | |
1401 | ||
1402 | void vmm_unmap_all_pages( | |
1403 | thread_t act, | |
1404 | vmm_adsp_id_t index) | |
1405 | { | |
1406 | pmap_t pmap; | |
1407 | ||
1408 | pmap = vmm_get_adsp(act, index); /* Convert index to entry */ | |
1409 | if (!pmap) return; /* Either this isn't vmm thread or the index is bogus */ | |
1410 | ||
1411 | if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */ | |
1412 | hw_rem_all_gv(pmap); /* Remove all guest's mappings from shadow hash table */ | |
1413 | } else { | |
1414 | /* | |
1415 | * Note: the pmap code won't deal with the last page in the address space, so handle it explicitly | |
1416 | */ | |
1417 | mapping_remove(pmap, 0xFFFFFFFFFFFFF000LL); /* Remove final page explicitly because we might have mapped it */ | |
1418 | pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000LL); /* Remove all entries from this map */ | |
1419 | } | |
1420 | } | |
1421 | ||
1422 | ||
1423 | /*----------------------------------------------------------------------- | |
1424 | ** vmm_get_page_dirty_flag | |
1425 | ** | |
1426 | ** This function returns the changed flag of the page | |
1427 | ** and optionally clears the flag. | |
1428 | ** | |
1429 | ** Inputs: | |
1430 | ** act - pointer to current thread activation | |
1431 | ** index - index of vmm state for this page | |
1432 | ** va - virtual address within the vmm's address | |
1433 | ** space | |
1434 | ** reset - Clears dirty if true, untouched if not | |
1435 | ** | |
1436 | ** Outputs: | |
1437 | ** the dirty bit | |
1438 | ** clears the dirty bit in the pte if requested | |
1439 | ** | |
1440 | ** Note: | |
1441 | ** The RC bits are merged into the global physical entry | |
1442 | -----------------------------------------------------------------------*/ | |
1443 | ||
1444 | boolean_t vmm_get_page_dirty_flag( | |
1445 | thread_t act, | |
1446 | vmm_adsp_id_t index, | |
1447 | addr64_t va, | |
1448 | unsigned int reset) | |
1449 | { | |
1450 | unsigned int RC; | |
1451 | pmap_t pmap; | |
1452 | ||
1453 | pmap = vmm_get_adsp(act, index); /* Convert index to entry */ | |
1454 | if (!pmap) return 1; /* Either this isn't vmm thread or the index is bogus */ | |
1455 | ||
1456 | if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */ | |
1457 | RC = hw_test_rc_gv(act->map->pmap, pmap, va, reset);/* Fetch the RC bits and clear if requested */ | |
1458 | } else { | |
1459 | RC = hw_test_rc(pmap, (addr64_t)va, reset); /* Fetch the RC bits and clear if requested */ | |
1460 | } | |
1461 | ||
1462 | switch (RC & mapRetCode) { /* Decode return code */ | |
1463 | ||
1464 | case mapRtOK: /* Changed */ | |
1465 | return ((RC & (unsigned int)mpC) == (unsigned int)mpC); /* Return if dirty or not */ | |
1466 | break; | |
1467 | ||
1468 | case mapRtNotFnd: /* Didn't find it */ | |
1469 | return 1; /* Return dirty */ | |
1470 | break; | |
1471 | ||
1472 | default: | |
1473 | panic("vmm_get_page_dirty_flag: hw_test_rc failed - rc = %d, pmap = %p, va = %016llX\n", RC, pmap, va); | |
1474 | ||
1475 | } | |
1476 | ||
1477 | return 1; /* Return the change bit */ | |
1478 | } | |
1479 | ||
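| /* | |
|  * Illustrative sketch only, not part of the original file: the dirty flag | |
|  * is typically consumed by a scan like this one, which counts the pages | |
|  * written since the last pass and clears the bit as it goes. The helper | |
|  * name and the assumption of 4K pages are ours. | |
|  */ | |
| static unsigned int vmm_count_dirty_pages( | |
| 	thread_t act, /* Current thread activation */ | |
| 	vmm_adsp_id_t index, /* Address space to scan */ | |
| 	addr64_t start, /* First page, page aligned */ | |
| 	unsigned int npages) /* Number of pages to check */ | |
| { | |
| 	unsigned int i, dirty = 0; | |
| 	| |
| 	for (i = 0; i < npages; i++) { /* Walk the range a page at a time */ | |
| 		if (vmm_get_page_dirty_flag(act, index, | |
| 		    start + ((addr64_t)i << 12), 1)) dirty++; /* Test and clear the change bit */ | |
| 	} | |
| 	| |
| 	return dirty; /* Pages dirtied since the last scan */ | |
| } | |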
1480 | ||
1481 | /*----------------------------------------------------------------------- | |
1482 | ** vmm_protect_page | |
1483 | ** | |
1484 | ** This function sets the protection bits of a mapped page | |
1485 | ** | |
1486 | ** Inputs: | |
1487 | ** act - pointer to current thread activation | |
1488 | ** index - index of vmm state for this page | |
1489 | ** va - virtual address within the vmm's address | |
1490 | ** space | |
1491 | ** prot - Protection flags | |
1492 | ** | |
1493 | ** Outputs: | |
1494 | ** none | |
1495 | ** Protection bits of the mapping are modified | |
1496 | ** | |
1497 | -----------------------------------------------------------------------*/ | |
1498 | ||
1499 | kern_return_t vmm_protect_page( | |
1500 | thread_t act, | |
1501 | vmm_adsp_id_t index, | |
1502 | addr64_t va, | |
1503 | vm_prot_t prot) | |
1504 | { | |
1505 | addr64_t nextva; | |
1506 | int ret; | |
1507 | pmap_t pmap; | |
1508 | ||
1509 | pmap = vmm_get_adsp(act, index); /* Convert index to entry */ | |
1510 | if (!pmap) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ | |
1511 | ||
1512 | if (pmap->pmapFlags & pmapVMgsaa) { /* Handle guest shadow assist specially */ | |
1513 | ret = hw_protect_gv(pmap, va, prot); /* Try to change protection, GSA variant */ | |
1514 | } else { | |
1515 | ret = hw_protect(pmap, va, prot, &nextva); /* Try to change protection */ | |
1516 | } | |
1517 | ||
1518 | switch (ret) { /* Decode return code */ | |
1519 | ||
1520 | case mapRtOK: /* All ok... */ | |
1521 | break; /* Outta here */ | |
1522 | ||
1523 | case mapRtNotFnd: /* Didn't find it */ | |
1524 | return KERN_SUCCESS; /* Ok, return... */ | |
1525 | break; | |
1526 | ||
1527 | default: | |
1528 | panic("vmm_protect_page: hw_protect failed - rc = %d, pmap = %p, va = %016llX\n", ret, pmap, (addr64_t)va); | |
1529 | ||
1530 | } | |
1531 | ||
1532 | if (!((getPerProc()->spcFlags) & FamVMmode)) { | |
1533 | act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */ | |
1534 | act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index; /* Remember last address space */ | |
1535 | } | |
1536 | ||
1537 | return KERN_SUCCESS; /* Return */ | |
1538 | } | |
1539 | ||
1540 | ||
1541 | /*----------------------------------------------------------------------- | |
1542 | ** vmm_protect_execute | |
1543 | ** | |
1544 | ** This function sets the protection bits of a mapped page | |
1545 | ** and then directly starts executing. | |
1546 | ** | |
1547 | ** See description of vmm_protect_page for details | |
1548 | ** | |
1549 | ** Inputs: | |
1550 | ** See vmm_protect_page and vmm_map_execute | |
1551 | ** | |
1552 | ** Outputs: | |
1553 | ** Normal exit is to run the VM. Abnormal exit is triggered via a | |
1554 | ** non-KERN_SUCCESS return from vmm_protect_page or later during the | |
1555 | ** attempt to transition into the VM. | |
1556 | -----------------------------------------------------------------------*/ | |
1557 | ||
1558 | vmm_return_code_t vmm_protect_execute( | |
1559 | thread_t act, | |
1560 | vmm_thread_index_t index, | |
1561 | addr64_t va, | |
1562 | vm_prot_t prot) | |
1563 | { | |
1564 | kern_return_t ret; | |
1565 | vmmCntrlEntry *CEntry; | |
1566 | unsigned int adsp; | |
1567 | vmm_thread_index_t cndx; | |
1568 | ||
1569 | cndx = index & 0xFF; /* Clean it up */ | |
1570 | CEntry = vmm_get_entry(act, cndx); /* Get and validate the index */ | |
1571 | if (CEntry == NULL) return kVmmBogusContext; /* Return bogus context */ | |
1572 | ||
1573 | adsp = (index >> 8) & 0xFF; /* Get any requested address space */ | |
1574 | if(!adsp) adsp = (index & 0xFF); /* If 0, use context ID as address space ID */ | |
1575 | ||
1576 | if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry)) | |
1577 | return kVmmBogusContext; /* Yes, invalid index in Fam */ | |
1578 | ||
1579 | ret = vmm_protect_page(act, adsp, va, prot); /* Go try to change access */ | |
1580 | ||
1581 | if(ret == KERN_SUCCESS) { | |
1582 | act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL; /* Remember the last mapping we made */ | |
1583 | act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx; /* Remember last address space */ | |
1584 | vmm_execute_vm(act, cndx); /* Return was ok, launch the VM */ | |
1585 | } | |
1586 | ||
1587 | return ret; /* We had trouble of some kind (shouldn't happen) */ | |
1588 | ||
1589 | } | |
1590 | ||
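| /* | |
|  * Illustrative sketch only, not part of the original file: | |
|  * vmm_protect_execute decodes its index as "context in the low byte, | |
|  * optional address space in the next byte, 0 meaning use the context ID". | |
|  * This helper, whose name is ours, packs such a combined index. | |
|  */ | |
| static vmm_thread_index_t vmm_pack_exec_index( | |
| 	unsigned int cndx, /* Context index */ | |
| 	unsigned int adsp) /* Explicit address space, 0 = default */ | |
| { | |
| 	return (vmm_thread_index_t)(((adsp & 0xFF) << 8) | (cndx & 0xFF)); | |
| } | |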
1591 | ||
1592 | /*----------------------------------------------------------------------- | |
1593 | ** vmm_get_float_state | |
1594 | ** | |
1595 | ** This function causes the current floating point state to | |
1596 | ** be saved into the shared context area. It also clears the | |
1597 | ** vmmFloatCngd changed flag. | |
1598 | ** | |
1599 | ** Inputs: | |
1600 | ** act - pointer to current thread activation structure | |
1601 | ** index - index returned by vmm_init_context | |
1602 | ** | |
1603 | ** Outputs: | |
1604 | ** context saved | |
1605 | -----------------------------------------------------------------------*/ | |
1606 | ||
1607 | kern_return_t vmm_get_float_state( | |
1608 | thread_t act, | |
1609 | vmm_thread_index_t index) | |
1610 | { | |
1611 | vmmCntrlEntry *CEntry; | |
1612 | int i; | |
1613 | register struct savearea_fpu *sv; | |
1614 | ||
1615 | CEntry = vmm_get_entry(act, index); /* Convert index to entry */ | |
1616 | if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ | |
1617 | ||
1618 | act->machine.specFlags &= ~floatCng; /* Clear the special flag */ | |
1619 | CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd; /* Clear the change indication */ | |
1620 | ||
1621 | fpu_save(&CEntry->vmmFacCtx); /* Save context if live */ | |
1622 | ||
1623 | if(CEntry->vmmFacCtx.FPUsave) { /* Is there context yet? */ | |
1624 | sv = CEntry->vmmFacCtx.FPUsave; | |
1625 | bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8); /* 32 registers */ | |
1626 | return KERN_SUCCESS; | |
1627 | } | |
1628 | ||
1629 | ||
1630 | for(i = 0; i < 32; i++) { /* Initialize floating points */ | |
1631 | CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit; /* Initial value */ | |
1632 | } | |
1633 | ||
1634 | return KERN_SUCCESS; | |
1635 | } | |
1636 | ||
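| /* | |
|  * Illustrative sketch only, not part of the original file: once | |
|  * vmm_get_float_state returns, the guest's floating point registers sit | |
|  * in the shared context area. The helper name is ours; the field layout | |
|  * is the one used above. | |
|  */ | |
| static double vmm_read_guest_fpr( | |
| 	vmmCntrlEntry *CEntry, /* Validated context entry */ | |
| 	int n) /* Register number, 0..31 */ | |
| { | |
| 	return CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[n].d; /* Saved (or initialized) value */ | |
| } | |
| ||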
1637 | /*----------------------------------------------------------------------- | |
1638 | ** vmm_get_vector_state | |
1639 | ** | |
1640 | ** This function causes the current vector state to | |
1641 | ** be saved into the shared context area. It also clears the | |
1642 | ** vmmVectorCngd changed flag. | |
1643 | ** | |
1644 | ** Inputs: | |
1645 | ** act - pointer to current thread activation structure | |
1646 | ** index - index returned by vmm_init_context | |
1647 | ** | |
1648 | ** Outputs: | |
1649 | ** context saved | |
1650 | -----------------------------------------------------------------------*/ | |
1651 | ||
1652 | kern_return_t vmm_get_vector_state( | |
1653 | thread_t act, | |
1654 | vmm_thread_index_t index) | |
1655 | { | |
1656 | vmmCntrlEntry *CEntry; | |
1657 | int i, j; | |
1658 | unsigned int vrvalidwrk; | |
1659 | register struct savearea_vec *sv; | |
1660 | ||
1661 | CEntry = vmm_get_entry(act, index); /* Convert index to entry */ | |
1662 | if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ | |
1663 | ||
1664 | vec_save(&CEntry->vmmFacCtx); /* Save context if live */ | |
1665 | ||
1666 | act->machine.specFlags &= ~vectorCng; /* Clear the special flag */ | |
1667 | CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd; /* Clear the change indication */ | |
1668 | ||
1669 | if(CEntry->vmmFacCtx.VMXsave) { /* Is there context yet? */ | |
1670 | sv = CEntry->vmmFacCtx.VMXsave; | |
1671 | vrvalidwrk = sv->save_vrvalid; /* Get the valid flags */ | |
1672 | ||
1673 | for(i = 0; i < 32; i++) { /* Copy the saved registers and invalidate the others */ | |
1674 | if(vrvalidwrk & 0x80000000) { /* Do we have a valid value here? */ | |
1675 | for(j = 0; j < 4; j++) { /* If so, copy it over */ | |
1676 | CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j]; | |
1677 | } | |
1678 | } | |
1679 | else { | |
1680 | for(j = 0; j < 4; j++) { /* Otherwise set to empty value */ | |
1681 | CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j]; | |
1682 | } | |
1683 | } | |
1684 | ||
1685 | vrvalidwrk = vrvalidwrk << 1; /* Shift over to the next */ | |
1686 | ||
1687 | } | |
1688 | ||
1689 | return KERN_SUCCESS; | |
1690 | } | |
1691 | ||
1692 | for(i = 0; i < 32; i++) { /* Initialize vector registers */ | |
1693 | for(j=0; j < 4; j++) { /* Do words */ | |
1694 | CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j]; /* Initial value */ | |
1695 | } | |
1696 | } | |
1697 | ||
1698 | return KERN_SUCCESS; | |
1699 | } | |
1700 | ||
1701 | /*----------------------------------------------------------------------- | |
1702 | ** vmm_set_timer | |
1703 | ** | |
1704 | ** This function sets a timer (in AbsoluteTime) to pop at a specific | |
1705 | ** time. It also clears the vmmTimerPop flag if the timer is | |
1706 | ** actually set. | |
1707 | ** | |
1708 | ** A timer is cleared by setting the time to 0. This will clear | |
1709 | ** the vmmTimerPop bit. Simply setting the timer to earlier than the | |
1710 | ** current time clears the internal timer request, but leaves the | |
1711 | ** vmmTimerPop flag set. | |
1712 | ** | |
1713 | ** | |
1714 | ** Inputs: | |
1715 | ** act - pointer to current thread activation structure | |
1716 | ** index - index returned by vmm_init_context | |
1717 | ** timerhi - high order word of AbsoluteTime to pop | |
1718 | ** timerlo - low order word of AbsoluteTime to pop | |
1719 | ** | |
1720 | ** Outputs: | |
1721 | ** timer set, vmmTimerPop cleared | |
1722 | -----------------------------------------------------------------------*/ | |
1723 | ||
1724 | kern_return_t vmm_set_timer( | |
1725 | thread_t act, | |
1726 | vmm_thread_index_t index, | |
1727 | unsigned int timerhi, | |
1728 | unsigned int timerlo) | |
1729 | { | |
1730 | vmmCntrlEntry *CEntry; | |
1731 | ||
1732 | CEntry = vmm_get_entry(act, index); /* Convert index to entry */ | |
1733 | if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ | |
1734 | ||
1735 | CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo; | |
1736 | ||
1737 | vmm_timer_pop(act); /* Go adjust all of the timer stuff */ | |
1738 | return KERN_SUCCESS; /* Leave now... */ | |
1739 | } | |
1740 | ||
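| /* | |
|  * Illustrative sketch only, not part of the original file: callers | |
|  * holding a 64-bit AbsoluteTime (e.g. from clock_get_uptime) split it | |
|  * into the two 32-bit words this interface takes. The wrapper name is | |
|  * ours. | |
|  */ | |
| static kern_return_t vmm_set_timer_64( | |
| 	thread_t act, /* Current thread activation */ | |
| 	vmm_thread_index_t index, /* Context to set the timer for */ | |
| 	uint64_t poptime) /* Absolute pop time, 0 to clear */ | |
| { | |
| 	return vmm_set_timer(act, index, | |
| 	    (unsigned int)(poptime >> 32), /* High-order word */ | |
| 	    (unsigned int)poptime); /* Low-order word */ | |
| } | |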
1741 | ||
1742 | /*----------------------------------------------------------------------- | |
1743 | ** vmm_get_timer | |
1744 | ** | |
1745 | ** This function causes the timer for a specified VM to be | |
1746 | ** returned in return_params[0] and return_params[1]. | |
1747 | ** Note that this is slightly awkward for 64-bit VMs: we split the | |
1748 | ** timer into two 32-bit halves so that we still fill parms 0 and 1, | |
1749 | ** even though the 64-bit parms are 8 bytes wide and could have | |
1750 | ** held the whole value in one. | |
1751 | ** | |
1752 | ** | |
1753 | ** Inputs: | |
1754 | ** act - pointer to current thread activation structure | |
1755 | ** index - index returned by vmm_init_context | |
1756 | ** | |
1757 | ** Outputs: | |
1758 | ** Timer value set in return_params[0] and return_params[1]. | |
1759 | ** Set to 0 if timer is not set. | |
1760 | -----------------------------------------------------------------------*/ | |
1761 | ||
1762 | kern_return_t vmm_get_timer( | |
1763 | thread_t act, | |
1764 | vmm_thread_index_t index) | |
1765 | { | |
1766 | vmmCntrlEntry *CEntry; | |
1767 | ||
1768 | CEntry = vmm_get_entry(act, index); /* Convert index to entry */ | |
1769 | if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ | |
1770 | ||
1771 | if(CEntry->vmmXAFlgs & vmm64Bit) { /* A 64-bit virtual machine? */ | |
1772 | CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32); /* Return the last timer value */ | |
1773 | CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[1] = (uint32_t)CEntry->vmmTimer; /* Return the last timer value */ | |
1774 | } | |
1775 | else { | |
1776 | CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[0] = (CEntry->vmmTimer >> 32); /* Return the last timer value */ | |
1777 | CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[1] = (uint32_t)CEntry->vmmTimer; /* Return the last timer value */ | |
1778 | } | |
1779 | return KERN_SUCCESS; | |
1780 | } | |
1781 | ||
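| /* | |
|  * Illustrative sketch only, not part of the original file: a 32-bit | |
|  * monitor reassembles the timer from the two return parameters set | |
|  * above. The helper name is ours; the structures are those used in | |
|  * vmm_get_timer. | |
|  */ | |
| static uint64_t vmm_read_timer_params( | |
| 	vmmCntrlEntry *CEntry) /* Validated context entry */ | |
| { | |
| 	return ((uint64_t)CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[0] << 32) | |
| 	    | (uint64_t)(uint32_t)CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[1]; | |
| } | |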
1782 | ||
1783 | /*----------------------------------------------------------------------- | |
1784 | ** vmm_timer_pop | |
1785 | ** | |
1786 | ** This function causes all timers in the array of VMs to be updated. | |
1787 | ** All appropriate flags are set or reset. If a VM is currently | |
1788 | ** running and its timer expired, it is intercepted. | |
1789 | ** | |
1790 | ** The qactTimer value is set to the lowest unexpired timer. It is | |
1791 | ** zeroed if all timers are expired or have been reset. | |
1792 | ** | |
1793 | ** Inputs: | |
1794 | ** act - pointer to current thread activation structure | |
1795 | ** | |
1796 | ** Outputs: | |
1797 | ** timers set, vmmTimerPop cleared or set | |
1798 | -----------------------------------------------------------------------*/ | |
1799 | ||
1800 | void vmm_timer_pop( | |
1801 | thread_t act) | |
1802 | { | |
1803 | vmmCntrlTable *CTable; | |
1804 | int cvi, any; | |
1805 | uint64_t now, soonest; | |
1806 | struct savearea *sv; | |
1807 | ||
1808 | if(!((unsigned int)act->machine.vmmControl & 0xFFFFFFFE)) { /* Are there any virtual machines? */ | |
1809 | panic("vmm_timer_pop: No virtual machines defined; act = %p\n", act); | |
1810 | } | |
1811 | ||
1812 | soonest = 0xFFFFFFFFFFFFFFFFULL; /* Max time */ | |
1813 | ||
1814 | clock_get_uptime(&now); /* What time is it? */ | |
1815 | ||
1816 | CTable = act->machine.vmmControl; /* Make this easier */ | |
1817 | any = 0; /* Haven't found a running unexpired timer yet */ | |
1818 | ||
1819 | for(cvi = 0; cvi < kVmmMaxContexts; cvi++) { /* Cycle through all and check time now */ | |
1820 | ||
1821 | if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue; /* Do not check if the entry is empty */ | |
1822 | ||
1823 | if(CTable->vmmc[cvi].vmmTimer == 0) { /* Is the timer reset? */ | |
1824 | CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop; /* Clear timer popped */ | |
1825 | CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop; /* Clear timer popped */ | |
1826 | continue; /* Check next */ | |
1827 | } | |
1828 | ||
1829 | if (CTable->vmmc[cvi].vmmTimer <= now) { | |
1830 | CTable->vmmc[cvi].vmmFlags |= vmmTimerPop; /* Set timer popped here */ | |
1831 | CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop; /* Set timer popped here */ | |
1832 | if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->machine.vmmCEntry) { /* Is this the running VM? */ | |
1833 | sv = find_user_regs(act); /* Get the user state registers */ | |
1834 | if(!sv) { /* Did we find something? */ | |
1835 | panic("vmm_timer_pop: no user context; act = %p\n", act); | |
1836 | } | |
1837 | sv->save_exception = kVmmReturnNull*4; /* Indicate that this is a null exception */ | |
1838 | vmm_force_exit(act, sv); /* Intercept a running VM */ | |
1839 | } | |
1840 | continue; /* Check the rest */ | |
1841 | } | |
1842 | else { /* It hasn't popped yet */ | |
1843 | CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop; /* Set timer not popped here */ | |
1844 | CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop; /* Set timer not popped here */ | |
1845 | } | |
1846 | ||
1847 | any = 1; /* Show we found an active unexpired timer */ | |
1848 | ||
1849 | if (CTable->vmmc[cvi].vmmTimer < soonest) | |
1850 | soonest = CTable->vmmc[cvi].vmmTimer; | |
1851 | } | |
1852 | ||
1853 | if(any) { | |
1854 | if (act->machine.qactTimer == 0 || soonest <= act->machine.qactTimer) | |
1855 | act->machine.qactTimer = soonest; /* Set lowest timer */ | |
1856 | } | |
1857 | } | |
1858 | ||
1859 | ||
1860 | ||
1861 | /*----------------------------------------------------------------------- | |
1862 | ** vmm_stop_vm | |
1863 | ** | |
1864 | ** This function prevents the specified VM(s) from running. | |
1865 | ** If any is currently executing, the execution is intercepted | |
1866 | ** with a code of kVmmStopped. Note that execution of the VM is | |
1867 | ** blocked until a vmmExecuteVM is called with the start flag set to 1. | |
1868 | ** This provides the ability for a thread to stop execution of a VM and | |
1869 | ** ensure that it will not be run until the emulator has processed the | |
1870 | ** "virtual" interruption. | |
1871 | ** | |
1872 | ** Inputs: | |
1873 | ** vmmask - 32 bit mask corresponding to the VMs to put in stop state | |
1874 | ** NOTE: if this mask is all 0s, the call is a no-op: KERN_SUCCESS is | |
1875 | ** returned and nothing is stopped. Also note that there is a potential | |
1876 | ** race here and the VM may not stop. | |
1877 | ** | |
1878 | ** Outputs: | |
1879 | ** kernel return code indicating success | |
1880 | ** or if no VMs are enabled, an invalid syscall exception. | |
1881 | -----------------------------------------------------------------------*/ | |
1882 | ||
1883 | int vmm_stop_vm(struct savearea *save) | |
1884 | { | |
1885 | ||
1886 | thread_t act; | |
1887 | vmmCntrlTable *CTable; | |
1888 | int cvi, i; | |
1889 | task_t task; | |
1890 | thread_t fact; | |
1891 | unsigned int vmmask; | |
1892 | ReturnHandler *stopapc; | |
1893 | ||
1894 | ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */ | |
1895 | ||
1896 | task = current_task(); /* Figure out who we are */ | |
1897 | ||
1898 | task_lock(task); /* Lock our task */ | |
1899 | ||
1900 | fact = (thread_t)task->threads.next; /* Get the first activation on task */ | |
1901 | act = NULL; /* Pretend we didn't find it yet */ | |
1902 | ||
1903 | for(i = 0; i < task->thread_count; i++) { /* All of the activations */ | |
1904 | if(fact->machine.vmmControl) { /* Is this a virtual machine monitor? */ | |
1905 | act = fact; /* Yeah... */ | |
1906 | break; /* Bail the loop... */ | |
1907 | } | |
1908 | fact = (thread_t)fact->task_threads.next; /* Go to the next one */ | |
1909 | } | |
1910 | ||
1911 | if(!((unsigned int)act)) { /* See if we have VMMs yet */ | |
1912 | task_unlock(task); /* No, unlock the task */ | |
1913 | ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ | |
1914 | return 0; /* Go generate a syscall exception */ | |
1915 | } | |
1916 | ||
1917 | thread_reference(act); | |
1918 | ||
1919 | task_unlock(task); /* Safe to release now */ | |
1920 | ||
1921 | thread_mtx_lock(act); | |
1922 | ||
1923 | CTable = act->machine.vmmControl; /* Get the pointer to the table */ | |
1924 | ||
1925 | if(!((unsigned int)CTable & -2)) { /* Are there any all the way up yet? */ | |
1926 | thread_mtx_unlock(act); /* Unlock the activation */ | |
1927 | thread_deallocate(act); | |
1928 | ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ | |
1929 | return 0; /* Go generate a syscall exception */ | |
1930 | } | |
1931 | ||
1932 | if(!(vmmask = save->save_r3)) { /* Get the stop mask and check if all zeros */ | |
1933 | thread_mtx_unlock(act); /* Unlock the activation */ | |
1934 | thread_deallocate(act); | |
1935 | ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ | |
1936 | save->save_r3 = KERN_SUCCESS; /* Set success */ | |
1937 | return 1; /* Return... */ | |
1938 | } | |
1939 | ||
1940 | for(cvi = 0; cvi < kVmmMaxContexts; cvi++) { /* Search slots */ | |
1941 | if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) { /* See if we need to stop and if it is in use */ | |
1942 | (void)hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop); /* Set this one to stop */ | |
1943 | } | |
1944 | vmmask = vmmask << 1; /* Slide mask over */ | |
1945 | } | |
1946 | ||
1947 | if(hw_compare_and_store(0, 1, &act->machine.emPendRupts)) { /* See if there is already a stop pending and lock out others if not */ | |
1948 | thread_mtx_unlock(act); /* Already one pending, unlock the activation */ | |
1949 | thread_deallocate(act); | |
1950 | ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ | |
1951 | save->save_r3 = KERN_SUCCESS; /* Say we did it... */ | |
1952 | return 1; /* Leave */ | |
1953 | } | |
1954 | ||
1955 | if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) { /* Get a return handler control block */ | |
1956 | act->machine.emPendRupts = 0; /* No memory, say we have given up request */ | |
1957 | thread_mtx_unlock(act); /* Unlock the activation */ | |
1958 | thread_deallocate(act); | |
1959 | ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ | |
1960 | save->save_r3 = KERN_RESOURCE_SHORTAGE; /* No storage... */ | |
1961 | return 1; /* Return... */ | |
1962 | } | |
1963 | ||
1964 | ml_set_interrupts_enabled(FALSE); /* Disable interruptions for now */ | |
1965 | ||
1966 | stopapc->handler = vmm_interrupt; /* Set interruption routine */ | |
1967 | ||
1968 | stopapc->next = act->handlers; /* Put our interrupt at the start of the list */ | |
1969 | act->handlers = stopapc; /* Point to us */ | |
1970 | ||
1971 | act_set_apc(act); /* Set an APC AST */ | |
1972 | ml_set_interrupts_enabled(TRUE); /* Enable interruptions now */ | |
1973 | ||
1974 | thread_mtx_unlock(act); /* Unlock the activation */ | |
1975 | thread_deallocate(act); | |
1976 | ||
1977 | ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ | |
1978 | save->save_r3 = KERN_SUCCESS; /* Hip, hip, hooray... */ | |
1979 | return 1; | |
1980 | } | |
1981 | ||
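| /* | |
|  * Illustrative sketch only, not part of the original file: the stop mask | |
|  * scanned above is MSB-first, so context slot 0 is bit 0x80000000, slot 1 | |
|  * the next bit down, and so on. The helper name is ours. | |
|  */ | |
| static unsigned int vmm_stop_mask_for_slot( | |
| 	int cvi) /* Context slot, 0..kVmmMaxContexts-1 */ | |
| { | |
| 	return 0x80000000U >> cvi; /* One bit per slot, MSB first */ | |
| } | |
| ||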
1982 | /*----------------------------------------------------------------------- | |
1983 | ** vmm_interrupt | |
1984 | ** | |
1985 | ** This function is executed asynchronously from an APC AST. | |
1986 | ** It is to be used for anything that needs to interrupt a running VM. | |
1987 | ** This includes any kind of interruption generation (other than timer pop) | |
1988 | ** or entering the stopped state. | |
1989 | ** | |
1990 | ** Inputs: | |
1991 | ** ReturnHandler *rh - the return handler control block as required by the APC. | |
1992 | ** thread_t act - the activation | |
1993 | ** | |
1994 | ** Outputs: | |
1995 | ** Whatever needed to be done is done. | |
1996 | -----------------------------------------------------------------------*/ | |
1997 | ||
1998 | void vmm_interrupt(ReturnHandler *rh, thread_t act) { | |
1999 | ||
2000 | vmmCntrlTable *CTable; | |
2001 | struct savearea *sv; | |
2002 | boolean_t inter; | |
2003 | ||
2004 | ||
2005 | ||
2006 | kfree(rh, sizeof(ReturnHandler)); /* Release the return handler block */ | |
2007 | ||
2008 | inter = ml_set_interrupts_enabled(FALSE); /* Disable interruptions for now */ | |
2009 | ||
2010 | act->machine.emPendRupts = 0; /* Say that there are no more interrupts pending */ | |
2011 | CTable = act->machine.vmmControl; /* Get the pointer to the table */ | |
2012 | ||
2013 | if(!((unsigned int)CTable & -2)) { ml_set_interrupts_enabled(inter); return; } /* Leave if we aren't doing VMs any more, restoring the interrupt state */ | |
2014 | ||
2015 | if(act->machine.vmmCEntry && (act->machine.vmmCEntry->vmmFlags & vmmXStop)) { /* Do we need to stop the running guy? */ | |
2016 | sv = find_user_regs(act); /* Get the user state registers */ | |
2017 | if(!sv) { /* Did we find something? */ | |
2018 | panic("vmm_interrupt: no user context; act = %p\n", act); | |
2019 | } | |
2020 | sv->save_exception = kVmmStopped*4; /* Set a "stopped" exception */ | |
2021 | vmm_force_exit(act, sv); /* Intercept a running VM */ | |
2022 | } | |
2023 | ml_set_interrupts_enabled(inter); /* Put interrupts back to what they were */ | |
2024 | } |