/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.c
**
** C routines that we are adding to the MacOS X kernel.
**
-----------------------------------------------------------------------*/

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread_act.h>
#include <vm/vm_kern.h>

#include <ppc/vmachmon.h>

extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */
extern double FloatInit;
extern unsigned long QNaNbarbarian[4];

/*************************************************************************************
	Virtual Machine Monitor Internal Routines
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function verifies and returns a vmm context entry index.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/

vmmCntrlEntry *vmm_get_entry(
	thread_act_t			act,
	vmm_thread_index_t		index)
{
	vmmCntrlTable			*CTable;
	vmmCntrlEntry			*CEntry;

	index = index & vmmTInum;							/* Clean up the index */

	if (act->mact.vmmControl == 0) return NULL;			/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContexts) return NULL;	/* Index not in range */

	CTable = act->mact.vmmControl;						/* Make the address a bit more convenient */
	CEntry = &CTable->vmmc[index - 1];					/* Point to the entry */

	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;	/* See if the slot is actually in use */

	return CEntry;
}

/*-----------------------------------------------------------------------
** vmm_get_adsp
**
** This function verifies and returns the pmap for an address space.
** If there is none and the request is valid, a pmap will be created.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a pmap or 0 if not found or could not be created
**		Note that if there is no pmap for the address space it will be created.
-----------------------------------------------------------------------*/

pmap_t vmm_get_adsp(thread_act_t act, vmm_thread_index_t index)
{
	pmap_t pmap;

	if (act->mact.vmmControl == 0) return NULL;			/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContexts) return NULL;	/* Index not in range */

	pmap = act->mact.vmmControl->vmmAdsp[index - 1];	/* Get the pmap */
	if(pmap) return pmap;								/* We've got it... */

	pmap = pmap_create(0);								/* Make a fresh one */
	act->mact.vmmControl->vmmAdsp[index - 1] = pmap;	/* Remember it */
/*
 *	Note that if the create fails, we will return a null.
 */
	return pmap;										/* Return it... */
}



/*************************************************************************************
	Virtual Machine Monitor Exported Functionality

	The following routines are used to implement a quick-switch mechanism for
	virtual machines that need to execute within their own processor environment
	(including register and MMU state).
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for them.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing major/minor version of
**		the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_version(struct savearea *save)
{
	save->save_r3 = kVmmCurrentVersion;		/* Return the version */
	return 1;
}
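
/*
 *	Illustrative sketch, not part of the original source: one way a client
 *	could decode the version word returned above. The helper name and the
 *	required-version policy are assumptions; only the 16/16 major/minor
 *	split comes from the interface description.
 */
#if 0	/* example only, never compiled */
static int vmm_version_ok(unsigned int vers, unsigned int need_major, unsigned int need_minor)
{
	if ((vers >> 16) != need_major) return 0;	/* Major version must match exactly */
	return ((vers & 0xFFFF) >= need_minor);		/* Minor must be at least what the client needs */
}
#endif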


/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing functionality supported by this
**		version of the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_features(struct savearea *save)
{
	save->save_r3 = kVmmCurrentFeatures;			/* Return the features */
	if(per_proc_info->pf.Available & pf64Bit) {
		save->save_r3 &= ~kVmmFeature_LittleEndian;	/* No little endian here */
		save->save_r3 |= kVmmFeature_SixtyFourBit;	/* Set that we can do 64-bit */
	}
	return 1;
}
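
/*
 *	Illustrative sketch, not part of the original source: testing the feature
 *	word returned above. kVmmFeature_SixtyFourBit and kVmmFeature_LittleEndian
 *	are the real flags used in this file; the helper itself is hypothetical.
 */
#if 0	/* example only, never compiled */
static int vmm_host_is_64bit_capable(unsigned int feats)
{
	return ((feats & kVmmFeature_SixtyFourBit) != 0);	/* Set above when the host is 64-bit */
}
#endif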


/*-----------------------------------------------------------------------
** vmm_max_addr
**
** This function returns the maximum addressable virtual address supported.
**
** Outputs:
**		Returns max address
-----------------------------------------------------------------------*/

addr64_t vmm_max_addr(thread_act_t act)
{
	return vm_max_address;					/* Return the maximum address */
}

/*-----------------------------------------------------------------------
** vmm_get_XA
**
** This function retrieves the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Return code is set to the XA flags. If the index is invalid or the
**		context has not been created, we return 0.
-----------------------------------------------------------------------*/

unsigned int vmm_get_XA(
	thread_act_t			act,
	vmm_thread_index_t		index)
{
	vmmCntrlEntry			*CEntry;

	CEntry = vmm_get_entry(act, index);		/* Convert index to entry */
	if (CEntry == NULL) return 0;			/* Either this isn't a vmm or the index is bogus */

	return CEntry->vmmXAFlgs;				/* Return the flags */
}

/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**		act - pointer to current thread activation
**		version - requested version of the Vmm interface (allowing
**			future versions of the interface to change, but still
**			support older clients)
**		vmm_user_state - pointer to a logical page within the
**			client's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

int vmm_init_context(struct savearea *save)
{

	thread_act_t		act;
	vmm_version_t		version;
	vmm_state_page_t	*vmm_user_state;
	vmmCntrlTable		*CTable;
	vm_offset_t			conkern;
	vmm_state_page_t	*vks;
	ppnum_t				conphys;
	kern_return_t		ret;
	pmap_t				new_pmap;
	int					cvi, i;
	task_t				task;
	thread_act_t		fact, gact;

	vmm_user_state = CAST_DOWN(vmm_state_page_t *, save->save_r4);	/* Get the user address of the comm area */
	if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {	/* Make sure the comm area is page aligned */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	/* Make sure that the version requested is supported */
	version = save->save_r3;					/* Pick up passed in version */
	if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	if((version & 0xFFFF) > kVmmCurMinorVersion) {	/* Check for valid minor */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	act = current_act();						/* Pick up our activation */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();						/* Figure out who we are */

	task_lock(task);							/* Lock our task */

	fact = (thread_act_t)task->threads.next;	/* Get the first activation on task */
	gact = 0;									/* Pretend we didn't find it yet */

	for(i = 0; i < task->thread_count; i++) {	/* All of the activations */
		if(fact->mact.vmmControl) {				/* Is this a virtual machine monitor? */
			gact = fact;						/* Yeah... */
			break;								/* Bail the loop... */
		}
		fact = (thread_act_t)fact->task_threads.next;	/* Go to the next one */
	}


/*
 *	We only allow one thread per task to be a virtual machine monitor right now. This solves
 *	a number of potential problems that I can't put my finger on right now.
 *
 *	Ultimately, I think we want to move the controls and make all this task based instead of
 *	thread based. That would allow an emulator architecture to spawn a kernel thread for each
 *	VM (if they want) rather than hand dispatch contexts.
 */

	if(gact && (gact != act)) {					/* Check if another thread is a vmm or trying to be */
		task_unlock(task);						/* Release task lock */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_FAILURE;			/* We must play alone... */
		return 1;
	}

	if(!gact) act->mact.vmmControl = (vmmCntrlTable *)1;	/* Temporarily mark that we are the vmm thread */

	task_unlock(task);							/* Safe to release now (because we've marked ourselves) */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	if ((unsigned int)CTable == 1) {			/* If we are marked, try to allocate a new table, otherwise we have one */
		if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {	/* Get a fresh emulation control table */
			act->mact.vmmControl = 0;			/* Unmark us as vmm 'cause we failed */
			ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
			save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
			return 1;
		}

		bzero((void *)CTable, sizeof(vmmCntrlTable));	/* Clean it up */
		act->mact.vmmControl = CTable;			/* Initialize the table anchor */
	}

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search to find a free slot */
		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;	/* Bail if we find an unused slot */
	}

	if(cvi >= kVmmMaxContexts) {				/* Did we find one? */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No empty slots... */
		return 1;
	}

	ret = vm_map_wire(							/* Wire the virtual machine monitor's context area */
		act->map,
		(vm_offset_t)vmm_user_state,
		(vm_offset_t)vmm_user_state + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if (ret != KERN_SUCCESS)					/* The wire failed, return the code */
		goto return_in_shame;

	/* Map the vmm state into the kernel's address space. */
	conphys = pmap_find_phys(act->map->pmap, (addr64_t)((uintptr_t)vmm_user_state));

	/* Find a virtual address to use. */
	ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {					/* Did we find an address? */
		(void) vm_map_unwire(act->map,			/* No, unwire the context area */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		goto return_in_shame;
	}

	/* Map it into the kernel's address space. */

	pmap_enter(kernel_pmap, conkern, conphys,
		VM_PROT_READ | VM_PROT_WRITE,
		VM_WIMG_USE_DEFAULT, TRUE);

	/* Clear the vmm state structure. */
	vks = (vmm_state_page_t *)conkern;
	bzero((char *)vks, PAGE_SIZE);


	/* We're home free now. Simply fill in the necessary info and return. */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = cvi + 1;				/* Tell the user the index for this virtual machine */

	CTable->vmmc[cvi].vmmFlags = vmmInUse;		/* Mark the slot in use and make sure the rest are clear */
	CTable->vmmc[cvi].vmmContextKern = vks;		/* Remember the kernel address of comm area */
	CTable->vmmc[cvi].vmmContextPhys = (vmm_state_page_t *)conphys;	/* Remember the state page physical addr */
	CTable->vmmc[cvi].vmmContextUser = vmm_user_state;	/* Remember user address of comm area */

	CTable->vmmc[cvi].vmmFacCtx.FPUsave = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUlevel = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXsave = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXlevel = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.facAct = act;	/* Point back to the activation */

	hw_atomic_add((int *)&saveanchor.savetarget, 2);	/* Account for the number of extra saveareas we think we might "need" */

	if (!(act->map->pmap->pmapFlags & pmapVMhost)) {
		simple_lock(&(act->map->pmap->lock));
		act->map->pmap->pmapFlags |= pmapVMhost;
		simple_unlock(&(act->map->pmap->lock));
	}

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;

return_in_shame:
	if(!gact) kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table if we just allocated it */
	act->mact.vmmControl = 0;					/* Unmark us as vmm 'cause we failed */
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = ret;						/* Pass back return code... */
	return 1;

}
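
/*
 *	Illustrative sketch, not part of the original source: what a user-level
 *	client must guarantee before asking for a context. vmm_init_context wires
 *	and double-maps exactly one page, so the comm area must be page-sized and
 *	page-aligned. vm_allocate returns page-aligned memory, which satisfies the
 *	alignment check above; the helper name is an assumption, and the dispatch
 *	path that actually reaches vmm_init_context is elided.
 */
#if 0	/* example only, user-level code, never compiled here */
#include <mach/mach.h>

static vmm_state_page_t *alloc_comm_area(void)
{
	vm_address_t addr = 0;

	if (vm_allocate(mach_task_self(), &addr, vm_page_size, TRUE) != KERN_SUCCESS)
		return NULL;						/* No memory for the comm area */
	return (vmm_state_page_t *)addr;		/* Page-aligned by construction */
}
#endif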


/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		kernel return code indicating success or failure
**
** Strangeness note:
**		This call will also trash the address space with the same ID. While this
**		is really not too cool, we have to do it because we need to make
**		sure that old VMM users (not that we really have any) who depend upon
**		the address space going away with the context still work the same.
-----------------------------------------------------------------------*/

kern_return_t vmm_tear_down_context(
	thread_act_t			act,
	vmm_thread_index_t		index)
{
	vmmCntrlEntry			*CEntry;
	vmmCntrlTable			*CTable;
	int						cvi;
	register savearea		*sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	hw_atomic_sub((int *)&saveanchor.savetarget, 2);	/* We don't need these extra saveareas anymore */

	if(CEntry->vmmFacCtx.FPUsave) {				/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {				/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}

	CEntry->vmmPmap = 0;						/* Remove this trace */
	if(act->mact.vmmControl->vmmAdsp[index - 1]) {	/* Check if there is an address space assigned here */
		mapping_remove(act->mact.vmmControl->vmmAdsp[index - 1], 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
		pmap_remove(act->mact.vmmControl->vmmAdsp[index - 1], 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
		pmap_destroy(act->mact.vmmControl->vmmAdsp[index - 1]);	/* Toss the pmap for this context */
		act->mact.vmmControl->vmmAdsp[index - 1] = NULL;	/* Clean it up */
	}

	(void) vm_map_unwire(						/* Unwire the user comm page */
		act->map,
		(vm_offset_t)CEntry->vmmContextUser,
		(vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
		FALSE);

	kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);	/* Remove kernel's view of the comm page */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;	/* Make sure we don't try to automap into this */

	CEntry->vmmFlags = 0;						/* Clear out all of the flags for this entry including in use */
	CEntry->vmmContextKern = 0;					/* Clear the kernel address of comm area */
	CEntry->vmmContextUser = 0;					/* Clear the user address of comm area */

	CEntry->vmmFacCtx.FPUsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.facAct = 0;				/* Clear facility context control */

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search to find a free slot */
		if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {	/* Return if there are still some in use */
			ml_set_interrupts_enabled(FALSE);	/* No more interruptions */
			return KERN_SUCCESS;				/* Leave... */
		}
	}

/*
 *	When we have tossed the last context, toss any address spaces left over before releasing
 *	the VMM control block
 */

	for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {	/* Look at all slots */
		if(!act->mact.vmmControl->vmmAdsp[cvi - 1]) continue;	/* Nothing to remove here */
		mapping_remove(act->mact.vmmControl->vmmAdsp[cvi - 1], 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
		pmap_remove(act->mact.vmmControl->vmmAdsp[cvi - 1], 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
		pmap_destroy(act->mact.vmmControl->vmmAdsp[cvi - 1]);	/* Toss the pmap for this context */
		act->mact.vmmControl->vmmAdsp[cvi - 1] = 0;	/* Clear just in case */
	}

	kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table because we tossed the last context */
	act->mact.vmmControl = 0;					/* Unmark us as vmm */

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return KERN_SUCCESS;
}


/*-----------------------------------------------------------------------
** vmm_set_XA
**
** This function sets the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
** Note that this function does a lot of the same stuff as vmm_tear_down_context
** and vmm_init_context.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**		flags - the extended architecture flags
**
** Outputs:
**		KERN_SUCCESS if vm is valid and initialized. KERN_FAILURE if not.
**		Also, the internal flags are set and, additionally, the VM is completely reset.
-----------------------------------------------------------------------*/

kern_return_t vmm_set_XA(
	thread_act_t			act,
	vmm_thread_index_t		index,
	unsigned int			xaflags)
{
	vmmCntrlEntry			*CEntry;
	vmmCntrlTable			*CTable;
	vmm_state_page_t		*vks;
	vmm_version_t			version;

	if(xaflags & ~vmm64Bit) return KERN_FAILURE;	/* We only support this one kind now */

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	if(CEntry->vmmFacCtx.FPUsave) {				/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {				/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}

	CTable = act->mact.vmmControl;				/* Get the control table address */
	CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;	/* Make sure we don't try to automap into this */

	CEntry->vmmFlags &= vmmInUse;				/* Clear out all of the flags for this entry except in use */
	CEntry->vmmXAFlgs = (xaflags & vmm64Bit) | (CEntry->vmmXAFlgs & ~vmm64Bit);	/* Set the XA flags */
	CEntry->vmmFacCtx.FPUsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;				/* Clear facility context control */

	vks = CEntry->vmmContextKern;				/* Get address of the context page */
	version = vks->interface_version;			/* Save the version code */
	bzero((char *)vks, 4096);					/* Clear all */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = index % vmmTInum;		/* Tell the user the index for this virtual machine */

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return KERN_SUCCESS;						/* Return the flags */
}


/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**		activation to tear down
**
** Outputs:
**		All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/
void vmm_tear_down_all(thread_act_t act) {

	vmmCntrlTable		*CTable;
	int					cvi;
	kern_return_t		ret;
	savearea			*save;
	spl_t				s;

	if(act->mact.specFlags & runningVM) {		/* Are we actually in a context right now? */
		save = find_user_regs(act);				/* Find the user state context */
		if(!save) {								/* Did we find it? */
			panic("vmm_tear_down_all: runningVM marked but no user state context\n");
			return;
		}

		save->save_exception = kVmmBogusContext*4;	/* Indicate that this context is bogus now */
		s = splhigh();							/* Make sure interrupts are off */
		vmm_force_exit(act, save);				/* Force an exit from VM state */
		splx(s);								/* Restore interrupts */
	}

	if(CTable = act->mact.vmmControl) {			/* Do we have a vmm control block? */


		for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {	/* Look at all slots */
			if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) {	/* Is this one in use */
				ret = vmm_tear_down_context(act, cvi);	/* Take down the found context */
				if(ret != KERN_SUCCESS) {		/* Did it go away? */
					panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
						ret, act, cvi);
				}
			}
		}

/*
 *		Note that all address spaces should be gone here.
 */
		if(act->mact.vmmControl) {				/* Did we find one? */
			panic("vmm_tear_down_all: control table did not get deallocated\n");	/* Table did not go away */
		}
	}

	return;
}

/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it and this mapping may disappear at any time, even before it gets used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of address space to map into
**		va    - virtual address within the client's address
**			space
**		ava   - virtual address within the alternate address
**			space
**		prot  - protection flags
**
**	Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
**	areas are not allowed and will fail. Same with directly mapped I/O areas.
**
** Input conditions:
**		Interrupts disabled (from fast trap)
**
** Outputs:
**		kernel return code indicating success or failure
**		if success, va resident and alternate mapping made
-----------------------------------------------------------------------*/

kern_return_t vmm_map_page(
	thread_act_t			act,
	vmm_adsp_id_t			index,
	addr64_t				cva,
	addr64_t				ava,
	vm_prot_t				prot)
{
	kern_return_t			ret;
	vmmCntrlEntry			*CEntry;
	register mapping		*mp;
	struct phys_entry		*pp;
	vm_map_t				map;
	addr64_t				ova, nextva;
	pmap_t					pmap;

	pmap = vmm_get_adsp(act, index);			/* Get the pmap for this address space */
	if(!pmap) return KERN_FAILURE;				/* Bogus address space, no VMs, or we can't make a pmap, failure... */

	if(ava > vm_max_address) return kVmmInvalidAddress;	/* Does the machine support an address of this size? */

	map = current_act()->map;					/* Get the current map */

	while(1) {									/* Keep trying until we get it or until we fail */

		mp = mapping_find(map->pmap, cva, &nextva, 0);	/* Find the mapping for this address */

		if(mp) break;							/* We found it */

		ml_set_interrupts_enabled(TRUE);		/* Enable interruptions */
		ret = vm_fault(map, trunc_page_32((vm_offset_t)cva), VM_PROT_READ | VM_PROT_WRITE, FALSE);	/* Didn't find it, try to fault it in read/write... */
		ml_set_interrupts_enabled(FALSE);		/* Disable interruptions */
		if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* There isn't a page there, return... */
	}

	if(mp->mpFlags & (mpBlock | mpNest | mpSpecial)) {	/* If this is a block, a nest, or some other special thing, we can't map it */
		mapping_drop_busy(mp);					/* We have everything we need from the mapping */
		return KERN_FAILURE;					/* Leave in shame */
	}

	while(1) {									/* Keep trying the enter until it goes in */
		ova = mapping_make(pmap, ava, mp->mpPAddr, 0, 1, prot);	/* Enter the mapping into the pmap */
		if(!ova) break;							/* If there were no collisions, we are done... */
		mapping_remove(pmap, ova);				/* Remove the mapping that collided */
	}

	mapping_drop_busy(mp);						/* We have everything we need from the mapping */

	if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode)) {
		act->mact.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->mact.vmmControl->vmmGFlags = (act->mact.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;	/* Remember last address space */
	}

	return KERN_SUCCESS;
}
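
/*
 *	Illustrative sketch, not part of the original source: the block comment
 *	above warns that an unwired page, and therefore this mapping, can vanish
 *	at any time. A cautious caller could map and then immediately re-check
 *	with vmm_get_page_mapping (defined below); both routines are real, the
 *	wrapper is hypothetical.
 */
#if 0	/* example only, never compiled */
static kern_return_t vmm_map_page_checked(thread_act_t act, vmm_adsp_id_t index,
	addr64_t cva, addr64_t ava, vm_prot_t prot)
{
	kern_return_t ret = vmm_map_page(act, index, cva, ava, prot);
	if (ret != KERN_SUCCESS) return ret;

	/* A -1 here means the mapping already disappeared (page was not wired) */
	return (vmm_get_page_mapping(act, index, ava) == -1) ? KERN_FAILURE : KERN_SUCCESS;
}
#endif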


/*-----------------------------------------------------------------------
** vmm_map_execute
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context and then directly starts executing.
**
**	See description of vmm_map_page for details.
**
**	Inputs:
**		Index is used for both the context and the address space ID.
**		index[24:31] is the context id and index[16:23] is the address space.
**		if the address space ID is 0, the context ID is used for it.
**
**	Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_map_execute(
	thread_act_t			act,
	vmm_thread_index_t		index,
	addr64_t				cva,
	addr64_t				ava,
	vm_prot_t				prot)
{
	kern_return_t			ret;
	vmmCntrlEntry			*CEntry;
	unsigned int			adsp;
	vmm_thread_index_t		cndx;

	cndx = index & 0xFF;						/* Clean it up */

	CEntry = vmm_get_entry(act, cndx);			/* Get and validate the index */
	if (CEntry == NULL) return kVmmBogusContext;	/* Return bogus context */

	if (((per_proc_info[cpu_number()].spcFlags) & FamVMmode) && (CEntry != act->mact.vmmCEntry))
		return kVmmBogusContext;				/* Yes, invalid index in Fam */

	adsp = (index >> 8) & 0xFF;					/* Get any requested address space */
	if(!adsp) adsp = (index & 0xFF);			/* If 0, use context ID as address space ID */

	ret = vmm_map_page(act, adsp, cva, ava, prot);	/* Go try to map the page on in */


	if(ret == KERN_SUCCESS) {
		act->mact.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->mact.vmmControl->vmmGFlags = (act->mact.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;	/* Remember last address space */
		vmm_execute_vm(act, cndx);				/* Return was ok, launch the VM */
	}

	return ret;									/* We had trouble mapping in the page */

}
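
/*
 *	Illustrative sketch, not part of the original source: packing the combined
 *	index decoded above -- the context id lives in the low byte (bits 24:31 in
 *	PowerPC numbering) and the address space id in the next byte (bits 16:23).
 *	The macro name is an assumption.
 */
#if 0	/* example only, never compiled */
#define VMM_EXEC_INDEX(ctx, adsp) \
	((vmm_thread_index_t)((((adsp) & 0xFF) << 8) | ((ctx) & 0xFF)))
#endif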

/*-----------------------------------------------------------------------
** vmm_map_list
**
** This function maps a list of pages into various address spaces
**
** Inputs:
**		act    - pointer to current thread activation
**		index  - index of default address space (used if not specified in list entry)
**		count  - number of pages to map
**		flavor - 0 if 32-bit version, 1 if 64-bit
**		vmcpComm in the comm page contains up to kVmmMaxMapPages to map
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxMapPages is exceeded
**		or the vmm_map_page call fails.
**		We return kVmmInvalidAddress if virtual address size is not supported
-----------------------------------------------------------------------*/

kern_return_t vmm_map_list(
	thread_act_t			act,
	vmm_adsp_id_t			index,
	unsigned int			cnt,
	unsigned int			flavor)
{
	vmmCntrlEntry			*CEntry;
	kern_return_t			ret;
	unsigned int			i;
	vmmMList				*lst;
	vmmMList64				*lstx;
	addr64_t				cva;
	addr64_t				ava;
	vm_prot_t				prot;
	vmm_adsp_id_t			adsp;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	if(cnt > kVmmMaxMapPages) return KERN_FAILURE;	/* They tried to map too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	lst = (vmmMList *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */
	lstx = (vmmMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */

	for(i = 0; i < cnt; i++) {					/* Step through and map all pages in list */
		if(flavor) {							/* Check if 32- or 64-bit addresses */
			cva = lstx[i].vmlva;				/* Get the 64-bit actual address */
			ava = lstx[i].vmlava;				/* Get the 64-bit guest address */
		}
		else {
			cva = lst[i].vmlva;					/* Get the 32-bit actual address */
			ava = lst[i].vmlava;				/* Get the 32-bit guest address */
		}

		prot = ava & vmmlProt;					/* Extract the protection bits */
		adsp = (ava & vmmlAdID) >> 4;			/* Extract an explicit address space request */
		if(!adsp) adsp = index - 1;				/* If no explicit, use supplied default */
		ava &= 0xFFFFFFFFFFFFF000ULL;			/* Clean up the address */

		ret = vmm_map_page(act, adsp + 1, cva, ava, prot);	/* Go try to map the page on in */
		if(ret != KERN_SUCCESS) return ret;		/* Bail if any error */
	}

	return KERN_SUCCESS;						/* Return... */
}
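
/*
 *	Illustrative sketch, not part of the original source: building one 64-bit
 *	map-list entry the way the loop above decodes it -- the low bits of vmlava
 *	carry the protection (vmmlProt) and an optional explicit address space id
 *	(vmmlAdID), and the rest is the page-aligned guest address. The helper
 *	name is an assumption; the field and mask names are from vmachmon.h.
 */
#if 0	/* example only, never compiled */
static void vmm_fill_map_entry(vmmMList64 *e, addr64_t host_va, addr64_t guest_va,
	vm_prot_t prot, unsigned int adsp)
{
	e->vmlva = host_va;								/* Host (actual) address of the page */
	e->vmlava = (guest_va & 0xFFFFFFFFFFFFF000ULL)	/* Page-aligned guest address */
		| (((addr64_t)adsp << 4) & vmmlAdID)		/* Optional explicit address space id */
		| (prot & vmmlProt);						/* Protection bits */
}
#endif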

/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** This function determines whether the specified VMM
** virtual address is mapped.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the alternate's address
**			space
**
** Outputs:
**		Non-alternate's virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**		If there are aliases to the page in the non-alternate address space,
**		this call could return the wrong one. Moral of the story: no aliases.
-----------------------------------------------------------------------*/

addr64_t vmm_get_page_mapping(
	thread_act_t			act,
	vmm_adsp_id_t			index,
	addr64_t				va)
{
	vmmCntrlEntry			*CEntry;
	register mapping		*mp;
	pmap_t					pmap;
	addr64_t				nextva, sva;
	ppnum_t					pa;

	pmap = vmm_get_adsp(act, index);			/* Get and validate the index */
	if (!pmap) return -1;						/* No good, failure... */

	mp = mapping_find(pmap, va, &nextva, 0);	/* Find our page */

	if(!mp) return -1;							/* Not mapped, return -1 */

	pa = mp->mpPAddr;							/* Remember the page address */

	mapping_drop_busy(mp);						/* Go ahead and release the mapping now */

	pmap = current_act()->map->pmap;			/* Get the current pmap */
	sva = mapping_p2v(pmap, pa);				/* Now find the source virtual */

	if(sva != 0) return sva;					/* We found it... */

	panic("vmm_get_page_mapping: could not back-map alternate va (%016llX)\n", va);	/* We are bad wrong if we can't find it */

	return -1;
}

/*-----------------------------------------------------------------------
** vmm_unmap_page
**
** This function unmaps a page from the alternate's logical
** address space.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the vmm's address
**			space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_page(
	thread_act_t			act,
	vmm_adsp_id_t			index,
	addr64_t				va)
{
	vmmCntrlEntry			*CEntry;
	addr64_t				nadd;
	pmap_t					pmap;

	pmap = vmm_get_adsp(act, index);			/* Get and validate the index */
	if (!pmap) return KERN_FAILURE;				/* No good, failure... */

	nadd = mapping_remove(pmap, va);			/* Toss the mapping */

	return ((nadd & 1) ? KERN_FAILURE : KERN_SUCCESS);	/* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_list
**
** This function unmaps a list of pages from the alternate's logical
** address space.
**
** Inputs:
**		act    - pointer to current thread activation
**		index  - index of vmm state for this page
**		count  - number of pages to release
**		flavor - 0 if 32-bit, 1 if 64-bit
**		vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_list(
	thread_act_t			act,
	vmm_adsp_id_t			index,
	unsigned int			cnt,
	unsigned int			flavor)
{
	vmmCntrlEntry			*CEntry;
	unsigned int			i;
	addr64_t				gva;
	vmmUMList				*lst;
	vmmUMList64				*lstx;
	pmap_t					pmap;
	int						adsp;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE;	/* They tried to unmap too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	lstx = (vmmUMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */
	lst = (vmmUMList *)lstx;

	for(i = 0; i < cnt; i++) {					/* Step through and unmap all pages in list */
		if(flavor) {							/* Check if 32- or 64-bit addresses */
			gva = lstx[i].vmlava;				/* Get the 64-bit guest address */
		}
		else {
			gva = lst[i].vmlava;				/* Get the 32-bit guest address */
		}

		adsp = (gva & vmmlAdID) >> 4;			/* Extract an explicit address space request */
		if(!adsp) adsp = index - 1;				/* If no explicit, use supplied default */
		pmap = act->mact.vmmControl->vmmAdsp[adsp];	/* Get the pmap for this request */
		if(!pmap) continue;						/* Ain't nuthin' mapped here, no durn map... */

		gva &= 0xFFFFFFFFFFFFF000ULL;			/* Clean up the address */
		(void)mapping_remove(pmap, gva);		/* Toss the mapping */
	}

	return KERN_SUCCESS;						/* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_all_pages
**
** This function unmaps all pages from the alternate's logical
** address space.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of context state
**
** Outputs:
**		none
**
** Note:
**		All pages are unmapped, but the address space (i.e., pmap) is still alive
-----------------------------------------------------------------------*/

void vmm_unmap_all_pages(
	thread_act_t			act,
	vmm_adsp_id_t			index)
{
	vmmCntrlEntry			*CEntry;
	pmap_t					pmap;

	pmap = vmm_get_adsp(act, index);			/* Convert index to entry */
	if (!pmap) return;							/* Either this isn't vmm thread or the index is bogus */

/*
 *	Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
 */
	mapping_remove(pmap, 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
	pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
	return;
}


/*-----------------------------------------------------------------------
** vmm_get_page_dirty_flag
**
** This function returns the changed flag of the page
** and optionally clears the flag.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the vmm's address
**			space
**		reset - clears dirty if true, untouched if not
**
** Outputs:
**		the dirty bit
**		clears the dirty bit in the pte if requested
**
** Note:
**		The RC bits are merged into the global physical entry
-----------------------------------------------------------------------*/
boolean_t vmm_get_page_dirty_flag(
	thread_act_t			act,
	vmm_adsp_id_t			index,
	addr64_t				va,
	unsigned int			reset)
{
	vmmCntrlEntry			*CEntry;
	register mapping		*mpv, *mp;
	unsigned int			RC;
	pmap_t					pmap;

	pmap = vmm_get_adsp(act, index);			/* Convert index to entry */
	if (!pmap) return 1;						/* Either this isn't vmm thread or the index is bogus */

	RC = hw_test_rc(pmap, (addr64_t)va, reset);	/* Fetch the RC bits and clear if requested */

	switch (RC & mapRetCode) {					/* Decode return code */

		case mapRtOK:							/* Changed */
			return ((RC & (unsigned int)mpC) == (unsigned int)mpC);	/* Return if dirty or not */
			break;

		case mapRtNotFnd:						/* Didn't find it */
			return 1;							/* Return dirty */
			break;

		default:
			panic("vmm_get_page_dirty_flag: hw_test_rc failed - rc = %d, pmap = %08X, va = %016llX\n", RC, pmap, va);

	}

	return 1;									/* Return the change bit */
}
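
/*
 *	Illustrative sketch, not part of the original source: a hypothetical sweep
 *	that uses the routine above to test and clear the dirty state of a run of
 *	guest pages, counting the ones that would need to be written back.
 */
#if 0	/* example only, never compiled */
static unsigned int vmm_count_dirty_pages(thread_act_t act, vmm_adsp_id_t index,
	addr64_t va, unsigned int npages)
{
	unsigned int i, dirty = 0;

	for (i = 0; i < npages; i++, va += PAGE_SIZE) {
		if (vmm_get_page_dirty_flag(act, index, va, 1))	/* 1 = reset the bit after reading */
			dirty++;
	}
	return dirty;
}
#endif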


/*-----------------------------------------------------------------------
** vmm_protect_page
**
** This function sets the protection bits of a mapped page
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the vmm's address
**			space
**		prot  - protection flags
**
** Outputs:
**		none
**		Protection bits of the mapping are modified
**
-----------------------------------------------------------------------*/

kern_return_t vmm_protect_page(
	thread_act_t			act,
	vmm_adsp_id_t			index,
	addr64_t				va,
	vm_prot_t				prot)
{
	vmmCntrlEntry			*CEntry;
	addr64_t				nextva;
	int						ret;
	pmap_t					pmap;

	pmap = vmm_get_adsp(act, index);			/* Convert index to entry */
	if (!pmap) return KERN_FAILURE;				/* Either this isn't vmm thread or the index is bogus */

	ret = hw_protect(pmap, va, prot, &nextva);	/* Try to change the protect here */

	switch (ret) {								/* Decode return code */

		case mapRtOK:							/* All ok... */
			break;								/* Outta here */

		case mapRtNotFnd:						/* Didn't find it */
			return KERN_SUCCESS;				/* Ok, return... */
			break;

		default:
			panic("vmm_protect_page: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, (addr64_t)va);

	}

	if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode)) {
		act->mact.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->mact.vmmControl->vmmGFlags = (act->mact.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;	/* Remember last address space */
	}

	return KERN_SUCCESS;						/* Return */
}


/*-----------------------------------------------------------------------
** vmm_protect_execute
**
** This function sets the protection bits of a mapped page
** and then directly starts executing.
**
**	See description of vmm_protect_page for details
**
**	Inputs:
**		See vmm_protect_page and vmm_map_execute
**
**	Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_protect_execute(
	thread_act_t			act,
	vmm_thread_index_t		index,
	addr64_t				va,
	vm_prot_t				prot)
{
	kern_return_t			ret;
	vmmCntrlEntry			*CEntry;
	unsigned int			adsp;
	vmm_thread_index_t		cndx;

	cndx = index & 0xFF;						/* Clean it up */
	CEntry = vmm_get_entry(act, cndx);			/* Get and validate the index */
	if (CEntry == NULL) return kVmmBogusContext;	/* Return bogus context */

	adsp = (index >> 8) & 0xFF;					/* Get any requested address space */
	if(!adsp) adsp = (index & 0xFF);			/* If 0, use context ID as address space ID */

	if (((per_proc_info[cpu_number()].spcFlags) & FamVMmode) && (CEntry != act->mact.vmmCEntry))
		return kVmmBogusContext;				/* Yes, invalid index in Fam */

	ret = vmm_protect_page(act, adsp, va, prot);	/* Go try to change access */

	if(ret == KERN_SUCCESS) {
		act->mact.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->mact.vmmControl->vmmGFlags = (act->mact.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;	/* Remember last address space */
		vmm_execute_vm(act, cndx);				/* Return was ok, launch the VM */
	}

	return ret;									/* We had trouble of some kind (shouldn't happen) */

}


/*-----------------------------------------------------------------------
** vmm_get_float_state
**
** This function causes the current floating point state to
** be saved into the shared context area. It also clears the
** vmmFloatCngd changed flag.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_float_state(
	thread_act_t			act,
	vmm_thread_index_t		index)
{
	vmmCntrlEntry			*CEntry;
	vmmCntrlTable			*CTable;
	int						i;
	register struct savearea_fpu *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */

	act->mact.specFlags &= ~floatCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;	/* Clear the change indication */

	fpu_save(&CEntry->vmmFacCtx);				/* Save context if live */

	if(sv = CEntry->vmmFacCtx.FPUsave) {		/* Is there context yet? */
		bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8);	/* 32 registers */
		return KERN_SUCCESS;
	}


	for(i = 0; i < 32; i++) {					/* Initialize floating points */
		CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit;	/* Initial value */
	}

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_get_vector_state
**
** This function causes the current vector state to
** be saved into the shared context area. It also clears the
** vmmVectorCngd changed flag.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_vector_state(
	thread_act_t			act,
	vmm_thread_index_t		index)
{
	vmmCntrlEntry			*CEntry;
	vmmCntrlTable			*CTable;
	int						i, j;
	unsigned int			vrvalidwrk;
	register struct savearea_vec *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */

	vec_save(&CEntry->vmmFacCtx);				/* Save context if live */

	act->mact.specFlags &= ~vectorCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;	/* Clear the change indication */

	if(sv = CEntry->vmmFacCtx.VMXsave) {		/* Is there context yet? */

		vrvalidwrk = sv->save_vrvalid;			/* Get the valid flags */

		for(i = 0; i < 32; i++) {				/* Copy the saved registers and invalidate the others */
			if(vrvalidwrk & 0x80000000) {		/* Do we have a valid value here? */
				for(j = 0; j < 4; j++) {		/* If so, copy it over */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
				}
			}
			else {
				for(j = 0; j < 4; j++) {		/* Otherwise set to empty value */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
				}
			}

			vrvalidwrk = vrvalidwrk << 1;		/* Shift over to the next */

		}

		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {					/* Initialize vector registers */
		for(j = 0; j < 4; j++) {				/* Do words */
			CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];	/* Initial value */
		}
	}

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_set_timer
**
** This function sets a timer (in AbsoluteTime) to pop at a specific time.
** It clears the vmmTimerPop flag if the timer is actually set;
** otherwise the timer is cleared.
**
** A timer is cleared by setting the time to 0. This will clear
** the vmmTimerPop bit. Simply setting the timer to earlier than the
** current time clears the internal timer request, but leaves the
** vmmTimerPop flag set.
**
** Inputs:
**		act     - pointer to current thread activation structure
**		index   - index returned by vmm_init_context
**		timerhi - high order word of AbsoluteTime to pop
**		timerlo - low order word of AbsoluteTime to pop
**
** Outputs:
**		timer set, vmmTimerPop cleared
-----------------------------------------------------------------------*/

kern_return_t vmm_set_timer(
	thread_act_t			act,
	vmm_thread_index_t		index,
	unsigned int			timerhi,
	unsigned int			timerlo)
{
	vmmCntrlEntry			*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */

	CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo;

	vmm_timer_pop(act);							/* Go adjust all of the timer stuff */
	return KERN_SUCCESS;						/* Leave now... */
}
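
/*
 *	Illustrative sketch, not part of the original source: arming a pop one
 *	second from now. clock_get_uptime and nanoseconds_to_absolutetime are
 *	existing kernel services; splitting the 64-bit deadline into the hi/lo
 *	words mirrors the reassembly done above. The helper is hypothetical.
 */
#if 0	/* example only, never compiled */
static kern_return_t vmm_arm_timer_1s(thread_act_t act, vmm_thread_index_t index)
{
	uint64_t now, delta;

	clock_get_uptime(&now);								/* Current AbsoluteTime */
	nanoseconds_to_absolutetime(1000000000ULL, &delta);	/* One second as AbsoluteTime */

	return vmm_set_timer(act, index,
		(unsigned int)((now + delta) >> 32),			/* timerhi */
		(unsigned int)(now + delta));					/* timerlo */
}
#endif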


/*-----------------------------------------------------------------------
** vmm_get_timer
**
** This function causes the timer for a specified VM to be
** returned in return_params[0] and return_params[1].
** Note that this is kind of funky for 64-bit VMs because we
** split the timer into two parts so that we still set parms 0 and 1.
** Obviously, we don't need to do this because the parms are 8 bytes
** wide.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Timer value set in return_params[0] and return_params[1].
**		Set to 0 if timer is not set.
-----------------------------------------------------------------------*/

kern_return_t vmm_get_timer(
	thread_act_t			act,
	vmm_thread_index_t		index)
{
	vmmCntrlEntry			*CEntry;
	vmmCntrlTable			*CTable;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */

	if(CEntry->vmmXAFlgs & vmm64Bit) {			/* A 64-bit virtual machine? */
		CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32);	/* Return the last timer value */
		CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the last timer value */
	}
	else {
		CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[0] = (CEntry->vmmTimer >> 32);	/* Return the last timer value */
		CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the last timer value */
	}
	return KERN_SUCCESS;
}


/*-----------------------------------------------------------------------
** vmm_timer_pop
**
** This function causes all timers in the array of VMs to be updated.
** All appropriate flags are set or reset. If a VM is currently
** running and its timer expired, it is intercepted.
**
** The qactTimer value is set to the lowest unexpired timer. It is
** zeroed if all timers are expired or have been reset.
**
** Inputs:
**		act - pointer to current thread activation structure
**
** Outputs:
**		timers set, vmmTimerPop cleared or set
-----------------------------------------------------------------------*/
1432
1433 void vmm_timer_pop(
1434 thread_act_t act)
1435 {
1436 vmmCntrlEntry *CEntry;
1437 vmmCntrlTable *CTable;
1438 int cvi, any;
1439 uint64_t now, soonest;
1440 savearea *sv;
1441
1442 if(!((unsigned int)act->mact.vmmControl & 0xFFFFFFFE)) { /* Are there any virtual machines? */
1443 panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
1444 }
1445
1446 soonest = 0xFFFFFFFFFFFFFFFFULL; /* Max time */
1447
1448 clock_get_uptime(&now); /* What time is it? */
1449
1450 CTable = act->mact.vmmControl; /* Make this easier */
1451 any = 0; /* Haven't found a running unexpired timer yet */
1452
1453 for(cvi = 0; cvi < kVmmMaxContexts; cvi++) { /* Cycle through all and check time now */
1454
1455 if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue; /* Do not check if the entry is empty */
1456
1457 if(CTable->vmmc[cvi].vmmTimer == 0) { /* Is the timer reset? */
1458 CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop; /* Clear timer popped */
1459 CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop; /* Clear timer popped */
1460 continue; /* Check next */
1461 }
1462
1463 if (CTable->vmmc[cvi].vmmTimer <= now) {
1464 CTable->vmmc[cvi].vmmFlags |= vmmTimerPop; /* Set timer popped here */
1465 CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop; /* Set timer popped here */
1466 if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->mact.vmmCEntry) { /* Is this the running VM? */
1467 sv = find_user_regs(act); /* Get the user state registers */
1468 if(!sv) { /* Did we find something? */
1469 panic("vmm_timer_pop: no user context; act = %08X\n", act);
1470 }
1471 sv->save_exception = kVmmReturnNull*4; /* Indicate that this is a null exception */
1472 vmm_force_exit(act, sv); /* Intercept a running VM */
1473 }
1474 continue; /* Check the rest */
1475 }
1476 else { /* It hasn't popped yet */
1477 CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop; /* Clear timer popped here */
1478 CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop; /* Clear timer popped here */
1479 }
1480
1481 any = 1; /* Show we found an active unexpired timer */
1482
1483 if (CTable->vmmc[cvi].vmmTimer < soonest)
1484 soonest = CTable->vmmc[cvi].vmmTimer;
1485 }
1486
1487 if(any) {
1488 if (act->mact.qactTimer == 0 || soonest <= act->mact.qactTimer)
1489 act->mact.qactTimer = soonest; /* Set lowest timer */
1490 }
1491
1492 return;
1493 }
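
/*-----------------------------------------------------------------------
** Example (editor's sketch, not part of the original source): the scan
** in vmm_timer_pop reduces to an "earliest unexpired deadline" pass.
** The standalone version below shows the same selection logic over a
** plain array; the function name and parameters are hypothetical.
-----------------------------------------------------------------------*/

#if 0 /* illustrative only -- not compiled */
static uint64_t example_soonest_deadline(uint64_t *timers, int count, uint64_t now)
{
	uint64_t soonest = 0xFFFFFFFFFFFFFFFFULL;	/* Max time, as above */
	int found = 0;	/* Haven't seen an unexpired timer yet */
	int i;

	for(i = 0; i < count; i++) {
		if(timers[i] == 0) continue;	/* Timer is reset; nothing to track */
		if(timers[i] <= now) continue;	/* Already popped; handled elsewhere */
		found = 1;	/* Found an active unexpired timer */
		if(timers[i] < soonest) soonest = timers[i];	/* Keep the earliest */
	}

	return found ? soonest : 0;	/* Zero mirrors a cleared qactTimer */
}
#endif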
1494
1495
1496
1497 /*-----------------------------------------------------------------------
1498 ** vmm_stop_vm
1499 **
1500 ** This function prevents the specified VM(s) from running.
1501 ** If any is currently executing, the execution is intercepted
1502 ** with a code of kVmmStopped. Note that execution of the VM is
1503 ** blocked until a vmmExecuteVM is called with the start flag set to 1.
1504 ** This provides the ability for a thread to stop execution of a VM and
1505 ** ensure that it will not be run until the emulator has processed the
1506 ** "virtual" interruption.
1507 **
1508 ** Inputs:
1509 ** vmmask - 32 bit mask corresponding to the VMs to put in stop state
1510 ** NOTE: if this mask is all 0s, the call is a no-op: KERN_SUCCESS is
1511 ** returned and no VM is stopped or intercepted. Also note that there
1512 ** is a potential race here and the VM may not stop.
1513 **
1514 ** Outputs:
1515 ** kernel return code indicating success,
1516 ** or an invalid syscall exception if no VMs are enabled.
1517 -----------------------------------------------------------------------*/
1518
1519 int vmm_stop_vm(struct savearea *save)
1520 {
1521
1522 thread_act_t act;
1523 vmmCntrlTable *CTable;
1524 int cvi, i;
1525 task_t task;
1526 thread_act_t fact;
1527 unsigned int vmmask;
1528 ReturnHandler *stopapc;
1529
1530 ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */
1531
1532 task = current_task(); /* Figure out who we are */
1533
1534 task_lock(task); /* Lock our task */
1535
1536 fact = (thread_act_t)task->threads.next; /* Get the first activation on task */
1537 act = 0; /* Pretend we didn't find it yet */
1538
1539 for(i = 0; i < task->thread_count; i++) { /* All of the activations */
1540 if(fact->mact.vmmControl) { /* Is this a virtual machine monitor? */
1541 act = fact; /* Yeah... */
1542 break; /* Bail the loop... */
1543 }
1544 fact = (thread_act_t)fact->task_threads.next; /* Go to the next one */
1545 }
1546
1547 if(!((unsigned int)act)) { /* See if we have VMMs yet */
1548 task_unlock(task); /* No, unlock the task */
1549 ml_set_interrupts_enabled(FALSE); /* Set back interruptions */
1550 return 0; /* Go generate a syscall exception */
1551 }
1552
1553 act_lock_thread(act); /* Make sure this stays 'round */
1554 task_unlock(task); /* Safe to release now */
1555
1556 CTable = act->mact.vmmControl; /* Get the pointer to the table */
1557
1558 if(!((unsigned int)CTable & -2)) { /* Is the control table actually set up yet? */
1559 act_unlock_thread(act); /* Unlock the activation */
1560 ml_set_interrupts_enabled(FALSE); /* Set back interruptions */
1561 return 0; /* Go generate a syscall exception */
1562 }
1563
1564 if(!(vmmask = save->save_r3)) { /* Get the stop mask and check if all zeros */
1565 act_unlock_thread(act); /* Unlock the activation */
1566 ml_set_interrupts_enabled(FALSE); /* Set back interruptions */
1567 save->save_r3 = KERN_SUCCESS; /* Set success */
1568 return 1; /* Return... */
1569 }
1570
1571 for(cvi = 0; cvi < kVmmMaxContexts; cvi++) { /* Search slots */
1572 if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) { /* See if we need to stop and if it is in use */
1573 hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop); /* Set this one to stop */
1574 }
1575 vmmask = vmmask << 1; /* Slide mask over */
1576 }
1577
1578 if(hw_compare_and_store(0, 1, &act->mact.emPendRupts)) { /* See if there is already a stop pending and lock out others if not */
1579 act_unlock_thread(act); /* Already one pending, unlock the activation */
1580 ml_set_interrupts_enabled(FALSE); /* Set back interruptions */
1581 save->save_r3 = KERN_SUCCESS; /* Say we did it... */
1582 return 1; /* Leave */
1583 }
1584
1585 if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) { /* Get a return handler control block */
1586 act->mact.emPendRupts = 0; /* No memory, say we have given up request */
1587 act_unlock_thread(act); /* Unlock the activation */
1588 ml_set_interrupts_enabled(FALSE); /* Set back interruptions */
1589 save->save_r3 = KERN_RESOURCE_SHORTAGE; /* No storage... */
1590 return 1; /* Return... */
1591 }
1592
1593 ml_set_interrupts_enabled(FALSE); /* Disable interruptions for now */
1594
1595 stopapc->handler = vmm_interrupt; /* Set interruption routine */
1596
1597 stopapc->next = act->handlers; /* Put our interrupt at the start of the list */
1598 act->handlers = stopapc; /* Point to us */
1599
1600 act_set_apc(act); /* Set an APC AST */
1601 ml_set_interrupts_enabled(TRUE); /* Enable interruptions now */
1602
1603 act_unlock_thread(act); /* Unlock the activation */
1604
1605 ml_set_interrupts_enabled(FALSE); /* Set back interruptions */
1606 save->save_r3 = KERN_SUCCESS; /* Hip, hip, hooray... */
1607 return 1;
1608 }
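
/*-----------------------------------------------------------------------
** Example (editor's sketch, not part of the original source): the mask
** scan above tests 0x80000000 and shifts left once per slot, so the
** most significant bit selects vmmc[0], i.e. one-based context index 1.
** A caller could build the stop mask for a single context like this
** (hypothetical helper name):
-----------------------------------------------------------------------*/

#if 0 /* illustrative only -- not compiled */
static unsigned int example_stop_mask(vmm_thread_index_t index)
{
	if((index < 1) || (index > kVmmMaxContexts)) return 0;	/* Out of range */
	return 0x80000000U >> (index - 1);	/* Bit 31 selects context 1 */
}
#endif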
1609
1610 /*-----------------------------------------------------------------------
1611 ** vmm_interrupt
1612 **
1613 ** This function is executed asynchronously from an APC AST.
1614 ** It is to be used for anything that needs to interrupt a running VM.
1615 ** This includes any kind of interruption generation (other than timer pop)
1616 ** or entering the stopped state.
1617 **
1618 ** Inputs:
1619 ** ReturnHandler *rh - the return handler control block as required by the APC.
1620 ** thread_act_t act - the activation
1621 **
1622 ** Outputs:
1623 ** Whatever needed to be done is done.
1624 -----------------------------------------------------------------------*/
1625
1626 void vmm_interrupt(ReturnHandler *rh, thread_act_t act) {
1627
1628 vmmCntrlTable *CTable;
1629 savearea *sv;
1630 boolean_t inter;
1631
1632
1633
1634 kfree((vm_offset_t)rh, sizeof(ReturnHandler)); /* Release the return handler block */
1635
1636 inter = ml_set_interrupts_enabled(FALSE); /* Disable interruptions for now */
1637
1638 act->mact.emPendRupts = 0; /* Say that there are no more interrupts pending */
1639 CTable = act->mact.vmmControl; /* Get the pointer to the table */
1640
1641 if(!((unsigned int)CTable & -2)) { ml_set_interrupts_enabled(inter); return; } /* Restore interrupt state and leave if we aren't doing VMs any more... */
1642
1643 if(act->mact.vmmCEntry && (act->mact.vmmCEntry->vmmFlags & vmmXStop)) { /* Do we need to stop the running guy? */
1644 sv = find_user_regs(act); /* Get the user state registers */
1645 if(!sv) { /* Did we find something? */
1646 panic("vmm_interrupt: no user context; act = %08X\n", act);
1647 }
1648 sv->save_exception = kVmmStopped*4; /* Set a "stopped" exception */
1649 vmm_force_exit(act, sv); /* Intercept a running VM */
1650 }
1651 ml_set_interrupts_enabled(inter); /* Put interrupts back to what they were */
1652
1653 return;
1654 }
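
/*-----------------------------------------------------------------------
** Example (editor's sketch, not part of the original source): the APC
** protocol that pairs vmm_stop_vm with vmm_interrupt. The requester
** allocates a ReturnHandler, pushes it on the activation's handler
** list, and requests the APC AST; the handler later runs on the target
** thread and must free the block, as vmm_interrupt does above. The
** helper name is hypothetical.
-----------------------------------------------------------------------*/

#if 0 /* illustrative only -- not compiled */
static kern_return_t example_queue_apc(thread_act_t act,
	void (*handler)(ReturnHandler *rh, thread_act_t act))
{
	ReturnHandler *rh = (ReturnHandler *)kalloc(sizeof(ReturnHandler));

	if(!rh) return KERN_RESOURCE_SHORTAGE;	/* No storage... */

	rh->handler = handler;	/* e.g., vmm_interrupt */
	rh->next = act->handlers;	/* Push onto the activation's handler list */
	act->handlers = rh;
	act_set_apc(act);	/* Request the APC AST on the target */

	return KERN_SUCCESS;
}
#endif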