/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.c
**
** C routines that we are adding to the MacOS X kernel.
**
** Weird Apple PSL stuff goes here...
**
** Until then, Copyright 2000, Connectix
-----------------------------------------------------------------------*/

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread_act.h>
#include <ppc/pmap_internals.h>
#include <vm/vm_kern.h>

#include <ppc/vmachmon.h>

extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */
extern double FloatInit;
extern unsigned long QNaNbarbarian[4];

/*************************************************************************************
	Virtual Machine Monitor Internal Routines
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function validates a vmm context index and returns the address of
** the corresponding context control entry.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/

vmmCntrlEntry *vmm_get_entry(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlTable		*CTable;
	vmmCntrlEntry		*CEntry;

	if (act->mact.vmmControl == 0) return NULL;		/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContextsPerThread) return NULL;	/* Index out of range; the unsigned compare also rejects an index of 0, which wraps */

	CTable = act->mact.vmmControl;				/* Make the address a bit more convenient */
	CEntry = &CTable->vmmc[index - 1];			/* Point to the entry */

	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;	/* See if the slot is actually in use */

	return CEntry;
}



/*************************************************************************************
	Virtual Machine Monitor Exported Functionality

	The following routines are used to implement a quick-switch mechanism for
	virtual machines that need to execute within their own processor environment
	(including register and MMU state).
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for them.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing major/minor version of
**		the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_version(struct savearea *save)
{
	save->save_r3 = kVmmCurrentVersion;			/* Return the version */
	return 1;
}

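/*
 *	Illustrative sketch, not compiled into the kernel: one way a client
 *	might decode the version word returned above. requiredMajor and
 *	requiredMinor are hypothetical client-side values, not part of this
 *	interface.
 */
#if 0
static int client_version_ok(unsigned int vers,
	unsigned int requiredMajor, unsigned int requiredMinor)
{
	unsigned int major = vers >> 16;		/* Top 16 bits hold the major version */
	unsigned int minor = vers & 0xFFFF;		/* Bottom 16 bits hold the minor version */

	if (major != requiredMajor) return 0;		/* Major must match what we were built for */
	return (minor >= requiredMinor);		/* Minor must be at least what we need */
}
#endif
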

/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing functionality supported by this
**		version of the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_features(struct savearea *save)
{
	save->save_r3 = kVmmCurrentFeatures;			/* Return the features */
	return 1;
}


/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**		act - pointer to current thread activation
**		version - requested version of the Vmm interface (allowing
**			future versions of the interface to change, but still
**			support older clients)
**		vmm_user_state - pointer to a logical page within the
**			client's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

int vmm_init_context(struct savearea *save)
{

	thread_act_t		act;
	vmm_version_t		version;
	vmm_state_page_t *	vmm_user_state;
	vmmCntrlTable		*CTable;
	vm_offset_t		conkern;
	vmm_state_page_t *	vks;
	vm_offset_t		conphys;
	kern_return_t		ret;
	pmap_t			new_pmap;
	int			cvi, i;
	task_t			task;
	thread_act_t		fact, gact;

	vmm_user_state = (vmm_state_page_t *)save->save_r4;	/* Get the user address of the comm area */
	if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {	/* Make sure the comm area is page aligned */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	/* Make sure that the version requested is supported */
	version = save->save_r3;				/* Pick up passed in version */
	if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	if((version & 0xFFFF) > kVmmCurMinorVersion) {		/* Check for valid minor */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	act = current_act();					/* Pick up our activation */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();					/* Figure out who we are */

	task_lock(task);					/* Lock our task */

	fact = (thread_act_t)task->thr_acts.next;		/* Get the first activation on task */
	gact = 0;						/* Pretend we didn't find it yet */

	for(i = 0; i < task->thr_act_count; i++) {		/* All of the activations */
		if(fact->mact.vmmControl) {			/* Is this a virtual machine monitor? */
			gact = fact;				/* Yeah... */
			break;					/* Bail the loop... */
		}
		fact = (thread_act_t)fact->thr_acts.next;	/* Go to the next one */
	}


	/*
	 * We only allow one thread per task to be a virtual machine monitor right now. This solves
	 * a number of potential problems that I can't put my finger on right now.
	 *
	 * Ultimately, I think we want to move the controls and make all this task based instead of
	 * thread based. That would allow an emulator architecture to spawn a kernel thread for each
	 * VM (if they want) rather than hand dispatch contexts.
	 */

	if(gact && (gact != act)) {				/* Check if another thread is a vmm or trying to be */
		task_unlock(task);				/* Release task lock */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_FAILURE;			/* We must play alone... */
		return 1;
	}

	if(!gact) act->mact.vmmControl = (vmmCntrlTable *)1;	/* Temporarily mark that we are the vmm thread */

	task_unlock(task);					/* Safe to release now (because we've marked ourselves) */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	if ((unsigned int)CTable == 1) {			/* If we are marked, try to allocate a new table, otherwise we have one */
		if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {	/* Get a fresh emulation control table */
			act->mact.vmmControl = 0;		/* Unmark us as vmm 'cause we failed */
			ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
			save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
			return 1;
		}

		bzero((void *)CTable, sizeof(vmmCntrlTable));	/* Clean it up */
		act->mact.vmmControl = CTable;			/* Initialize the table anchor */
	}

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search to find a free slot */
		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;	/* Bail if we find an unused slot */
	}

	if(cvi >= kVmmMaxContextsPerThread) {			/* Did we find one? */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No empty slots... */
		return 1;
	}

	ret = vm_map_wire(					/* Wire the virtual machine monitor's context area */
		act->map,
		(vm_offset_t)vmm_user_state,
		(vm_offset_t)vmm_user_state + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if (ret != KERN_SUCCESS)				/* The wire failed, return the code */
		goto return_in_shame;

	/* Map the vmm state into the kernel's address space. */
	conphys = pmap_extract(act->map->pmap, (vm_offset_t)vmm_user_state);

	/* Find a virtual address to use. */
	ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {				/* Did we find an address? */
		(void) vm_map_unwire(act->map,			/* No, unwire the context area */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		goto return_in_shame;
	}

	/* Map it into the kernel's address space. */
	pmap_enter(kernel_pmap, conkern, conphys, VM_PROT_READ | VM_PROT_WRITE, TRUE);

	/* Clear the vmm state structure. */
	vks = (vmm_state_page_t *)conkern;
	bzero((char *)vks, PAGE_SIZE);

	/* Allocate a new pmap for the new vmm context. */
	new_pmap = pmap_create(0);
	if (new_pmap == PMAP_NULL) {
		(void) vm_map_unwire(act->map,			/* Couldn't get a pmap, unwire the user page */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);

		kmem_free(kernel_map, conkern, PAGE_SIZE);	/* Release the kernel address */
		goto return_in_shame;
	}

	/* We're home free now. Simply fill in the necessary info and return. */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = cvi + 1;				/* Tell the user the index for this virtual machine */

	CTable->vmmc[cvi].vmmFlags = vmmInUse;			/* Mark the slot in use and make sure the rest are clear */
	CTable->vmmc[cvi].vmmPmap = new_pmap;			/* Remember the pmap for this guy */
	CTable->vmmc[cvi].vmmContextKern = vks;			/* Remember the kernel address of comm area */
	CTable->vmmc[cvi].vmmContextUser = vmm_user_state;	/* Remember user address of comm area */
	CTable->vmmc[cvi].vmmFPU_pcb = 0;			/* Clear saved floating point context */
	CTable->vmmc[cvi].vmmFPU_cpu = -1;			/* Invalidate CPU saved fp context is valid on */
	CTable->vmmc[cvi].vmmVMX_pcb = 0;			/* Clear saved vector context */
	CTable->vmmc[cvi].vmmVMX_cpu = -1;			/* Invalidate CPU saved vector context is valid on */

	hw_atomic_add(&saveanchor.saveneed, 2);			/* Account for the number of extra saveareas we think we might "need" */

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;

return_in_shame:
	if(!gact) kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table if we just allocated it */
	act->mact.vmmControl = 0;				/* Unmark us as vmm 'cause we failed */
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = ret;					/* Pass back return code... */
	return 1;

}

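/*
 *	Illustrative client-side sketch, not compiled here: vm_allocate() returns
 *	page-aligned memory, which satisfies the alignment rule vmm_init_context
 *	enforces above. How the client actually reaches vmm_init_context (the
 *	dispatch trap) is outside the scope of this file.
 */
#if 0
#include <mach/mach.h>

static vmm_state_page_t *alloc_comm_area(void)
{
	vm_address_t addr = 0;

	if (vm_allocate(mach_task_self(), &addr, PAGE_SIZE, TRUE) != KERN_SUCCESS)
		return NULL;				/* No memory... */
	return (vmm_state_page_t *)addr;		/* Page aligned by construction */
}
#endif
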

/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_tear_down_context(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi;
	register savearea	*sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	hw_atomic_sub(&saveanchor.saveneed, 2);			/* We don't need these extra saveareas anymore */

	if(CEntry->vmmFPU_pcb) {				/* Is there any floating point context? */
		sv = (savearea *)CEntry->vmmFPU_pcb;		/* Make usable */
		sv->save_flags &= ~SAVfpuvalid;			/* Clear in use bit */
		if(!(sv->save_flags & SAVinuse)) {		/* Anyone left with this one? */
			save_release(sv);			/* Nope, release it */
		}
	}

	if(CEntry->vmmVMX_pcb) {				/* Is there any vector context? */
		sv = (savearea *)CEntry->vmmVMX_pcb;		/* Make usable */
		sv->save_flags &= ~SAVvmxvalid;			/* Clear in use bit */
		if(!(sv->save_flags & SAVinuse)) {		/* Anyone left with this one? */
			save_release(sv);			/* Nope, release it */
		}
	}

	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	pmap_destroy(CEntry->vmmPmap);				/* Toss the pmap for this context */
	CEntry->vmmPmap = NULL;					/* Clean it up */

	(void) vm_map_unwire(					/* Unwire the user comm page */
		act->map,
		(vm_offset_t)CEntry->vmmContextUser,
		(vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
		FALSE);

	kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);	/* Remove kernel's view of the comm page */

	CEntry->vmmFlags = 0;					/* Clear out all of the flags for this entry including in use */
	CEntry->vmmPmap = 0;					/* Clear pmap pointer */
	CEntry->vmmContextKern = 0;				/* Clear the kernel address of comm area */
	CEntry->vmmContextUser = 0;				/* Clear the user address of comm area */
	CEntry->vmmFPU_pcb = 0;					/* Clear saved floating point context */
	CEntry->vmmFPU_cpu = -1;				/* Invalidate CPU saved fp context is valid on */
	CEntry->vmmVMX_pcb = 0;					/* Clear saved vector context */
	CEntry->vmmVMX_cpu = -1;				/* Invalidate CPU saved vector context is valid on */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Scan for any other contexts still in use */
		if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {	/* Return if there are still some in use */
			ml_set_interrupts_enabled(FALSE);	/* No more interruptions */
			return KERN_SUCCESS;			/* Leave... */
		}
	}

	kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table because we tossed the last context */
	act->mact.vmmControl = 0;				/* Unmark us as vmm */

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**		activation to tear down
**
** Outputs:
**		All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/
void vmm_tear_down_all(thread_act_t act) {

	vmmCntrlTable		*CTable;
	int			cvi;
	kern_return_t		ret;
	savearea		*save;
	spl_t			s;

	if(act->mact.specFlags & runningVM) {			/* Are we actually in a context right now? */
		save = (savearea *)find_user_regs(act);		/* Find the user state context */
		if(!save) {					/* Did we find it? */
			panic("vmm_tear_down_all: runningVM marked but no user state context\n");
			return;
		}

		save->save_exception = kVmmBogusContext*4;	/* Indicate that this context is bogus now */
		s = splhigh();					/* Make sure interrupts are off */
		vmm_force_exit(act, save);			/* Force an exit from VM state */
		splx(s);					/* Restore interrupts */
	}

	if(CTable = act->mact.vmmControl) {			/* Do we have a vmm control block? */

		for(cvi = 1; cvi <= kVmmMaxContextsPerThread; cvi++) {	/* Look at all slots */
			if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) {	/* Is this one in use? */
				ret = vmm_tear_down_context(act, cvi);	/* Take down the found context */
				if(ret != KERN_SUCCESS) {	/* Did it go away? */
					panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
					  ret, act, cvi);
				}
			}
		}
		if(act->mact.vmmControl) {			/* Make sure the table went away */
			panic("vmm_tear_down_all: control table did not get deallocated\n");	/* Table did not go away */
		}
	}

	return;
}

/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it and this mapping may disappear at any time, even before they get used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		cva - virtual address within the client's address
**			space
**		ava - virtual address within the alternate address
**			space
**		prot - protection flags
**
** Note that attempts to map areas in nested pmaps (shared libraries), block mapped
** areas, or directly mapped I/O areas are not allowed and will fail.
**
** Input conditions:
**		Interrupts disabled (from fast trap)
**
** Outputs:
**		kernel return code indicating success or failure
**		if success, va resident and alternate mapping made
-----------------------------------------------------------------------*/

kern_return_t vmm_map_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		cva,
	vm_offset_t		ava,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;
	vm_offset_t		phys_addr;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	struct phys_entry	*pp;
	pmap_t			mpmap;
	vm_map_t		map;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

/*
 *	Find out if we have already mapped the address and toss it out if so.
 */
	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, ava);	/* See if there is already a mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_map_page: timeout locking physical entry for alternate virtual address (%08X)\n", ava);	/* Yeah, scream about it! */
		return KERN_FAILURE;				/* Bad hair day, return failure... */
	}
	if(mp) {						/* If it was there, toss it */
		mpv = hw_cpv(mp);				/* Convert mapping block to virtual */
		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
		(void)mapping_remove(CEntry->vmmPmap, ava);	/* Throw away the mapping; we're about to replace it */
	}
	map = current_act()->map;				/* Get the current map */

	while(1) {						/* Keep trying until we get it or until we fail */
		if(hw_cvp_blk(map->pmap, cva)) return KERN_FAILURE;	/* Make sure that there is no block map at this address */

		mp = hw_lock_phys_vir(map->pmap->space, cva);	/* Lock the physical entry for emulator's page */
		if((unsigned int)mp&1) {			/* Did we timeout? */
			panic("vmm_map_page: timeout locking physical entry for emulator virtual address (%08X)\n", cva);	/* Yeah, scream about it! */
			return KERN_FAILURE;			/* Bad hair day, return failure... */
		}

		if(mp) {					/* We found it... */
			mpv = hw_cpv(mp);			/* Convert mapping block to virtual */

			if(!mpv->physent) return KERN_FAILURE;	/* If there is no physical entry (e.g., I/O area), we won't map it */

			if(!(mpv->PTEr & 1)) break;		/* If we are writable go ahead and map it... */

			hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the map before we try to fault the write bit on */
		}

		ml_set_interrupts_enabled(TRUE);		/* Enable interruptions */
		ret = vm_fault(map, trunc_page(cva), VM_PROT_READ | VM_PROT_WRITE, FALSE);	/* Didn't find it, try to fault it in read/write... */
		ml_set_interrupts_enabled(FALSE);		/* Disable interruptions */
		if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* There isn't a page there, return... */
	}

/*
 *	Now we make a mapping using all of the attributes of the source page except for protection.
 *	Also specify that the physical entry is locked.
 */
	nmpv = mapping_make(CEntry->vmmPmap, mpv->physent, (ava & -PAGE_SIZE),
		(mpv->physent->pte1 & -PAGE_SIZE), prot, ((mpv->physent->pte1 >> 3) & 0xF), 1);

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry now, we're done with it */

	CEntry->vmmLastMap = ava & -PAGE_SIZE;			/* Remember the last mapping we made */
	CEntry->vmmFlags |= vmmMapDone;				/* Set that we did a map operation */

	return KERN_SUCCESS;
}

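/*
 *	A note on the "& -PAGE_SIZE" idiom used above (illustrative, not
 *	compiled): in two's-complement arithmetic -PAGE_SIZE == ~(PAGE_SIZE - 1),
 *	so the AND truncates an address to its page boundary.
 */
#if 0
static vm_offset_t trunc_to_page_example(vm_offset_t addr)
{
	return addr & -PAGE_SIZE;			/* e.g., 0x00012345 & -0x1000 == 0x00012000 */
}
#endif
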

/*-----------------------------------------------------------------------
** vmm_map_execute
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context and then directly starts executing.
**
** See description of vmm_map_page for details.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_map_execute(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		cva,
	vm_offset_t		ava,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */

	if (CEntry == NULL) return kVmmBogusContext;		/* Return bogus context */

	ret = vmm_map_page(act, index, cva, ava, prot);		/* Go try to map the page on in */

	if(ret == KERN_SUCCESS) vmm_execute_vm(act, index);	/* Return was ok, launch the VM */

	return kVmmInvalidAddress;				/* We had trouble mapping in the page */

}

/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** This function determines whether the specified VMM
** virtual address is mapped.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the alternate's address
**			space
**
** Outputs:
**		Non-alternate's virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**		If there are aliases to the page in the non-alternate address space,
**		this call could return the wrong one. Moral of the story: no aliases.
-----------------------------------------------------------------------*/

vm_offset_t vmm_get_page_mapping(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	vm_offset_t		ova;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	pmap_t			pmap;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return -1;				/* No good, failure... */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_mapping: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return -1;					/* Bad hair day, return failure... */
	}
	if(!mp) return -1;					/* Not mapped, return -1 */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	pmap = current_act()->map->pmap;			/* Get the current pmap */
	ova = -1;						/* Assume failure for now */

	for(nmpv = hw_cpv(mpv->physent->phys_link); nmpv; nmpv = hw_cpv(nmpv->next)) {	/* Scan 'em all */

		if(nmpv->pmap != pmap) continue;		/* Skip all the rest if this is not the right pmap... */

		ova = ((((unsigned int)nmpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000;	/* Backward hash to the wrapped VADDR */
		ova = ova | ((nmpv->PTEv << 1) & 0xF0000000);	/* Move in the segment number */
		ova = ova | ((nmpv->PTEv << 22) & 0x0FC00000);	/* Add in the API for the top of the address */
		break;						/* We're done now, pass virtual address back */
	}

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	if(ova == -1) panic("vmm_get_page_mapping: could not back-map alternate va (%08X)\n", va);	/* We are bad wrong if we can't find it */

	return ova;
}

/*-----------------------------------------------------------------------
** vmm_unmap_page
**
** This function unmaps a page from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	boolean_t		ret;
	kern_return_t		kern_result = KERN_SUCCESS;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	ret = mapping_remove(CEntry->vmmPmap, va);		/* Toss the mapping */

	return (ret ? KERN_SUCCESS : KERN_FAILURE);		/* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_all_pages
**
** This function unmaps all pages from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of context state
**
** Outputs:
**		none
**
** Note:
**		All pages are unmapped, but the address space (i.e., pmap) is still alive
-----------------------------------------------------------------------*/

void vmm_unmap_all_pages(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return;				/* Either this isn't a vmm thread or the index is bogus */

/*
 *	Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
 */
	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	return;
}


/*-----------------------------------------------------------------------
** vmm_get_page_dirty_flag
**
** This function returns the changed flag of the page
** and optionally clears the flag.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**		reset - clears dirty if true, untouched if not
**
** Outputs:
**		the dirty bit
**		clears the dirty bit in the pte if requested
**
** Note:
**		The RC bits are merged into the global physical entry
-----------------------------------------------------------------------*/

boolean_t vmm_get_page_dirty_flag(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	unsigned int		reset)
{
	vmmCntrlEntry		*CEntry;
	register mapping	*mpv, *mp;
	unsigned int		RC;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return 1;				/* Either this isn't a vmm thread or the index is bogus */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_dirty_flag: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return 1;					/* Bad hair day, return dirty... */
	}
	if(!mp) return 1;					/* Not mapped, return dirty... */

	RC = hw_test_rc(mp, reset);				/* Fetch the RC bits and clear if requested */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	return (RC & 1);					/* Return the change bit */
}

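/*
 *	Illustrative sketch, not compiled: a monitor tracking guest page
 *	modifications can read and clear the change bit in one call. As the
 *	vmm_map_page comments note, RC state survives only while the page stays
 *	mapped, so pages tracked this way must be wired.
 */
#if 0
static void scan_dirty_example(thread_act_t act, vmm_thread_index_t index,
	vm_offset_t *pages, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		if (vmm_get_page_dirty_flag(act, index, pages[i], 1)) {	/* Read and clear the change bit */
			/* ... write back or re-shadow the page here ... */
		}
	}
}
#endif
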

/*-----------------------------------------------------------------------
** vmm_protect_page
**
** This function sets the protection bits of a mapped page
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**		prot - protection flags
**
** Outputs:
**		none
**		Protection bits of the mapping are modified
**
-----------------------------------------------------------------------*/

kern_return_t vmm_protect_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	vm_prot_t		prot)
{
	vmmCntrlEntry		*CEntry;
	register mapping	*mpv, *mp;
	unsigned int		RC;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_protect_page: timeout locking physical entry for virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return KERN_FAILURE;				/* Bad hair day, return failure... */
	}
	if(!mp) return KERN_SUCCESS;				/* Not mapped, just return... */

	hw_prot_virt(mp, prot);					/* Set the protection */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	CEntry->vmmLastMap = va & -PAGE_SIZE;			/* Remember the last mapping we changed */
	CEntry->vmmFlags |= vmmMapDone;				/* Set that we did a map operation */

	return KERN_SUCCESS;					/* Return */
}


/*-----------------------------------------------------------------------
** vmm_protect_execute
**
** This function sets the protection bits of a mapped page
** and then directly starts executing.
**
** See description of vmm_protect_page for details.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_protect_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_protect_execute(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */

	if (CEntry == NULL) return kVmmBogusContext;		/* Return bogus context */

	ret = vmm_protect_page(act, index, va, prot);		/* Go try to change access */

	if(ret == KERN_SUCCESS) vmm_execute_vm(act, index);	/* Return was ok, launch the VM */

	return kVmmInvalidAddress;				/* We had trouble of some kind (shouldn't happen) */

}


/*-----------------------------------------------------------------------
** vmm_get_float_state
**
** This function causes the current floating point state to
** be saved into the shared context area. It also clears the
** vmmFloatCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_float_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i;
	register struct savearea *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	act->mact.specFlags &= ~floatCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;	/* Clear the change indication */

	if(sv = (struct savearea *)CEntry->vmmFPU_pcb) {	/* Is there context yet? */
		bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[0].d), sizeof(vmm_processor_state_t));	/* 32 registers plus status and pad */
		return KERN_SUCCESS;
	}

	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[0] = 0;	/* Clear FPSCR */
	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[1] = 0;	/* Clear FPSCR */

	for(i = 0; i < 32; i++) {				/* Initialize floating points */
		CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit;	/* Initial value */
	}

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_get_vector_state
**
** This function causes the current vector state to
** be saved into the shared context area. It also clears the
** vmmVectorCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_vector_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i, j;
	unsigned int		vrvalidwrk;
	register struct savearea *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	act->mact.specFlags &= ~vectorCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;	/* Clear the change indication */

	if(sv = (savearea *)CEntry->vmmVMX_pcb) {		/* Is there context yet? */

		vrvalidwrk = sv->save_vrvalid;			/* Get the valid flags */

		for(j = 0; j < 4; j++) {			/* Set value for vscr */
			CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j] = sv->save_vscr[j];
		}

		for(i = 0; i < 32; i++) {			/* Copy the saved registers and invalidate the others */
			if(vrvalidwrk & 0x80000000) {		/* Do we have a valid value here? */
				for(j = 0; j < 4; j++) {	/* If so, copy it over */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
				}
			}
			else {
				for(j = 0; j < 4; j++) {	/* Otherwise set to empty value */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
				}
			}

			vrvalidwrk = vrvalidwrk << 1;		/* Shift over to the next */

		}

		return KERN_SUCCESS;
	}

	for(j = 0; j < 4; j++) {				/* Initialize vscr to java mode */
		CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j] = 0;	/* Initial value */
	}

	for(i = 0; i < 32; i++) {				/* Initialize vector registers */
		for(j = 0; j < 4; j++) {			/* Do words */
			CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];	/* Initial value */
		}
	}

	return KERN_SUCCESS;
}

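/*
 *	Illustrative sketch, not compiled: save_vrvalid is scanned from the high
 *	bit down in the loop above, so the valid bit for a given vector register
 *	can be picked out directly.
 */
#if 0
static int vr_is_valid_example(unsigned int vrvalid, int vr)
{
	return (vrvalid >> (31 - vr)) & 1;		/* Bit 31 = VR0, bit 30 = VR1, ... */
}
#endif
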
/*-----------------------------------------------------------------------
** vmm_set_timer
**
** This function sets a timer (in AbsoluteTime) to pop at a specific time.
** The vmmTimerPop flag is cleared if the timer is actually set.
**
** A timer is cleared by setting the time to 0; this also clears the
** vmmTimerPop bit. Simply setting the timer to earlier than the
** current time clears the internal timer request, but leaves the
** vmmTimerPop flag set.
**
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**		timerhi - high order word of AbsoluteTime to pop
**		timerlo - low order word of AbsoluteTime to pop
**
** Outputs:
**		timer set, vmmTimerPop cleared
-----------------------------------------------------------------------*/

kern_return_t vmm_set_timer(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		timerhi,
	unsigned int		timerlo)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo;

	vmm_timer_pop(act);					/* Go adjust all of the timer stuff */
	return KERN_SUCCESS;					/* Leave now... */
}

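/*
 *	Illustrative sketch, not compiled: the interface takes the pop deadline
 *	as two 32-bit words, so a 64-bit AbsoluteTime value splits like this.
 */
#if 0
static kern_return_t set_deadline_example(thread_act_t act,
	vmm_thread_index_t index, uint64_t deadline)
{
	return vmm_set_timer(act, index,
		(unsigned int)(deadline >> 32),		/* timerhi - high order word */
		(unsigned int)deadline);		/* timerlo - low order word */
}
#endif
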

/*-----------------------------------------------------------------------
** vmm_get_timer
**
** This function causes the timer for a specified VM to be
** returned in return_params[0] and return_params[1].
**
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Timer value set in return_params[0] and return_params[1].
**		Set to 0 if timer is not set.
-----------------------------------------------------------------------*/

kern_return_t vmm_get_timer(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	CEntry->vmmContextKern->return_params[0] = (CEntry->vmmTimer >> 32);	/* Return the high word of the timer */
	CEntry->vmmContextKern->return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the low word of the timer */

	return KERN_SUCCESS;
}



/*-----------------------------------------------------------------------
** vmm_timer_pop
**
** This function causes all timers in the array of VMs to be updated.
** All appropriate flags are set or reset. If a VM is currently
** running and its timer expired, it is intercepted.
**
** The qactTimer value is set to the lowest unexpired timer. It is
** zeroed if all timers are expired or have been reset.
**
** Inputs:
**		act - pointer to current thread activation structure
**
** Outputs:
**		timers set, vmmTimerPop cleared or set
-----------------------------------------------------------------------*/

void vmm_timer_pop(
	thread_act_t		act)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi, any;
	uint64_t		now, soonest;
	savearea		*sv;

	if(!((unsigned int)act->mact.vmmControl & 0xFFFFFFFE)) {	/* Are there any virtual machines? */
		panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
	}

	soonest = 0xFFFFFFFFFFFFFFFFULL;			/* Max time */

	clock_get_uptime(&now);					/* What time is it? */

	CTable = act->mact.vmmControl;				/* Make this easier */
	any = 0;						/* Haven't found a running unexpired timer yet */

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Cycle through all and check time now */

		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;	/* Do not check if the entry is empty */

		if(CTable->vmmc[cvi].vmmTimer == 0) {		/* Is the timer reset? */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Clear timer popped */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Clear timer popped */
			continue;				/* Check next */
		}

		if (CTable->vmmc[cvi].vmmTimer <= now) {
			CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;	/* Set timer popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;	/* Set timer popped here */
			if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->mact.vmmCEntry) {	/* Is this the running VM? */
				sv = (savearea *)find_user_regs(act);	/* Get the user state registers */
				if(!sv) {			/* Did we find something? */
					panic("vmm_timer_pop: no user context; act = %08X\n", act);
				}
				sv->save_exception = kVmmReturnNull*4;	/* Indicate that this is a null exception */
				vmm_force_exit(act, sv);	/* Intercept a running VM */
			}
			continue;				/* Check the rest */
		}
		else {						/* It hasn't popped yet */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Set timer not popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Set timer not popped here */
		}

		any = 1;					/* Show we found an active unexpired timer */

		if (CTable->vmmc[cvi].vmmTimer < soonest)
			soonest = CTable->vmmc[cvi].vmmTimer;
	}

	if(any) {
		if (act->mact.qactTimer == 0 || soonest <= act->mact.qactTimer)
			act->mact.qactTimer = soonest;		/* Set lowest timer */
	}

	return;
}



/*-----------------------------------------------------------------------
** vmm_stop_vm
**
** This function prevents the specified VM(s) from running.
** If any is currently executing, the execution is intercepted
** with a code of kVmmStopped. Note that execution of the VM is
** blocked until a vmmExecuteVM is called with the start flag set to 1.
** This provides the ability for a thread to stop execution of a VM and
** ensure that it will not be run until the emulator has processed the
** "virtual" interruption.
**
** Inputs:
**		vmmask - 32 bit mask corresponding to the VMs to put in stop state
**		NOTE: if this mask is all 0s, any executing VM is intercepted with
**		a kVmmStopped (but not marked stopped), otherwise this is a no-op.
**		Also note that there is a potential race here and the VM may not stop.
**
** Outputs:
**		kernel return code indicating success
**		or if no VMs are enabled, an invalid syscall exception.
-----------------------------------------------------------------------*/

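/*
 *	Illustrative sketch, not compiled: the mask is scanned from the high bit
 *	down, so the stop bit for a one-based context index works out to:
 */
#if 0
static unsigned int stop_mask_for_index_example(vmm_thread_index_t index)
{
	return 0x80000000U >> (index - 1);		/* Bit 31 = context 1, bit 30 = context 2, ... */
}
#endif
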
int vmm_stop_vm(struct savearea *save)
{

	thread_act_t		act;
	vmmCntrlTable		*CTable;
	int			cvi, i;
	task_t			task;
	thread_act_t		fact;
	unsigned int		vmmask;
	ReturnHandler		*stopapc;

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();					/* Figure out who we are */

	task_lock(task);					/* Lock our task */

	fact = (thread_act_t)task->thr_acts.next;		/* Get the first activation on task */
	act = 0;						/* Pretend we didn't find it yet */

	for(i = 0; i < task->thr_act_count; i++) {		/* All of the activations */
		if(fact->mact.vmmControl) {			/* Is this a virtual machine monitor? */
			act = fact;				/* Yeah... */
			break;					/* Bail the loop... */
		}
		fact = (thread_act_t)fact->thr_acts.next;	/* Go to the next one */
	}

	if(!((unsigned int)act)) {				/* See if we have VMMs yet */
		task_unlock(task);				/* No, unlock the task */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;					/* Go generate a syscall exception */
	}

	act_lock_thread(act);					/* Make sure this stays 'round */
	task_unlock(task);					/* Safe to release now */

	CTable = act->mact.vmmControl;				/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {			/* Are there any all the way up yet? */
		act_unlock_thread(act);				/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;					/* Go generate a syscall exception */
	}

	if(!(vmmask = save->save_r3)) {				/* Get the stop mask and check if all zeros */
		act_unlock_thread(act);				/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Set success */
		return 1;					/* Return... */
	}

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search slots */
		if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) {	/* See if we need to stop and if it is in use */
			hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop);	/* Set this one to stop */
		}
		vmmask = vmmask << 1;				/* Slide mask over */
	}

	if(hw_compare_and_store(0, 1, &act->mact.emPendRupts)) {	/* See if there is already a stop pending and lock out others if not */
		act_unlock_thread(act);				/* Already one pending, unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Say we did it... */
		return 1;					/* Leave */
	}

	if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) {	/* Get a return handler control block */
		act->mact.emPendRupts = 0;			/* No memory, say we have given up request */
		act_unlock_thread(act);				/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No storage... */
		return 1;					/* Return... */
	}

	ml_set_interrupts_enabled(FALSE);			/* Disable interruptions for now */

	stopapc->handler = vmm_interrupt;			/* Set interruption routine */

	stopapc->next = act->handlers;				/* Put our interrupt at the start of the list */
	act->handlers = stopapc;				/* Point to us */

	act_set_apc(act);					/* Set an APC AST */
	ml_set_interrupts_enabled(TRUE);			/* Enable interruptions now */

	act_unlock_thread(act);					/* Unlock the activation */

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;
}

/*-----------------------------------------------------------------------
** vmm_interrupt
**
** This function is executed asynchronously from an APC AST.
** It is to be used for anything that needs to interrupt a running VM.
** This includes any kind of interruption generation (other than timer pop)
** or entering the stopped state.
**
** Inputs:
**		ReturnHandler *rh - the return handler control block as required by the APC.
**		thread_act_t act - the activation
**
** Outputs:
**		Whatever needed to be done is done.
-----------------------------------------------------------------------*/

void vmm_interrupt(ReturnHandler *rh, thread_act_t act) {

	vmmCntrlTable		*CTable;
	savearea		*sv;
	boolean_t		inter;



	kfree((vm_offset_t)rh, sizeof(ReturnHandler));		/* Release the return handler block */

	inter = ml_set_interrupts_enabled(FALSE);		/* Disable interruptions for now */

	act->mact.emPendRupts = 0;				/* Say that there are no more interrupts pending */
	CTable = act->mact.vmmControl;				/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) return;		/* Leave if we aren't doing VMs any more... */

	if(act->mact.vmmCEntry && (act->mact.vmmCEntry->vmmFlags & vmmXStop)) {	/* Do we need to stop the running guy? */
		sv = (savearea *)find_user_regs(act);		/* Get the user state registers */
		if(!sv) {					/* Did we find something? */
			panic("vmm_interrupt: no user context; act = %08X\n", act);
		}
		sv->save_exception = kVmmStopped*4;		/* Set a "stopped" exception */
		vmm_force_exit(act, sv);			/* Intercept a running VM */
	}
	ml_set_interrupts_enabled(inter);			/* Put interrupts back to what they were */

	return;
}