/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.c
**
** C routines that we are adding to the MacOS X kernel.
**
** Weird Apple PSL stuff goes here...
**
** Until then, Copyright 2000, Connectix
-----------------------------------------------------------------------*/

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread_act.h>
#include <ppc/pmap_internals.h>
#include <vm/vm_kern.h>

#include <ppc/vmachmon.h>

extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */
extern double FloatInit;
extern unsigned long QNaNbarbarian[4];

/*************************************************************************************
	Virtual Machine Monitor Internal Routines
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function validates a vmm context index and returns the
** corresponding vmm context entry.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/

vmmCntrlEntry *vmm_get_entry(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlTable	*CTable;
	vmmCntrlEntry	*CEntry;

	if (act->mact.vmmControl == 0) return NULL;			/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContextsPerThread) return NULL;	/* Index not in range */

	CTable = act->mact.vmmControl;					/* Make the address a bit more convenient */
	CEntry = &CTable->vmmc[index - 1];				/* Point to the entry */

	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;		/* See if the slot is actually in use */

	return CEntry;
}
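
/*
 * Illustrative sketch, not part of the original source: context indices
 * handed to clients are one-based (vmm_init_context returns cvi + 1), so
 * slot 0 of vmmc[] corresponds to index 1. A caller-side validity probe
 * could simply lean on the NULL convention above:
 */
#if 0	/* example only */
static boolean_t vmm_index_is_active(thread_act_t act, vmm_thread_index_t index)
{
	return (vmm_get_entry(act, index) != NULL);	/* NULL: no table, out-of-range index, or unused slot */
}
#endif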



/*************************************************************************************
	Virtual Machine Monitor Exported Functionality

	The following routines are used to implement a quick-switch mechanism for
	virtual machines that need to execute within their own processor environment
	(including register and MMU state).
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for them.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing major/minor version of
**		the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_version(struct savearea *save)
{
	save->save_r3 = kVmmCurrentVersion;		/* Return the version */
	return 1;
}
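
/*
 * Illustrative sketch, not part of the original source: splitting the
 * version word returned in r3 into the halves described above:
 */
#if 0	/* example only */
	unsigned int vers  = kVmmCurrentVersion;	/* what the kernel places in r3 */
	unsigned int major = vers >> 16;		/* top 16 bits: major version */
	unsigned int minor = vers & 0xFFFF;		/* bottom 16 bits: minor version */
#endif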


/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing functionality supported by this
**		version of the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_features(struct savearea *save)
{
	save->save_r3 = kVmmCurrentFeatures;		/* Return the features */
	return 1;
}
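
/*
 * Illustrative sketch, not part of the original source: a client would
 * AND the returned word against the kVmmFeature_* bits from vmachmon.h
 * before relying on an optional facility (the specific flag name below
 * is assumed purely for illustration):
 */
#if 0	/* example only */
	unsigned int feats = kVmmCurrentFeatures;	/* what the kernel places in r3 */
	if (feats & kVmmFeature_LittleEndian) {
		/* little-endian guest mode is available */
	}
#endif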


/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**		act - pointer to current thread activation
**		version - requested version of the Vmm interface (allowing
**			future versions of the interface to change, but still
**			support older clients)
**		vmm_user_state - pointer to a logical page within the
**			client's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

int vmm_init_context(struct savearea *save)
{

	thread_act_t		act;
	vmm_version_t		version;
	vmm_state_page_t *	vmm_user_state;
	vmmCntrlTable		*CTable;
	vm_offset_t		conkern;
	vmm_state_page_t *	vks;
	vm_offset_t		conphys;
	kern_return_t		ret;
	pmap_t			new_pmap;
	int			cvi, i;
	task_t			task;
	thread_act_t		fact, gact;

	vmm_user_state = (vmm_state_page_t *)save->save_r4;	/* Get the user address of the comm area */
	if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {	/* Make sure the comm area is page aligned */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	/* Make sure that the version requested is supported */
	version = save->save_r3;				/* Pick up passed in version */
	if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	if((version & 0xFFFF) > kVmmCurMinorVersion) {		/* Check for valid minor */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	act = current_act();					/* Pick up our activation */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();					/* Figure out who we are */

	task_lock(task);					/* Lock our task */

	fact = (thread_act_t)task->thr_acts.next;		/* Get the first activation on task */
	gact = 0;						/* Pretend we didn't find it yet */

	for(i = 0; i < task->thr_act_count; i++) {		/* All of the activations */
		if(fact->mact.vmmControl) {			/* Is this a virtual machine monitor? */
			gact = fact;				/* Yeah... */
			break;					/* Bail the loop... */
		}
		fact = (thread_act_t)fact->thr_acts.next;	/* Go to the next one */
	}


	/*
	 * We only allow one thread per task to be a virtual machine monitor right now. This solves
	 * a number of potential problems that I can't put my finger on right now.
	 *
	 * Ultimately, I think we want to move the controls and make all this task based instead of
	 * thread based. That would allow an emulator architecture to spawn a kernel thread for each
	 * VM (if they want) rather than hand dispatch contexts.
	 */

	if(gact && (gact != act)) {				/* Check if another thread is a vmm or trying to be */
		task_unlock(task);				/* Release task lock */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_FAILURE;			/* We must play alone... */
		return 1;
	}

	if(!gact) act->mact.vmmControl = (vmmCntrlTable *)1;	/* Temporarily mark that we are the vmm thread */

	task_unlock(task);					/* Safe to release now (because we've marked ourselves) */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	if ((unsigned int)CTable == 1) {			/* If we are marked, try to allocate a new table, otherwise we have one */
		if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {	/* Get a fresh emulation control table */
			act->mact.vmmControl = 0;		/* Unmark us as vmm 'cause we failed */
			ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
			save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
			return 1;
		}

		bzero((void *)CTable, sizeof(vmmCntrlTable));	/* Clean it up */
		act->mact.vmmControl = CTable;			/* Initialize the table anchor */
	}

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search to find a free slot */
		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;	/* Bail if we find an unused slot */
	}

	if(cvi >= kVmmMaxContextsPerThread) {			/* Did we find one? */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No empty slots... */
		return 1;
	}

	ret = vm_map_wire(					/* Wire the virtual machine monitor's context area */
		act->map,
		(vm_offset_t)vmm_user_state,
		(vm_offset_t)vmm_user_state + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if (ret != KERN_SUCCESS)				/* The wire failed, return the code */
		goto return_in_shame;

	/* Map the vmm state into the kernel's address space. */
	conphys = pmap_extract(act->map->pmap, (vm_offset_t)vmm_user_state);

	/* Find a virtual address to use. */
	ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {				/* Did we find an address? */
		(void) vm_map_unwire(act->map,			/* No, unwire the context area */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		goto return_in_shame;
	}

	/* Map it into the kernel's address space. */
	pmap_enter(kernel_pmap, conkern, conphys,
		VM_PROT_READ | VM_PROT_WRITE,
		VM_WIMG_USE_DEFAULT, TRUE);

	/* Clear the vmm state structure. */
	vks = (vmm_state_page_t *)conkern;
	bzero((char *)vks, PAGE_SIZE);

	/* Allocate a new pmap for the new vmm context. */
	new_pmap = pmap_create(0);
	if (new_pmap == PMAP_NULL) {
		(void) vm_map_unwire(act->map,			/* Couldn't get a pmap, unwire the user page */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);

		kmem_free(kernel_map, conkern, PAGE_SIZE);	/* Release the kernel address */
		goto return_in_shame;
	}

	/* We're home free now. Simply fill in the necessary info and return. */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = cvi + 1;				/* Tell the user the index for this virtual machine */

	CTable->vmmc[cvi].vmmFlags = vmmInUse;			/* Mark the slot in use and make sure the rest are clear */
	CTable->vmmc[cvi].vmmPmap = new_pmap;			/* Remember the pmap for this guy */
	CTable->vmmc[cvi].vmmContextKern = vks;			/* Remember the kernel address of comm area */
	CTable->vmmc[cvi].vmmContextUser = vmm_user_state;	/* Remember user address of comm area */

	CTable->vmmc[cvi].vmmFacCtx.FPUsave = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUlevel = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0;			/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXsave = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXlevel = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0;			/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.facAct = act;		/* Point back to the activation */

	hw_atomic_add((int *)&saveanchor.savetarget, 2);	/* Account for the number of extra saveareas we think we might "need" */

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;

return_in_shame:
	if(!gact) kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table if we just allocated it */
	act->mact.vmmControl = 0;				/* Unmark us as vmm 'cause we failed */
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = ret;					/* Pass back return code... */
	return 1;

}
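
/*
 * Illustrative sketch, not part of the original source: the two
 * preconditions enforced above are that the comm area is page-aligned
 * and that the requested major version is supported. A user-side setup
 * might look like this (vm_allocate returns page-aligned memory; the
 * trap invocation itself is elided):
 */
#if 0	/* example only */
	vmm_state_page_t *comm_area;

	if (vm_allocate(mach_task_self(), (vm_address_t *)&comm_area,
			PAGE_SIZE, TRUE) != KERN_SUCCESS) {
		/* out of address space */
	}
	/* pass kVmmCurrentVersion in r3 and comm_area in r4 to the init trap;
	   on success the one-based context index comes back in
	   comm_area->thread_index */
#endif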


/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_tear_down_context(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi;
	register savearea	*sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	hw_atomic_sub((int *)&saveanchor.savetarget, 2);	/* We don't need these extra saveareas anymore */

	if(CEntry->vmmFacCtx.FPUsave) {				/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {				/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}

	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	pmap_destroy(CEntry->vmmPmap);				/* Toss the pmap for this context */
	CEntry->vmmPmap = NULL;					/* Clean it up */

	(void) vm_map_unwire(					/* Unwire the user comm page */
		act->map,
		(vm_offset_t)CEntry->vmmContextUser,
		(vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
		FALSE);

	kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);	/* Remove kernel's view of the comm page */

	CEntry->vmmFlags = 0;					/* Clear out all of the flags for this entry including in use */
	CEntry->vmmPmap = 0;					/* Clear pmap pointer */
	CEntry->vmmContextKern = 0;				/* Clear the kernel address of comm area */
	CEntry->vmmContextUser = 0;				/* Clear the user address of comm area */

	CEntry->vmmFacCtx.FPUsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.facAct = 0;				/* Clear facility context control */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search to find a free slot */
		if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {	/* Return if there are still some in use */
			ml_set_interrupts_enabled(FALSE);	/* No more interruptions */
			return KERN_SUCCESS;			/* Leave... */
		}
	}

	kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table because we tossed the last context */
	act->mact.vmmControl = 0;				/* Unmark us as vmm */

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return KERN_SUCCESS;
}
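
/*
 * Illustrative sketch, not part of the original source: teardown takes
 * the same one-based index that vmm_init_context handed back, and the
 * control table itself is freed once the last in-use slot goes away:
 */
#if 0	/* example only */
	if (vmm_tear_down_context(act, index) != KERN_SUCCESS) {
		/* not a vmm thread, or the index was bogus */
	}
#endif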

/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**		activation to tear down
**
** Outputs:
**		All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/
void vmm_tear_down_all(thread_act_t act) {

	vmmCntrlTable		*CTable;
	int			cvi;
	kern_return_t		ret;
	savearea		*save;
	spl_t			s;

	if(act->mact.specFlags & runningVM) {			/* Are we actually in a context right now? */
		save = find_user_regs(act);			/* Find the user state context */
		if(!save) {					/* Did we find it? */
			panic("vmm_tear_down_all: runningVM marked but no user state context\n");
			return;
		}

		save->save_exception = kVmmBogusContext*4;	/* Indicate that this context is bogus now */
		s = splhigh();					/* Make sure interrupts are off */
		vmm_force_exit(act, save);			/* Force an exit from VM state */
		splx(s);					/* Restore interrupts */
	}

	if(CTable = act->mact.vmmControl) {			/* Do we have a vmm control block? */

		for(cvi = 1; cvi <= kVmmMaxContextsPerThread; cvi++) {	/* Look at all slots */
			if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) {	/* Is this one in use? */
				ret = vmm_tear_down_context(act, cvi);	/* Take down the found context */
				if(ret != KERN_SUCCESS) {	/* Did it go away? */
					panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
						ret, act, cvi);
				}
			}
		}
		if(act->mact.vmmControl) {			/* Did the table go away? */
			panic("vmm_tear_down_all: control table did not get deallocated\n");	/* Table did not go away */
		}
	}

	return;
}

/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it and this mapping may disappear at any time, even before it gets used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the client's address
**			space
**		ava   - virtual address within the alternate address
**			space
**		prot  - protection flags
**
** Note that attempts to map areas in nested pmaps (shared libraries) or block-mapped
** areas are not allowed and will fail. The same goes for directly mapped I/O areas.
**
** Input conditions:
**		Interrupts disabled (from fast trap)
**
** Outputs:
**		kernel return code indicating success or failure
**		if success, va resident and alternate mapping made
-----------------------------------------------------------------------*/

kern_return_t vmm_map_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		cva,
	vm_offset_t		ava,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;
	vm_offset_t		phys_addr;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	struct phys_entry	*pp;
	pmap_t			mpmap;
	vm_map_t		map;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	/*
	 * Find out if we have already mapped the address and toss it out if so.
	 */
	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, ava);	/* See if there is already a mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_map_page: timeout locking physical entry for alternate virtual address (%08X)\n", ava);	/* Yeah, scream about it! */
		return KERN_FAILURE;				/* Bad hair day, return failure... */
	}
	if(mp) {						/* If it was there, toss it */
		mpv = hw_cpv(mp);				/* Convert mapping block to virtual */
		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
		(void)mapping_remove(CEntry->vmmPmap, ava);	/* Throw away the mapping; we're about to replace it */
	}
	map = current_act()->map;				/* Get the current map */

	while(1) {						/* Keep trying until we get it or until we fail */
		if(hw_cvp_blk(map->pmap, cva)) return KERN_FAILURE;	/* Make sure that there is no block map at this address */

		mp = hw_lock_phys_vir(map->pmap->space, cva);	/* Lock the physical entry for emulator's page */
		if((unsigned int)mp&1) {			/* Did we timeout? */
			panic("vmm_map_page: timeout locking physical entry for emulator virtual address (%08X)\n", cva);	/* Yeah, scream about it! */
			return KERN_FAILURE;			/* Bad hair day, return failure... */
		}

		if(mp) {					/* We found it... */
			mpv = hw_cpv(mp);			/* Convert mapping block to virtual */

			if(!mpv->physent) return KERN_FAILURE;	/* If there is no physical entry (e.g., I/O area), we won't map it */

			if(!(mpv->PTEr & 1)) break;		/* If it is writable, go ahead and map it... */

			hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the map before we try to fault the write bit on */
		}

		ml_set_interrupts_enabled(TRUE);		/* Enable interruptions */
		ret = vm_fault(map, trunc_page(cva), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0);	/* Didn't find it, try to fault it in read/write... */
		ml_set_interrupts_enabled(FALSE);		/* Disable interruptions */
		if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* There isn't a page there, return... */
	}

	/*
	 * Now we make a mapping using all of the attributes of the source page except for protection.
	 * Also specify that the physical entry is locked.
	 */
	nmpv = mapping_make(CEntry->vmmPmap, mpv->physent, (ava & -PAGE_SIZE),
		(mpv->physent->pte1 & -PAGE_SIZE), prot, ((mpv->physent->pte1 >> 3) & 0xF), 1);

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry now, we're done with it */

	CEntry->vmmLastMap = ava & -PAGE_SIZE;			/* Remember the last mapping we made */
	CEntry->vmmFlags |= vmmMapDone;				/* Set that we did a map operation */

	return KERN_SUCCESS;
}
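
/*
 * Illustrative sketch, not part of the original source: per the notes
 * above, remapping an address first discards the old mapping and its RC
 * bits, so a monitor that starts a guest page read-only and later grants
 * write access simply maps it twice:
 */
#if 0	/* example only */
	kern_return_t kr;

	kr = vmm_map_page(act, index, cva, ava, VM_PROT_READ);	/* guest may read only */
	/* ... guest takes a write fault on ava ... */
	kr = vmm_map_page(act, index, cva, ava, VM_PROT_READ | VM_PROT_WRITE);	/* remap writable; prior RC info is lost */
#endif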


/*-----------------------------------------------------------------------
** vmm_map_execute
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context and then directly starts executing.
**
** See description of vmm_map_page for details.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_map_execute(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		cva,
	vm_offset_t		ava,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */

	if (CEntry == NULL) return kVmmBogusContext;		/* Return bogus context */

	ret = vmm_map_page(act, index, cva, ava, prot);		/* Go try to map the page on in */

	if(ret == KERN_SUCCESS) vmm_execute_vm(act, index);	/* Return was ok, launch the VM */

	return kVmmInvalidAddress;				/* We had trouble mapping in the page */

}

/*-----------------------------------------------------------------------
** vmm_map_list
**
** This function maps a list of pages into the alternate's logical
** address space.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		count - number of pages to map
**		vmcpComm in the comm page contains up to kVmmMaxMapPages to map
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxMapPages is exceeded
**		or the vmm_map_page call fails.
-----------------------------------------------------------------------*/

kern_return_t vmm_map_list(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		cnt)
{
	vmmCntrlEntry		*CEntry;
	boolean_t		ret;
	unsigned int		i;
	vmmMapList		*lst;
	vm_offset_t		cva;
	vm_offset_t		ava;
	vm_prot_t		prot;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	if(cnt > kVmmMaxMapPages) return KERN_FAILURE;		/* They tried to map too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	lst = (vmmMapList *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */

	for(i = 0; i < cnt; i++) {				/* Step through and map all pages in the list */
		cva = lst[i].vmlva;				/* Get the actual address */
		ava = lst[i].vmlava & -vmlFlgs;			/* Get the alternate address */
		prot = lst[i].vmlava & vmlProt;			/* Get the protection bits */
		ret = vmm_map_page(act, index, cva, ava, prot);	/* Go try to map the page on in */
		if(ret != KERN_SUCCESS) return KERN_FAILURE;	/* Bail if any error */
	}

	return KERN_SUCCESS;					/* Return... */
}
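
/*
 * Illustrative sketch, not part of the original source: each vmcpComm
 * entry packs the protection into the low-order bits of vmlava (the loop
 * above strips them with "& -vmlFlgs" and recovers them with "& vmlProt").
 * Filling in the first entry from the client's mapped state page
 * ("comm_area" is a hypothetical name) might look like:
 */
#if 0	/* example only */
	vmmMapList *lst = (vmmMapList *)&((vmm_comm_page_t *)comm_area)->vmcpComm[0];

	lst[0].vmlva  = cva;							/* emulator-space source page */
	lst[0].vmlava = (ava & -PAGE_SIZE) | (VM_PROT_READ | VM_PROT_WRITE);	/* alternate address plus packed protection */
#endif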

/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** This function determines whether the specified VMM
** virtual address is mapped.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the alternate's address
**			space
**
** Outputs:
**		Non-alternate's virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**		If there are aliases to the page in the non-alternate address space,
**		this call could return the wrong one. Moral of the story: no aliases.
-----------------------------------------------------------------------*/

vm_offset_t vmm_get_page_mapping(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	vm_offset_t		ova;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	pmap_t			pmap;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return -1;				/* No good, failure... */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_mapping: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return -1;					/* Bad hair day, return -1... */
	}
	if(!mp) return -1;					/* Not mapped, return -1 */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	pmap = current_act()->map->pmap;			/* Get the current pmap */
	ova = -1;						/* Assume failure for now */

	for(nmpv = hw_cpv(mpv->physent->phys_link); nmpv; nmpv = hw_cpv(nmpv->next)) {	/* Scan 'em all */

		if(nmpv->pmap != pmap) continue;		/* Skip all the rest if this is not the right pmap... */

		ova = ((((unsigned int)nmpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000;	/* Backward hash to the wrapped VADDR */
		ova = ova | ((nmpv->PTEv << 1) & 0xF0000000);	/* Move in the segment number */
		ova = ova | ((nmpv->PTEv << 22) & 0x0FC00000);	/* Add in the API for the top of the address */
		break;						/* We're done now, pass virtual address back */
	}

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	if(ova == -1) panic("vmm_get_page_mapping: could not back-map alternate va (%08X)\n", va);	/* We are bad wrong if we can't find it */

	return ova;
}

/*-----------------------------------------------------------------------
** vmm_unmap_page
**
** This function unmaps a page from the alternate's logical
** address space.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the vmm's address
**			space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	boolean_t		ret;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	ret = mapping_remove(CEntry->vmmPmap, va);		/* Toss the mapping */

	return (ret ? KERN_SUCCESS : KERN_FAILURE);		/* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_list
**
** This function unmaps a list of pages from the alternate's logical
** address space.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		count - number of pages to release
**		vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_list(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		cnt)
{
	vmmCntrlEntry		*CEntry;
	unsigned int		*pgaddr, i;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE;	/* They tried to unmap too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	pgaddr = &((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */

	for(i = 0; i < cnt; i++) {				/* Step through and release all pages in the list */

		(void)mapping_remove(CEntry->vmmPmap, pgaddr[i]);	/* Toss the mapping */
	}

	return KERN_SUCCESS;					/* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_all_pages
**
** This function unmaps all pages from the alternate's logical
** address space.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of context state
**
** Outputs:
**		none
**
** Note:
**		All pages are unmapped, but the address space (i.e., pmap) is still alive
-----------------------------------------------------------------------*/

void vmm_unmap_all_pages(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return;				/* Either this isn't a vmm thread or the index is bogus */

	/*
	 * Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
	 */
	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	return;
}


/*-----------------------------------------------------------------------
** vmm_get_page_dirty_flag
**
** This function returns the changed flag of the page
** and optionally clears the flag.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the vmm's address
**			space
**		reset - clears dirty if true, leaves it untouched if not
**
** Outputs:
**		the dirty bit
**		clears the dirty bit in the pte if requested
**
** Note:
**		The RC bits are merged into the global physical entry
-----------------------------------------------------------------------*/

boolean_t vmm_get_page_dirty_flag(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	unsigned int		reset)
{
	vmmCntrlEntry		*CEntry;
	register mapping	*mpv, *mp;
	unsigned int		RC;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return 1;				/* Either this isn't a vmm thread or the index is bogus */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_dirty_flag: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return 1;					/* Bad hair day, return dirty... */
	}
	if(!mp) return 1;					/* Not mapped, return dirty... */

	RC = hw_test_rc(mp, reset);				/* Fetch the RC bits and clear if requested */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	return (RC & 1);					/* Return the change bit */
}
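
/*
 * Illustrative sketch, not part of the original source: an emulator
 * tracking guest page modification can test-and-clear in one call, since
 * a nonzero "reset" clears the change bit after it has been sampled:
 */
#if 0	/* example only */
	if (vmm_get_page_dirty_flag(act, index, ava, 1)) {	/* read, then clear, the change bit */
		/* the page was modified since the last check; write it back, etc. */
	}
#endif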


/*-----------------------------------------------------------------------
** vmm_protect_page
**
** This function sets the protection bits of a mapped page
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the vmm's address
**			space
**		prot  - protection flags
**
** Outputs:
**		none
**		Protection bits of the mapping are modified
**
-----------------------------------------------------------------------*/

kern_return_t vmm_protect_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	vm_prot_t		prot)
{
	vmmCntrlEntry		*CEntry;
	register mapping	*mpv, *mp;
	unsigned int		RC;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_protect_page: timeout locking physical entry for virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return KERN_FAILURE;				/* Bad hair day, return failure... */
	}
	if(!mp) return KERN_SUCCESS;				/* Not mapped, just return... */

	hw_prot_virt(mp, prot);					/* Set the protection */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	CEntry->vmmLastMap = va & -PAGE_SIZE;			/* Remember the last mapping we changed */
	CEntry->vmmFlags |= vmmMapDone;				/* Set that we did a map operation */

	return KERN_SUCCESS;					/* Return */
}


/*-----------------------------------------------------------------------
** vmm_protect_execute
**
** This function sets the protection bits of a mapped page
** and then directly starts executing.
**
** See description of vmm_protect_page for details.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_protect_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_protect_execute(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */

	if (CEntry == NULL) return kVmmBogusContext;		/* Return bogus context */

	ret = vmm_protect_page(act, index, va, prot);		/* Go try to change access */

	if(ret == KERN_SUCCESS) vmm_execute_vm(act, index);	/* Return was ok, launch the VM */

	return kVmmInvalidAddress;				/* We had trouble of some kind (shouldn't happen) */

}


/*-----------------------------------------------------------------------
** vmm_get_float_state
**
** This function causes the current floating point state to
** be saved into the shared context area. It also clears the
** vmmFloatCngd changed flag.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_float_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i;
	register struct savearea_fpu *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	act->mact.specFlags &= ~floatCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;	/* Clear the change indication */

	fpu_save(&CEntry->vmmFacCtx);				/* Save context if live */

	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCRshadow.i[0] = CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[0];	/* Copy FPSCR */
	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCRshadow.i[1] = CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[1];	/* Copy FPSCR */

	if(sv = CEntry->vmmFacCtx.FPUsave) {			/* Is there context yet? */
		bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8);	/* 32 registers */
		return KERN_SUCCESS;
	}


	for(i = 0; i < 32; i++) {				/* Initialize floating points */
		CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit;	/* Initial value */
	}

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_get_vector_state
**
** This function causes the current vector state to
** be saved into the shared context area. It also clears the
** vmmVectorCngd changed flag.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_vector_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i, j;
	unsigned int		vrvalidwrk;
	register struct savearea_vec *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	vec_save(&CEntry->vmmFacCtx);				/* Save context if live */

	act->mact.specFlags &= ~vectorCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;	/* Clear the change indication */

	for(j = 0; j < 4; j++) {				/* Set value for vscr */
		CEntry->vmmContextKern->vmm_proc_state.ppcVSCRshadow.i[j] = CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j];
	}

	if(sv = CEntry->vmmFacCtx.VMXsave) {			/* Is there context yet? */

		vrvalidwrk = sv->save_vrvalid;			/* Get the valid flags */

		for(i = 0; i < 32; i++) {			/* Copy the saved registers and invalidate the others */
			if(vrvalidwrk & 0x80000000) {		/* Do we have a valid value here? */
				for(j = 0; j < 4; j++) {	/* If so, copy it over */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
				}
			}
			else {
				for(j = 0; j < 4; j++) {	/* Otherwise set to empty value */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
				}
			}

			vrvalidwrk = vrvalidwrk << 1;		/* Shift over to the next */

		}

		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {				/* Initialize vector registers */
		for(j = 0; j < 4; j++) {			/* Do words */
			CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];	/* Initial value */
		}
	}

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_set_timer
**
** This function sets a timer (in AbsoluteTime) to pop at a specific
** time. The vmmTimerPop flag is cleared whenever the timer is
** actually set.
**
** A timer is cleared by setting the time to 0. This also clears the
** vmmTimerPop bit. Simply setting the timer to earlier than the
** current time clears the internal timer request, but leaves the
** vmmTimerPop flag set.
**
**
** Inputs:
**		act     - pointer to current thread activation structure
**		index   - index returned by vmm_init_context
**		timerhi - high order word of AbsoluteTime to pop
**		timerlo - low order word of AbsoluteTime to pop
**
** Outputs:
**		timer set, vmmTimerPop cleared
-----------------------------------------------------------------------*/

kern_return_t vmm_set_timer(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		timerhi,
	unsigned int		timerlo)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo;

	vmm_timer_pop(act);					/* Go adjust all of the timer stuff */
	return KERN_SUCCESS;					/* Leave now... */
}
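
/*
 * Illustrative sketch, not part of the original source: the two 32-bit
 * halves are recombined into a 64-bit AbsoluteTime exactly as shown
 * above, so a caller splits its deadline accordingly; 0/0 clears the
 * timer ("delta" below is a hypothetical interval in AbsoluteTime units):
 */
#if 0	/* example only */
	uint64_t deadline;

	clock_get_uptime(&deadline);				/* now... */
	deadline += delta;					/* ...plus the desired interval */

	vmm_set_timer(act, index, (unsigned int)(deadline >> 32), (unsigned int)deadline);
	vmm_set_timer(act, index, 0, 0);			/* later: cancel and clear vmmTimerPop */
#endif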


/*-----------------------------------------------------------------------
** vmm_get_timer
**
** This function causes the timer for a specified VM to be
** returned in return_params[0] and return_params[1].
**
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Timer value set in return_params[0] and return_params[1].
**		Set to 0 if timer is not set.
-----------------------------------------------------------------------*/

kern_return_t vmm_get_timer(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	CEntry->vmmContextKern->return_params[0] = (CEntry->vmmTimer >> 32);	/* Return the high half of the timer */
	CEntry->vmmContextKern->return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the low half of the timer */

	return KERN_SUCCESS;
}



/*-----------------------------------------------------------------------
** vmm_timer_pop
**
** This function causes all timers in the array of VMs to be updated.
** All appropriate flags are set or reset. If a VM is currently
** running and its timer expired, it is intercepted.
**
** The qactTimer value is set to the lowest unexpired timer. It is
** zeroed if all timers are expired or have been reset.
**
** Inputs:
**		act - pointer to current thread activation structure
**
** Outputs:
**		timers set, vmmTimerPop cleared or set
-----------------------------------------------------------------------*/

void vmm_timer_pop(
	thread_act_t		act)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi, any;
	uint64_t		now, soonest;
	savearea		*sv;

	if(!((unsigned int)act->mact.vmmControl & 0xFFFFFFFE)) {	/* Are there any virtual machines? */
		panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
	}

	soonest = 0xFFFFFFFFFFFFFFFFULL;			/* Max time */

	clock_get_uptime(&now);					/* What time is it? */

	CTable = act->mact.vmmControl;				/* Make this easier */
	any = 0;						/* Haven't found a running unexpired timer yet */

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Cycle through all and check time now */

		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;	/* Do not check if the entry is empty */

		if(CTable->vmmc[cvi].vmmTimer == 0) {		/* Is the timer reset? */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Clear timer popped */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Clear timer popped */
			continue;				/* Check next */
		}

		if (CTable->vmmc[cvi].vmmTimer <= now) {
			CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;	/* Set timer popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;	/* Set timer popped here */
			if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->mact.vmmCEntry) {	/* Is this the running VM? */
				sv = find_user_regs(act);	/* Get the user state registers */
				if(!sv) {			/* Did we find something? */
					panic("vmm_timer_pop: no user context; act = %08X\n", act);
				}
				sv->save_exception = kVmmReturnNull*4;	/* Indicate that this is a null exception */
				vmm_force_exit(act, sv);	/* Intercept a running VM */
			}
			continue;				/* Check the rest */
		}
		else {						/* It hasn't popped yet */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Set timer not popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Set timer not popped here */
		}

		any = 1;					/* Show we found an active unexpired timer */

		if (CTable->vmmc[cvi].vmmTimer < soonest)
			soonest = CTable->vmmc[cvi].vmmTimer;
	}

	if(any) {
		if (act->mact.qactTimer == 0 || soonest <= act->mact.qactTimer)
			act->mact.qactTimer = soonest;		/* Set lowest timer */
	}

	return;
}



/*-----------------------------------------------------------------------
** vmm_stop_vm
**
** This function prevents the specified VM(s) from running.
** If any is currently executing, the execution is intercepted
** with a code of kVmmStopped. Note that execution of the VM is
** blocked until a vmmExecuteVM is called with the start flag set to 1.
** This provides the ability for a thread to stop execution of a VM and
** ensure that it will not be run until the emulator has processed the
** "virtual" interruption.
**
** Inputs:
**		vmmask - 32 bit mask corresponding to the VMs to put in stop state
**		NOTE: if this mask is all 0s, any executing VM is intercepted with
**		a kVmmStopped (but not marked stopped); otherwise this is a no-op.
**		Also note that there is a potential race here and the VM may not stop.
**
** Outputs:
**		kernel return code indicating success
**		or if no VMs are enabled, an invalid syscall exception.
-----------------------------------------------------------------------*/

int vmm_stop_vm(struct savearea *save)
{

	thread_act_t		act;
	vmmCntrlTable		*CTable;
	int			cvi, i;
	task_t			task;
	thread_act_t		fact;
	unsigned int		vmmask;
	ReturnHandler		*stopapc;

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();					/* Figure out who we are */

	task_lock(task);					/* Lock our task */

	fact = (thread_act_t)task->thr_acts.next;		/* Get the first activation on task */
	act = 0;						/* Pretend we didn't find it yet */

	for(i = 0; i < task->thr_act_count; i++) {		/* All of the activations */
		if(fact->mact.vmmControl) {			/* Is this a virtual machine monitor? */
			act = fact;				/* Yeah... */
			break;					/* Bail the loop... */
		}
		fact = (thread_act_t)fact->thr_acts.next;	/* Go to the next one */
	}

	if(!((unsigned int)act)) {				/* See if we have VMMs yet */
		task_unlock(task);				/* No, unlock the task */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;					/* Go generate a syscall exception */
	}

	act_lock_thread(act);					/* Make sure this stays 'round */
	task_unlock(task);					/* Safe to release now */

	CTable = act->mact.vmmControl;				/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {			/* Are there any all the way up yet? */
		act_unlock_thread(act);				/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;					/* Go generate a syscall exception */
	}

	if(!(vmmask = save->save_r3)) {				/* Get the stop mask and check if all zeros */
		act_unlock_thread(act);				/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Set success */
		return 1;					/* Return... */
	}

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search slots */
		if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) {	/* See if we need to stop and if it is in use */
			hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop);	/* Set this one to stop */
		}
		vmmask = vmmask << 1;				/* Slide mask over */
	}

	if(hw_compare_and_store(0, 1, &act->mact.emPendRupts)) {	/* See if there is already a stop pending and lock out others if not */
		act_unlock_thread(act);				/* Already one pending, unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Say we did it... */
		return 1;					/* Leave */
	}

	if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) {	/* Get a return handler control block */
		act->mact.emPendRupts = 0;			/* No memory, say we have given up request */
		act_unlock_thread(act);				/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No storage... */
		return 1;					/* Return... */
	}

	ml_set_interrupts_enabled(FALSE);			/* Disable interruptions for now */

	stopapc->handler = vmm_interrupt;			/* Set interruption routine */

	stopapc->next = act->handlers;				/* Put our interrupt at the start of the list */
	act->handlers = stopapc;				/* Point to us */

	act_set_apc(act);					/* Set an APC AST */
	ml_set_interrupts_enabled(TRUE);			/* Enable interruptions now */

	act_unlock_thread(act);					/* Unlock the activation */

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;
}
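
/*
 * Illustrative sketch, not part of the original source: the mask in r3
 * is scanned from bit 31 downward, so 0x80000000 names context index 1,
 * 0x40000000 names index 2, and so on. Requesting a stop of contexts 1
 * and 3 would therefore pass:
 */
#if 0	/* example only */
	unsigned int vmmask = (0x80000000 >> (1 - 1))	/* context index 1 */
	                    | (0x80000000 >> (3 - 1));	/* context index 3 */
	save->save_r3 = vmmask;				/* stop mask consumed by vmm_stop_vm */
#endif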

/*-----------------------------------------------------------------------
** vmm_interrupt
**
** This function is executed asynchronously from an APC AST.
** It is to be used for anything that needs to interrupt a running VM.
** This includes any kind of interruption generation (other than timer pop)
** or entering the stopped state.
**
** Inputs:
**		ReturnHandler *rh - the return handler control block as required by the APC.
**		thread_act_t act  - the activation
**
** Outputs:
**		Whatever needed to be done is done.
-----------------------------------------------------------------------*/

void vmm_interrupt(ReturnHandler *rh, thread_act_t act) {

	vmmCntrlTable		*CTable;
	savearea		*sv;
	boolean_t		inter;

	kfree((vm_offset_t)rh, sizeof(ReturnHandler));		/* Release the return handler block */

	inter = ml_set_interrupts_enabled(FALSE);		/* Disable interruptions for now */

	act->mact.emPendRupts = 0;				/* Say that there are no more interrupts pending */
	CTable = act->mact.vmmControl;				/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {			/* Are we still doing VMs? */
		ml_set_interrupts_enabled(inter);		/* No, put interrupts back to what they were */
		return;						/* Leave if we aren't doing VMs any more... */
	}

	if(act->mact.vmmCEntry && (act->mact.vmmCEntry->vmmFlags & vmmXStop)) {	/* Do we need to stop the running guy? */
		sv = find_user_regs(act);			/* Get the user state registers */
		if(!sv) {					/* Did we find something? */
			panic("vmm_interrupt: no user context; act = %08X\n", act);
		}
		sv->save_exception = kVmmStopped*4;		/* Set a "stopped" exception */
		vmm_force_exit(act, sv);			/* Intercept a running VM */
	}
	ml_set_interrupts_enabled(inter);			/* Put interrupts back to what they were */

	return;
}