/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.c
**
** C routines that we are adding to the MacOS X kernel.
**
** Weird Apple PSL stuff goes here...
**
** Until then, Copyright 2000, Connectix
-----------------------------------------------------------------------*/

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread_act.h>
#include <ppc/pmap_internals.h>
#include <vm/vm_kern.h>

#include <ppc/vmachmon.h>

extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */
extern double FloatInit;
extern unsigned long QNaNbarbarian[4];

/*************************************************************************************
	Virtual Machine Monitor Internal Routines
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function verifies an index and returns the corresponding vmm
** context entry.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/

vmmCntrlEntry *vmm_get_entry(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlTable		*CTable;
	vmmCntrlEntry		*CEntry;

	if (act->mact.vmmControl == 0) return NULL;			/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContextsPerThread) return NULL;	/* Index not in range */

	CTable = act->mact.vmmControl;					/* Make the address a bit more convenient */
	CEntry = &CTable->vmmc[index - 1];				/* Point to the entry */

	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;		/* See if the slot is actually in use */

	return CEntry;
}



/*************************************************************************************
	Virtual Machine Monitor Exported Functionality

	The following routines are used to implement a quick-switch mechanism for
	virtual machines that need to execute within their own processor environment
	(including register and MMU state).
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for them.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing major/minor version of
**		the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_version(struct savearea *save)
{
	save->save_r3 = kVmmCurrentVersion;		/* Return the version */
	return 1;
}
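
/*
 * Illustrative sketch (not part of the original file): how a client might
 * decompose the 32-bit version word returned above. The helper names are
 * hypothetical; only the major/minor split is defined by this interface.
 */
#if 0	/* example only */
static unsigned int vmm_version_major(unsigned int version) {
	return version >> 16;				/* Top 16 bits are the major version */
}
static unsigned int vmm_version_minor(unsigned int version) {
	return version & 0xFFFF;			/* Bottom 16 bits are the minor version */
}
#endif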


/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing functionality supported by this
**		version of the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_features(struct savearea *save)
{
	save->save_r3 = kVmmCurrentFeatures;		/* Return the features */
	return 1;
}
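
/*
 * Illustrative sketch (not part of the original file): a client would AND the
 * returned feature word against the mask of features it needs before relying
 * on them; the helper below is a hypothetical convenience, not part of the
 * interface.
 */
#if 0	/* example only */
static boolean_t vmm_client_supported(unsigned int features, unsigned int required) {
	return (features & required) == required;	/* All required feature bits present? */
}
#endif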


/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**		act - pointer to current thread activation
**		version - requested version of the Vmm interface (allowing
**			future versions of the interface to change, but still
**			support older clients)
**		vmm_user_state - pointer to a logical page within the
**			client's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

int vmm_init_context(struct savearea *save)
{

	thread_act_t		act;
	vmm_version_t		version;
	vmm_state_page_t *	vmm_user_state;
	vmmCntrlTable		*CTable;
	vm_offset_t		conkern;
	vmm_state_page_t *	vks;
	vm_offset_t		conphys;
	kern_return_t		ret;
	pmap_t			new_pmap;
	int			cvi, i;
	task_t			task;
	thread_act_t		fact, gact;

	vmm_user_state = (vmm_state_page_t *)save->save_r4;	/* Get the user address of the comm area */
	if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {	/* Make sure the comm area is page aligned */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	/* If the client is requesting a newer major version than */
	/* we currently support, we'll have to fail. In the future, */
	/* we can add new major versions and support the older ones. */
	version = save->save_r3;				/* Pick up passed in version */
	if ((version >> 16) > (kVmmCurrentVersion >> 16)) {	/* Is the requested major version too new? */
		save->save_r3 = KERN_FAILURE;			/* Yes, return failure */
		return 1;
	}

	act = current_act();					/* Pick up our activation */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();					/* Figure out who we are */

	task_lock(task);					/* Lock our task */

	fact = (thread_act_t)task->thr_acts.next;		/* Get the first activation on task */
	gact = 0;						/* Pretend we didn't find it yet */

	for(i = 0; i < task->thr_act_count; i++) {		/* All of the activations */
		if(fact->mact.vmmControl) {			/* Is this a virtual machine monitor? */
			gact = fact;				/* Yeah... */
			break;					/* Bail the loop... */
		}
		fact = (thread_act_t)fact->thr_acts.next;	/* Go to the next one */
	}


	/*
	 * We only allow one thread per task to be a virtual machine monitor right now. This solves
	 * a number of potential problems that I can't put my finger on right now.
	 *
	 * Ultimately, I think we want to move the controls and make all this task based instead of
	 * thread based. That would allow an emulator architecture to spawn a kernel thread for each
	 * VM (if they want) rather than hand dispatch contexts.
	 */

	if(gact && (gact != act)) {				/* Check if another thread is a vmm or trying to be */
		task_unlock(task);				/* Release task lock */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_FAILURE;			/* We must play alone... */
		return 1;
	}

	if(!gact) act->mact.vmmControl = (vmmCntrlTable *)1;	/* Temporarily mark that we are the vmm thread */

	task_unlock(task);					/* Safe to release now (because we've marked ourselves) */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	if ((unsigned int)CTable == 1) {			/* If we are marked, try to allocate a new table, otherwise we have one */
		if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {	/* Get a fresh emulation control table */
			act->mact.vmmControl = 0;		/* Unmark us as vmm 'cause we failed */
			ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
			save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
			return 1;
		}

		bzero((void *)CTable, sizeof(vmmCntrlTable));	/* Clean it up */
		act->mact.vmmControl = CTable;			/* Initialize the table anchor */
	}

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search to find a free slot */
		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;	/* Bail if we find an unused slot */
	}

	if(cvi >= kVmmMaxContextsPerThread) {			/* Did we find one? */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No empty slots... */
		return 1;
	}

	ret = vm_map_wire(					/* Wire the virtual machine monitor's context area */
		act->map,
		(vm_offset_t)vmm_user_state,
		(vm_offset_t)vmm_user_state + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if (ret != KERN_SUCCESS)				/* The wire failed, return the code */
		goto return_in_shame;

	/* Map the vmm state into the kernel's address space. */
	conphys = pmap_extract(act->map->pmap, (vm_offset_t)vmm_user_state);

	/* Find a virtual address to use. */
	ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {				/* Did we find an address? */
		(void) vm_map_unwire(act->map,			/* No, unwire the context area */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		goto return_in_shame;
	}

	/* Map it into the kernel's address space. */
	pmap_enter(kernel_pmap, conkern, conphys, VM_PROT_READ | VM_PROT_WRITE, TRUE);

	/* Clear the vmm state structure. */
	vks = (vmm_state_page_t *)conkern;
	bzero((char *)vks, PAGE_SIZE);

	/* Allocate a new pmap for the new vmm context. */
	new_pmap = pmap_create(0);
	if (new_pmap == PMAP_NULL) {
		(void) vm_map_unwire(act->map,			/* Couldn't get a pmap, unwire the user page */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);

		kmem_free(kernel_map, conkern, PAGE_SIZE);	/* Release the kernel address */
		goto return_in_shame;
	}

	/* We're home free now. Simply fill in the necessary info and return. */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = cvi + 1;				/* Tell the user the index for this virtual machine */

	CTable->vmmc[cvi].vmmFlags = vmmInUse;			/* Mark the slot in use and make sure the rest are clear */
	CTable->vmmc[cvi].vmmPmap = new_pmap;			/* Remember the pmap for this guy */
	CTable->vmmc[cvi].vmmContextKern = vks;			/* Remember the kernel address of comm area */
	CTable->vmmc[cvi].vmmContextUser = vmm_user_state;	/* Remember user address of comm area */
	CTable->vmmc[cvi].vmmFPU_pcb = 0;			/* Clear saved floating point context */
	CTable->vmmc[cvi].vmmFPU_cpu = -1;			/* Invalidate the CPU the saved fp context is valid on */
	CTable->vmmc[cvi].vmmVMX_pcb = 0;			/* Clear saved vector context */
	CTable->vmmc[cvi].vmmVMX_cpu = -1;			/* Invalidate the CPU the saved vector context is valid on */

	hw_atomic_add(&saveanchor.saveneed, 2);			/* Account for the number of extra saveareas we think we might "need" */

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;

return_in_shame:
	if(!gact) kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table if we just allocated it */
	act->mact.vmmControl = 0;				/* Unmark us as vmm 'cause we failed */
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = ret;					/* Pass back return code... */
	return 1;

}
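
/*
 * Illustrative sketch (not part of the original file): the page-alignment
 * precondition that vmm_init_context enforces on the comm area. A client
 * would pass a page-aligned pointer in r4; anything else fails immediately.
 * The helper name is hypothetical.
 */
#if 0	/* example only */
static boolean_t vmm_comm_area_ok(vmm_state_page_t *p) {
	return ((unsigned int)p & (PAGE_SIZE - 1)) == 0;	/* Must sit on a page boundary */
}
#endif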


/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_tear_down_context(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi;
	register savearea	*sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	hw_atomic_sub(&saveanchor.saveneed, 2);			/* We don't need these extra saveareas anymore */

	if(CEntry->vmmFPU_pcb) {				/* Is there any floating point context? */
		sv = (savearea *)CEntry->vmmFPU_pcb;		/* Make useable */
		sv->save_flags &= ~SAVfpuvalid;			/* Clear in use bit */
		if(!(sv->save_flags & SAVinuse)) {		/* Anyone left with this one? */
			save_release(sv);			/* Nope, release it */
		}
	}

	if(CEntry->vmmVMX_pcb) {				/* Is there any vector context? */
		sv = (savearea *)CEntry->vmmVMX_pcb;		/* Make useable */
		sv->save_flags &= ~SAVvmxvalid;			/* Clear in use bit */
		if(!(sv->save_flags & SAVinuse)) {		/* Anyone left with this one? */
			save_release(sv);			/* Nope, release it */
		}
	}

	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	pmap_destroy(CEntry->vmmPmap);				/* Toss the pmap for this context */
	CEntry->vmmPmap = NULL;					/* Clean it up */

	(void) vm_map_unwire(					/* Unwire the user comm page */
		act->map,
		(vm_offset_t)CEntry->vmmContextUser,
		(vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
		FALSE);

	kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);	/* Remove kernel's view of the comm page */

	CEntry->vmmFlags = 0;					/* Clear out all of the flags for this entry including in use */
	CEntry->vmmPmap = 0;					/* Clear pmap pointer */
	CEntry->vmmContextKern = 0;				/* Clear the kernel address of comm area */
	CEntry->vmmContextUser = 0;				/* Clear the user address of comm area */
	CEntry->vmmFPU_pcb = 0;					/* Clear saved floating point context */
	CEntry->vmmFPU_cpu = -1;				/* Invalidate the CPU the saved fp context is valid on */
	CEntry->vmmVMX_pcb = 0;					/* Clear saved vector context */
	CEntry->vmmVMX_cpu = -1;				/* Invalidate the CPU the saved vector context is valid on */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search to see if any slots are still in use */
		if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {	/* Return if there are still some in use */
			ml_set_interrupts_enabled(FALSE);	/* No more interruptions */
			return KERN_SUCCESS;			/* Leave... */
		}
	}

	kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table because we tossed the last context */
	act->mact.vmmControl = 0;				/* Unmark us as vmm */

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**		activation to tear down
**
** Outputs:
**		All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/
void vmm_tear_down_all(thread_act_t act) {

	vmmCntrlTable		*CTable;
	int			cvi;
	kern_return_t		ret;
	savearea		*save;
	spl_t			s;

	if(act->mact.specFlags & runningVM) {			/* Are we actually in a context right now? */
		save = (savearea *)find_user_regs(act);		/* Find the user state context */
		if(!save) {					/* Did we find it? */
			panic("vmm_tear_down_all: runningVM marked but no user state context\n");
			return;
		}

		s = splhigh();					/* Make sure interrupts are off */
		vmm_force_exit(act, save);			/* Force an exit from VM state */
		splx(s);					/* Restore interrupts */
	}

	if(CTable = act->mact.vmmControl) {			/* Do we have a vmm control block? */

		for(cvi = 1; cvi <= kVmmMaxContextsPerThread; cvi++) {	/* Look at all slots */
			if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) {	/* Is this one in use? */
				ret = vmm_tear_down_context(act, cvi);	/* Take down the found context */
				if(ret != KERN_SUCCESS) {	/* Did it go away? */
					panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
					  ret, act, cvi);
				}
			}
		}
		if(act->mact.vmmControl) {			/* Did the control table go away? */
			panic("vmm_tear_down_all: control table did not get deallocated\n");	/* Table did not go away */
		}
	}

	return;
}

/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it, and this mapping, may disappear at any time, even before it gets used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the client's address
**			space (must be page aligned)
**		ava   - virtual address within the alternate address
**			space (must be page aligned)
**		prot  - protection flags
**
** Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
** areas are not allowed and will fail. Same with directly mapped I/O areas.
**
** Input conditions:
**		Interrupts disabled (from fast trap)
**
** Outputs:
**		kernel return code indicating success or failure
**		if success, va resident and alternate mapping made
-----------------------------------------------------------------------*/

kern_return_t vmm_map_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		cva,
	vm_offset_t		ava,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;
	vm_offset_t		phys_addr;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	struct phys_entry	*pp;
	pmap_t			mpmap;
	vm_map_t		map;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

/*
 *	Find out if we have already mapped the address and toss it out if so.
 */
	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, ava);	/* See if there is already a mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_map_page: timeout locking physical entry for alternate virtual address (%08X)\n", ava);	/* Yeah, scream about it! */
		return KERN_FAILURE;				/* Bad hair day, return failure... */
	}
	if(mp) {						/* If it was there, toss it */
		mpv = hw_cpv(mp);				/* Convert mapping block to virtual */
		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
		(void)mapping_remove(CEntry->vmmPmap, ava);	/* Throw away the mapping. we're about to replace it */
	}
	map = current_act()->map;				/* Get the current map */

	while(1) {						/* Keep trying until we get it or until we fail */
		if(hw_cvp_blk(map->pmap, cva)) return KERN_FAILURE;	/* Make sure that there is no block map at this address */

		mp = hw_lock_phys_vir(map->pmap->space, cva);	/* Lock the physical entry for emulator's page */
		if((unsigned int)mp&1) {			/* Did we timeout? */
			panic("vmm_map_page: timeout locking physical entry for emulator virtual address (%08X)\n", cva);	/* Yeah, scream about it! */
			return KERN_FAILURE;			/* Bad hair day, return failure... */
		}

		if(mp) {					/* We found it... */
			mpv = hw_cpv(mp);			/* Convert mapping block to virtual */
			if(!(mpv->PTEr & 1)) break;		/* If we are not write protected, we are ok... */
		}

		ml_set_interrupts_enabled(TRUE);		/* Enable interruptions */
		ret = vm_fault(map, trunc_page(cva), VM_PROT_READ | VM_PROT_WRITE, FALSE);	/* Didn't find it, try to fault it in read/write... */
		ml_set_interrupts_enabled(FALSE);		/* Disable interruptions */
		if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* There isn't a page there, return... */
	}

	if(!mpv->physent) {					/* Is this an I/O area, e.g., framebuffer? */
		return KERN_FAILURE;				/* Yes, we won't map it... */
	}

/*
 *	Now we make a mapping using all of the attributes of the source page except for protection.
 *	Also specify that the physical entry is locked.
 */
	nmpv = mapping_make(CEntry->vmmPmap, mpv->physent, (ava & -PAGE_SIZE),
		(mpv->physent->pte1 & -PAGE_SIZE), prot, ((mpv->physent->pte1 >> 3) & 0xF), 1);

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry now, we're done with it */

	CEntry->vmmLastMap = ava & -PAGE_SIZE;			/* Remember the last mapping we made */
	CEntry->vmmFlags |= vmmMapDone;				/* Set that we did a map operation */

	return KERN_SUCCESS;
}
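
/*
 * Illustrative sketch (not part of the original file): a monitor might shadow
 * a guest page by mapping one of its own pages into the alternate context.
 * The caller and its parameter names are hypothetical; only vmm_map_page and
 * its argument order come from this file.
 */
#if 0	/* example only */
static void example_shadow_page(thread_act_t act, vmm_thread_index_t index,
				vm_offset_t emulator_va, vm_offset_t guest_va) {
	kern_return_t kr;
	kr = vmm_map_page(act, index, emulator_va, guest_va,	/* Both addresses page aligned */
		VM_PROT_READ | VM_PROT_WRITE);
	if (kr != KERN_SUCCESS) {
		/* nested pmap, block mapping, or I/O area: cannot be mapped */
	}
}
#endif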


/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** This function determines whether the specified VMM
** virtual address is mapped.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the alternate's address
**			space
**
** Outputs:
**		Non-alternate's virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**		If there are aliases to the page in the non-alternate address space,
**		this call could return the wrong one. Moral of the story: no aliases.
-----------------------------------------------------------------------*/

vm_offset_t vmm_get_page_mapping(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	vm_offset_t		ova;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	pmap_t			pmap;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return -1;				/* No good, failure... */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_mapping: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return -1;					/* Bad hair day, return failure... */
	}
	if(!mp) return -1;					/* Not mapped, return -1 */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	pmap = current_act()->map->pmap;			/* Get the current pmap */
	ova = -1;						/* Assume failure for now */

	for(nmpv = hw_cpv(mpv->physent->phys_link); nmpv; nmpv = hw_cpv(nmpv->next)) {	/* Scan 'em all */

		if(nmpv->pmap != pmap) continue;		/* Skip all the rest if this is not the right pmap... */

		ova = ((((unsigned int)nmpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000;	/* Backward hash to the wrapped VADDR */
		ova = ova | ((nmpv->PTEv << 1) & 0xF0000000);	/* Move in the segment number */
		ova = ova | ((nmpv->PTEv << 22) & 0x0FC00000);	/* Add in the API for the top of the address */
		break;						/* We're done now, pass virtual address back */
	}

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	if(ova == -1) panic("vmm_get_page_mapping: could not back-map alternate va (%08X)\n", va);	/* We are bad wrong if we can't find it */

	return ova;
}
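
/*
 * Illustrative sketch (not part of the original file): the back-map above
 * reconstructs an effective address from a hashed page table entry. On this
 * class of PowerPC MMU the hash mixes the VSID with page-index bits, so those
 * bits can be recovered by XORing the hash with the VSID again; the segment
 * select and abbreviated page index (API) come from the first PTE word. A
 * rough, hypothetical model of that inversion:
 */
#if 0	/* example only */
static unsigned int back_map(unsigned int hash, unsigned int vsid,
			     unsigned int segment, unsigned int api) {
	unsigned int ea;
	ea  = (hash ^ vsid) & 0x003FF000;	/* Undo the hash to recover low page-index bits */
	ea |= segment << 28;			/* Top 4 bits select the segment register */
	ea |= api << 22;			/* API fills in the rest of the page index */
	return ea;
}
#endif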

/*-----------------------------------------------------------------------
** vmm_unmap_page
**
** This function unmaps a page from the alternate's logical
** address space.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the vmm's address
**			space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	boolean_t		ret;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	ret = mapping_remove(CEntry->vmmPmap, va);		/* Toss the mapping */

	return (ret ? KERN_SUCCESS : KERN_FAILURE);		/* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_all_pages
**
** This function unmaps all pages from the alternate's logical
** address space.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of context state
**
** Outputs:
**		none
**
** Note:
**		All pages are unmapped, but the address space (i.e., pmap) is still alive
-----------------------------------------------------------------------*/

void vmm_unmap_all_pages(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return;				/* Either this isn't vmm thread or the index is bogus */

/*
 *	Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
 */
	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	return;
}


/*-----------------------------------------------------------------------
** vmm_get_page_dirty_flag
**
** This function returns the changed flag of the page
** and optionally clears the flag.
**
** Inputs:
**		act   - pointer to current thread activation
**		index - index of vmm state for this page
**		va    - virtual address within the vmm's address
**			space
**		reset - clears dirty if true, leaves it untouched if not
**
** Outputs:
**		the dirty bit
**		clears the dirty bit in the pte if requested
**
** Note:
**		The RC bits are merged into the global physical entry
-----------------------------------------------------------------------*/

boolean_t vmm_get_page_dirty_flag(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	unsigned int		reset)
{
	vmmCntrlEntry		*CEntry;
	register mapping	*mpv, *mp;
	unsigned int		RC;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return 1;				/* Either this isn't vmm thread or the index is bogus */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_dirty_flag: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return 1;					/* Bad hair day, return dirty... */
	}
	if(!mp) return 1;					/* Not mapped, return dirty... */

	RC = hw_test_rc(mp, reset);				/* Fetch the RC bits and clear if requested */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	return (RC & 1);					/* Return the change bit */
}
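
/*
 * Illustrative sketch (not part of the original file): an emulator tracking
 * guest framebuffer writes might poll the change bit page by page, clearing
 * it as it goes. The caller, its bounds, and the reset policy are
 * hypothetical; note the conservative default above (unmapped reads as dirty).
 */
#if 0	/* example only */
static void example_scan_dirty(thread_act_t act, vmm_thread_index_t index,
			       vm_offset_t base, vm_offset_t size) {
	vm_offset_t page;
	for(page = base; page < base + size; page += PAGE_SIZE) {
		if(vmm_get_page_dirty_flag(act, index, page, 1)) {	/* Test and clear */
			/* redraw whatever this page backs */
		}
	}
}
#endif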

/*-----------------------------------------------------------------------
** vmm_get_float_state
**
** This function causes the current floating point state to
** be saved into the shared context area. It also clears the
** vmmFloatCngd changed flag.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_float_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i;
	register struct savearea *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	act->mact.specFlags &= ~floatCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;	/* Clear the change indication */

	if(sv = (struct savearea *)CEntry->vmmFPU_pcb) {	/* Is there context yet? */
		bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[0].d), sizeof(vmm_processor_state_t));	/* 32 registers plus status and pad */
		return KERN_SUCCESS;
	}

	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[0] = 0;	/* Clear FPSCR */
	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[1] = 0;	/* Clear FPSCR */

	for(i = 0; i < 32; i++) {				/* Initialize floating points */
		CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit;	/* Initial value */
	}

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_get_vector_state
**
** This function causes the current vector state to
** be saved into the shared context area. It also clears the
** vmmVectorCngd changed flag.
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_vector_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i, j;
	unsigned int		vrvalidwrk;
	register struct savearea *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	act->mact.specFlags &= ~vectorCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;	/* Clear the change indication */

	if(sv = (savearea *)CEntry->vmmVMX_pcb) {		/* Is there context yet? */

		vrvalidwrk = sv->save_vrvalid;			/* Get the valid flags */

		for(j = 0; j < 4; j++) {			/* Set value for vscr */
			CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j] = sv->save_vscr[j];
		}

		for(i = 0; i < 32; i++) {			/* Copy the saved registers and invalidate the others */
			if(vrvalidwrk & 0x80000000) {		/* Do we have a valid value here? */
				for(j = 0; j < 4; j++) {	/* If so, copy it over */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
				}
			}
			else {
				for(j = 0; j < 4; j++) {	/* Otherwise set to empty value */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
				}
			}

			vrvalidwrk = vrvalidwrk << 1;		/* Shift over to the next */

		}

		return KERN_SUCCESS;
	}

	for(j = 0; j < 4; j++) {				/* Initialize vscr to java mode */
		CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j] = 0;	/* Initial value */
	}

	for(i = 0; i < 32; i++) {				/* Initialize vector registers */
		for(j = 0; j < 4; j++) {			/* Do words */
			CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];	/* Initial value */
		}
	}

	return KERN_SUCCESS;
}
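
/*
 * Illustrative sketch (not part of the original file): save_vrvalid is a
 * 32-bit mask in which the high-order bit (0x80000000) corresponds to VR0,
 * and each left shift in the copy loop above moves to the next register.
 * The helper name is hypothetical.
 */
#if 0	/* example only */
static boolean_t vr_is_valid(unsigned int vrvalid, int vr) {
	return (vrvalid & (0x80000000U >> vr)) != 0;	/* VR0 is the high-order bit */
}
#endif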

/*-----------------------------------------------------------------------
** vmm_set_timer
**
** This function causes a timer (in AbsoluteTime) for a specific time
** to be set. It also clears the vmmTimerPop flag if the timer is actually
** set; the timer is cleared otherwise.
**
** A timer is cleared by setting the time to 0. This will clear
** the vmmTimerPop bit. Simply setting the timer to earlier than the
** current time clears the internal timer request, but leaves the
** vmmTimerPop flag set.
**
**
** Inputs:
**		act     - pointer to current thread activation structure
**		index   - index returned by vmm_init_context
**		timerhi - high order word of AbsoluteTime to pop
**		timerlo - low order word of AbsoluteTime to pop
**
** Outputs:
**		timer set, vmmTimerPop cleared
-----------------------------------------------------------------------*/

kern_return_t vmm_set_timer(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		timerhi,
	unsigned int		timerlo)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	CEntry->vmmTimer.hi = timerhi;				/* Set the high order part */
	CEntry->vmmTimer.lo = timerlo;				/* Set the low order part */

	vmm_timer_pop(act);					/* Go adjust all of the timer stuff */
	return KERN_SUCCESS;					/* Leave now... */
}
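
/*
 * Illustrative sketch (not part of the original file): a caller holding a
 * 64-bit absolute deadline would split it into the two 32-bit words this
 * interface takes. The wrapper and the unsigned long long type are
 * assumptions, not part of the interface.
 */
#if 0	/* example only */
static kern_return_t vmm_set_timer64(thread_act_t act, vmm_thread_index_t index,
				     unsigned long long deadline) {
	return vmm_set_timer(act, index,
		(unsigned int)(deadline >> 32),		/* High order word */
		(unsigned int)(deadline & 0xFFFFFFFF));	/* Low order word */
}
#endif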


/*-----------------------------------------------------------------------
** vmm_get_timer
**
** This function causes the timer for a specified VM to be
** returned in return_params[0] and return_params[1].
**
**
** Inputs:
**		act   - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Timer value set in return_params[0] and return_params[1].
**		Set to 0 if timer is not set.
-----------------------------------------------------------------------*/

kern_return_t vmm_get_timer(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	CEntry->vmmContextKern->return_params[0] = CEntry->vmmTimer.hi;	/* Return the last timer value */
	CEntry->vmmContextKern->return_params[1] = CEntry->vmmTimer.lo;	/* Return the last timer value */

	return KERN_SUCCESS;
}



/*-----------------------------------------------------------------------
** vmm_timer_pop
**
** This function causes all timers in the array of VMs to be updated.
** All appropriate flags are set or reset. If a VM is currently
** running and its timer expired, it is intercepted.
**
** The qactTimer value is set to the lowest unexpired timer. It is
** zeroed if all timers are expired or have been reset.
**
** Inputs:
**		act - pointer to current thread activation structure
**
** Outputs:
**		timers set, vmmTimerPop cleared or set
-----------------------------------------------------------------------*/

void vmm_timer_pop(
	thread_act_t		act)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi, any;
	AbsoluteTime		now, soonest;
	savearea		*sv;

	if(!((unsigned int)act->mact.vmmControl & 0xFFFFFFFE)) {	/* Are there any virtual machines? */
		panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
	}

	soonest.hi = 0xFFFFFFFF;				/* Max time */
	soonest.lo = 0xFFFFFFFF;				/* Max time */

	clock_get_uptime((AbsoluteTime *)&now);			/* What time is it? */

	CTable = act->mact.vmmControl;				/* Make this easier */
	any = 0;						/* Haven't found a running unexpired timer yet */

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Cycle through all and check time now */

		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;	/* Do not check if the entry is empty */

		if(!(CTable->vmmc[cvi].vmmTimer.hi | CTable->vmmc[cvi].vmmTimer.lo)) {	/* Is the timer reset? */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;		/* Clear timer popped */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Clear timer popped */
			continue;				/* Check next */
		}

		if (CMP_ABSOLUTETIME(&CTable->vmmc[cvi].vmmTimer, &now) <= 0) {
			CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;	/* Set timer popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;	/* Set timer popped here */
			if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->mact.vmmCEntry) {	/* Is this the running VM? */
				sv = (savearea *)find_user_regs(act);	/* Get the user state registers */
				if(!sv) {			/* Did we find something? */
					panic("vmm_timer_pop: no user context; act = %08X\n", act);
				}
				sv->save_exception = T_IN_VAIN;	/* Indicate that this is a null exception */
				vmm_force_exit(act, sv);	/* Intercept a running VM */
			}
			continue;				/* Check the rest */
		}
		else {						/* It hasn't popped yet */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Set timer not popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Set timer not popped here */
		}

		any = 1;					/* Show we found an active unexpired timer */

		if (CMP_ABSOLUTETIME(&CTable->vmmc[cvi].vmmTimer, &soonest) < 0) {
			soonest.hi = CTable->vmmc[cvi].vmmTimer.hi;	/* Set high order lowest timer */
			soonest.lo = CTable->vmmc[cvi].vmmTimer.lo;	/* Set low order lowest timer */
		}
	}

	if(any) {
		if (!(act->mact.qactTimer.hi | act->mact.qactTimer.lo) ||
			(CMP_ABSOLUTETIME(&soonest, &act->mact.qactTimer) <= 0)) {
			act->mact.qactTimer.hi = soonest.hi;	/* Set high order lowest timer */
			act->mact.qactTimer.lo = soonest.lo;	/* Set low order lowest timer */
		}
	}

	return;
}
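
/*
 * Illustrative sketch (not part of the original file): CMP_ABSOLUTETIME above
 * orders two {hi, lo} word pairs. A plausible model of that comparison,
 * assuming unsigned hi and lo fields, is the hypothetical helper below.
 */
#if 0	/* example only */
static int cmp_absolutetime(const AbsoluteTime *a, const AbsoluteTime *b) {
	if (a->hi != b->hi) return (a->hi < b->hi) ? -1 : 1;	/* High words differ */
	if (a->lo != b->lo) return (a->lo < b->lo) ? -1 : 1;	/* Tie-break on low words */
	return 0;						/* Equal */
}
#endif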