/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.c
**
** C routines that we are adding to the MacOS X kernel.
**
** Weird Apple PSL stuff goes here...
**
** Until then, Copyright 2000, Connectix
-----------------------------------------------------------------------*/
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread_act.h>
#include <ppc/pmap_internals.h>
#include <vm/vm_kern.h>

#include <ppc/vmachmon.h>

extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */
extern double FloatInit;
extern unsigned long QNaNbarbarian[4];
/*************************************************************************************
	Virtual Machine Monitor Internal Routines
**************************************************************************************/
/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function verifies a vmm context index and returns the address of
** the corresponding context entry.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/

vmmCntrlEntry *vmm_get_entry(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlTable		*CTable;
	vmmCntrlEntry		*CEntry;

	if (act->mact.vmmControl == 0) return NULL;		/* No control table means no vmm */

	if ((index - 1) >= kVmmMaxContextsPerThread) return NULL;	/* Index not in range */

	CTable = act->mact.vmmControl;				/* Make the address a bit more convenient */
	CEntry = &CTable->vmmc[index - 1];			/* Point to the entry */

	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;	/* See if the slot is actually in use */

	return CEntry;						/* Return the validated entry */
}
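/*
 * Usage sketch (hypothetical caller, for illustration only): every exported
 * routine below validates its one-based index this way before touching a
 * context, so a bad index degrades to a simple failure return.
 *
 *	vmmCntrlEntry *CEntry;
 *
 *	CEntry = vmm_get_entry(current_act(), index);	// index is one-based
 *	if (CEntry == NULL) return KERN_FAILURE;	// not a vmm thread, or bogus index
 */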
/*************************************************************************************
	Virtual Machine Monitor Exported Functionality

	The following routines are used to implement a quick-switch mechanism for
	virtual machines that need to execute within their own processor environment
	(including register and MMU state).
**************************************************************************************/
/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for them.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing major/minor version of
**		the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_version(struct savearea *save)
{
	save->save_r3 = kVmmCurrentVersion;			/* Return the version */
	return 1;
}
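/*
 * Client-side sketch (hypothetical; assumes the packed version word has
 * already been fetched through the user-level dispatch path): unpack the
 * two halves before comparing against the version the client was built for.
 *
 *	unsigned short major = version >> 16;		// top 16 bits
 *	unsigned short minor = version & 0xFFFF;	// bottom 16 bits
 */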
/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing functionality supported by this
**		version of the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_features(struct savearea *save)
{
	save->save_r3 = kVmmCurrentFeatures;			/* Return the features */
	return 1;
}
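/*
 * Client-side sketch (hypothetical; the feature bit name below is
 * illustrative, not defined in this file): test a capability bit before
 * depending on it.
 *
 *	if (!(features & kVmmFeature_SomeAssist))	// hypothetical feature bit
 *		fall_back_to_slow_path();		// hypothetical fallback
 */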
/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**		act - pointer to current thread activation
**		version - requested version of the Vmm interface (allowing
**			future versions of the interface to change, but still
**			support older clients)
**		vmm_user_state - pointer to a logical page within the
**			client's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/
int vmm_init_context(struct savearea *save)
{
	thread_act_t		act;
	vmm_version_t		version;
	vmm_state_page_t	*vmm_user_state;
	vmmCntrlTable		*CTable;
	vm_offset_t		conkern;
	vmm_state_page_t	*vks;
	vm_offset_t		conphys;
	kern_return_t		ret;
	pmap_t			new_pmap;
	int			cvi, i;
	task_t			task;
	thread_act_t		fact, gact;
	vmm_user_state = (vmm_state_page_t *)save->save_r4;	/* Get the user address of the comm area */
	if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {	/* Make sure the comm area is page aligned */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	/* If the client is requesting a newer major version than */
	/* we currently support, we'll have to fail. In the future, */
	/* we can add new major versions and support the older ones. */
	version = save->save_r3;				/* Pick up passed in version */
	if ((version >> 16) > (kVmmCurrentVersion >> 16)) {	/* Is the requested major version too new? */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}
	act = current_act();					/* Pick up our activation */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();					/* Figure out who we are */

	task_lock(task);					/* Lock our task */
	fact = (thread_act_t)task->thr_acts.next;		/* Get the first activation on task */
	gact = 0;						/* Pretend we didn't find it yet */

	for(i = 0; i < task->thr_act_count; i++) {		/* All of the activations */
		if(fact->mact.vmmControl) {			/* Is this a virtual machine monitor? */
			gact = fact;				/* Yeah... */
			break;					/* Bail the loop... */
		}
		fact = (thread_act_t)fact->thr_acts.next;	/* Go to the next one */
	}
/*
 *	We only allow one thread per task to be a virtual machine monitor right now. This solves
 *	a number of potential problems that I can't put my finger on right now.
 *
 *	Ultimately, I think we want to move the controls and make all this task based instead of
 *	thread based. That would allow an emulator architecture to spawn a kernel thread for each
 *	VM (if they want) rather than hand dispatch contexts.
 */
	if(gact && (gact != act)) {				/* Check if another thread is a vmm or trying to be */
		task_unlock(task);				/* Release task lock */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_FAILURE;			/* We must play alone... */
		return 1;
	}

	if(!gact) act->mact.vmmControl = (vmmCntrlTable *)1;	/* Temporarily mark that we are the vmm thread */

	task_unlock(task);					/* Safe to release now (because we've marked ourselves) */
	CTable = act->mact.vmmControl;				/* Get the control table address */
	if ((unsigned int)CTable == 1) {			/* If we are marked, try to allocate a new table, otherwise we have one */
		if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {	/* Get a fresh emulation control table */
			act->mact.vmmControl = 0;		/* Unmark us as vmm 'cause we failed */
			ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
			save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
			return 1;
		}

		bzero((void *)CTable, sizeof(vmmCntrlTable));	/* Clean it up */
		act->mact.vmmControl = CTable;			/* Initialize the table anchor */
	}

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search to find a free slot */
		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;	/* Bail if we find an unused slot */
	}

	if(cvi >= kVmmMaxContextsPerThread) {			/* Did we find one? */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No empty slots... */
		return 1;
	}
	ret = vm_map_wire(					/* Wire the virtual machine monitor's context area */
		act->map,
		(vm_offset_t)vmm_user_state,
		(vm_offset_t)vmm_user_state + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if (ret != KERN_SUCCESS)				/* The wire failed, return the code */
		goto return_in_shame;
	/* Map the vmm state into the kernel's address space. */
	conphys = pmap_extract(act->map->pmap, (vm_offset_t)vmm_user_state);

	/* Find a virtual address to use. */
	ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {				/* Did we find an address? */
		(void) vm_map_unwire(act->map,			/* No, unwire the context area */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		goto return_in_shame;
	}

	/* Map it into the kernel's address space. */
	pmap_enter(kernel_pmap, conkern, conphys, VM_PROT_READ | VM_PROT_WRITE, TRUE);
	/* Clear the vmm state structure. */
	vks = (vmm_state_page_t *)conkern;
	bzero((char *)vks, PAGE_SIZE);
	/* Allocate a new pmap for the new vmm context. */
	new_pmap = pmap_create(0);
	if (new_pmap == PMAP_NULL) {
		(void) vm_map_unwire(act->map,			/* Couldn't get a pmap, unwire the user page */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		kmem_free(kernel_map, conkern, PAGE_SIZE);	/* Release the kernel address */
		goto return_in_shame;
	}
	/* We're home free now. Simply fill in the necessary info and return. */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = cvi + 1;				/* Tell the user the index for this virtual machine */

	CTable->vmmc[cvi].vmmFlags = vmmInUse;			/* Mark the slot in use and make sure the rest are clear */
	CTable->vmmc[cvi].vmmPmap = new_pmap;			/* Remember the pmap for this guy */
	CTable->vmmc[cvi].vmmContextKern = vks;			/* Remember the kernel address of comm area */
	CTable->vmmc[cvi].vmmContextUser = vmm_user_state;	/* Remember user address of comm area */
	CTable->vmmc[cvi].vmmFPU_pcb = 0;			/* Clear saved floating point context */
	CTable->vmmc[cvi].vmmFPU_cpu = -1;			/* Invalidate the CPU that the saved fp context is valid on */
	CTable->vmmc[cvi].vmmVMX_pcb = 0;			/* Clear saved vector context */
	CTable->vmmc[cvi].vmmVMX_cpu = -1;			/* Invalidate the CPU that the saved vector context is valid on */

	hw_atomic_add(&saveanchor.saveneed, 2);			/* Account for the number of extra saveareas we think we might "need" */

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;
return_in_shame:
	if(!gact) kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table if we just allocated it */
	act->mact.vmmControl = 0;				/* Unmark us as vmm 'cause we failed */
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = ret;					/* Pass back return code... */
	return 1;
}
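/*
 * Client-side sketch (hypothetical; vm_allocate guarantees the page
 * alignment this call insists on, and error checks are elided): set up the
 * comm area and request a context at the current interface version.
 *
 *	vmm_state_page_t *comm;
 *
 *	vm_allocate(mach_task_self(), (vm_address_t *)&comm, PAGE_SIZE, TRUE);
 *	// ...invoke the kVmmInitContext dispatch with kVmmCurrentVersion and comm...
 *	// on success, comm->thread_index holds the one-based index for later calls
 */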
/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_tear_down_context(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi;
	register savearea	*sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	hw_atomic_sub(&saveanchor.saveneed, 2);			/* We don't need these extra saveareas anymore */
	if(CEntry->vmmFPU_pcb) {				/* Is there any floating point context? */
		sv = (savearea *)CEntry->vmmFPU_pcb;		/* Make it usable */
		sv->save_flags &= ~SAVfpuvalid;			/* Clear in use bit */
		if(!(sv->save_flags & SAVinuse)) {		/* Anyone left with this one? */
			save_release(sv);			/* Nope, release it */
		}
	}

	if(CEntry->vmmVMX_pcb) {				/* Is there any vector context? */
		sv = (savearea *)CEntry->vmmVMX_pcb;		/* Make it usable */
		sv->save_flags &= ~SAVvmxvalid;			/* Clear in use bit */
		if(!(sv->save_flags & SAVinuse)) {		/* Anyone left with this one? */
			save_release(sv);			/* Nope, release it */
		}
	}
	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	pmap_destroy(CEntry->vmmPmap);				/* Toss the pmap for this context */
	CEntry->vmmPmap = NULL;					/* Clean it up */

	(void) vm_map_unwire(					/* Unwire the user comm page */
		act->map,
		(vm_offset_t)CEntry->vmmContextUser,
		(vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
		FALSE);

	kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);	/* Remove kernel's view of the comm page */
	CEntry->vmmFlags = 0;					/* Clear out all of the flags for this entry including in use */
	CEntry->vmmPmap = 0;					/* Clear pmap pointer */
	CEntry->vmmContextKern = 0;				/* Clear the kernel address of comm area */
	CEntry->vmmContextUser = 0;				/* Clear the user address of comm area */

	CEntry->vmmFPU_pcb = 0;					/* Clear saved floating point context */
	CEntry->vmmFPU_cpu = -1;				/* Invalidate the CPU that the saved fp context is valid on */
	CEntry->vmmVMX_pcb = 0;					/* Clear saved vector context */
	CEntry->vmmVMX_cpu = -1;				/* Invalidate the CPU that the saved vector context is valid on */
	CTable = act->mact.vmmControl;				/* Get the control table address */
	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Scan to see if any slots are still in use */
		if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {	/* Return if there are still some in use */
			ml_set_interrupts_enabled(FALSE);	/* No more interruptions */
			return KERN_SUCCESS;			/* Leave... */
		}
	}

	kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table because we tossed the last context */
	act->mact.vmmControl = 0;				/* Unmark us as vmm */

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return KERN_SUCCESS;
}
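/*
 * Usage sketch (hypothetical): contexts are torn down individually by index;
 * releasing the last in-use slot also frees the control table, as coded above.
 *
 *	if (vmm_tear_down_context(current_act(), index) != KERN_SUCCESS) {
 *		// index was stale or this activation was never a vmm
 *	}
 */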
/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**		act - activation to tear down
**
** Outputs:
**		All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/

void vmm_tear_down_all(thread_act_t act) {

	vmmCntrlTable		*CTable;
	int			cvi;
	kern_return_t		ret;
	savearea		*save;
	spl_t			s;
	if(act->mact.specFlags & runningVM) {			/* Are we actually in a context right now? */
		save = (savearea *)find_user_regs(act);		/* Find the user state context */
		if(!save) {					/* Did we find it? */
			panic("vmm_tear_down_all: runningVM marked but no user state context\n");
			return;
		}

		s = splhigh();					/* Make sure interrupts are off */
		vmm_force_exit(act, save);			/* Force an exit from VM state */
		splx(s);					/* Restore interrupts */
	}
	if(CTable = act->mact.vmmControl) {			/* Do we have a vmm control block? */

		for(cvi = 1; cvi <= kVmmMaxContextsPerThread; cvi++) {	/* Look at all slots */
			if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) {	/* Is this one in use? */
				ret = vmm_tear_down_context(act, cvi);	/* Take down the found context */
				if(ret != KERN_SUCCESS) {	/* Did it go away? */
					panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
						ret, act, cvi);
				}
			}
		}

		if(act->mact.vmmControl) {			/* Did the last teardown free the table? */
			panic("vmm_tear_down_all: control table did not get deallocated\n");	/* Table did not go away */
		}
	}

	return;
}
/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it and this mapping may disappear at any time, even before it gets used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		cva - virtual address within the client's address
**			space (must be page aligned)
**		ava - virtual address within the alternate address
**			space (must be page aligned)
**		prot - protection flags
**
**		Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
**		areas are not allowed and will fail. Same with directly mapped I/O areas.
**
** Input conditions:
**		Interrupts disabled (from fast trap)
**
** Outputs:
**		kernel return code indicating success or failure
**		if success, cva resident and alternate mapping made
-----------------------------------------------------------------------*/
kern_return_t vmm_map_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		cva,
	vm_offset_t		ava,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;
	vm_offset_t		phys_addr;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	struct phys_entry	*pp;
	vm_map_t		map;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */
/*
 *	Find out if we have already mapped the address and toss it out if so.
 */
	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, ava);	/* See if there is already a mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_map_page: timeout locking physical entry for alternate virtual address (%08X)\n", ava);	/* Yeah, scream about it! */
		return KERN_FAILURE;				/* Bad hair day, return FALSE... */
	}
	if(mp) {						/* If it was there, toss it */
		mpv = hw_cpv(mp);				/* Convert mapping block to virtual */
		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
		(void)mapping_remove(CEntry->vmmPmap, ava);	/* Throw away the mapping. we're about to replace it */
	}

	map = current_act()->map;				/* Get the current map */
	while(1) {						/* Keep trying until we get it or until we fail */
		if(hw_cvp_blk(map->pmap, cva)) return KERN_FAILURE;	/* Make sure that there is no block map at this address */

		mp = hw_lock_phys_vir(map->pmap->space, cva);	/* Lock the physical entry for emulator's page */
		if((unsigned int)mp & 1) {			/* Did we timeout? */
			panic("vmm_map_page: timeout locking physical entry for emulator virtual address (%08X)\n", cva);	/* Yeah, scream about it! */
			return KERN_FAILURE;			/* Bad hair day, return FALSE... */
		}

		if(mp) {					/* We found it... */
			mpv = hw_cpv(mp);			/* Convert mapping block to virtual */
			if(!(mpv->PTEr & 1)) break;		/* If we are not write protected, we are ok... */
			hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Drop the lock before we fault the page writable */
		}

		ml_set_interrupts_enabled(TRUE);		/* Enable interruptions */
		ret = vm_fault(map, trunc_page(cva), VM_PROT_READ | VM_PROT_WRITE, FALSE);	/* Didn't find it, try to fault it in read/write... */
		ml_set_interrupts_enabled(FALSE);		/* Disable interruptions */
		if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* There isn't a page there, return... */
	}

	if(!mpv->physent) {					/* Is this an I/O area, e.g., framebuffer? */
		return KERN_FAILURE;				/* Yes, we won't map it... */
	}
/*
 *	Now we make a mapping using all of the attributes of the source page except for protection.
 *	Also specify that the physical entry is locked.
 */
	nmpv = mapping_make(CEntry->vmmPmap, mpv->physent, (ava & -PAGE_SIZE),
		(mpv->physent->pte1 & -PAGE_SIZE), prot, ((mpv->physent->pte1 >> 3) & 0xF), 1);

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry now, we're done with it */

	CEntry->vmmLastMap = ava & -PAGE_SIZE;			/* Remember the last mapping we made */
	CEntry->vmmFlags |= vmmMapDone;				/* Set that we did a map operation */

	return KERN_SUCCESS;
}
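/*
 * Usage sketch (hypothetical): mirror one client page into the alternate
 * space before dispatching the VM. Remember from the notes above that an
 * unwired page (and its RC bits) can vanish at any time.
 *
 *	if (vmm_map_page(act, index, cva, ava, VM_PROT_READ | VM_PROT_WRITE)
 *			!= KERN_SUCCESS) {
 *		// nested pmap, block mapping, or I/O area; cannot be mapped
 *	}
 */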
/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** This function determines whether the specified VMM
** virtual address is mapped.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the alternate's address
**			space
**
** Outputs:
**		Non-alternate's virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**		If there are aliases to the page in the non-alternate address space,
**		this call could return the wrong one. Moral of the story: no aliases.
-----------------------------------------------------------------------*/

vm_offset_t vmm_get_page_mapping(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	vm_offset_t		ova;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	pmap_t			pmap;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return -1;				/* No good, failure... */
	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_mapping: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return -1;					/* Bad hair day, return FALSE... */
	}
	if(!mp) return -1;					/* Not mapped, return -1 */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	pmap = current_act()->map->pmap;			/* Get the current pmap */
	ova = -1;						/* Assume failure for now */
	for(nmpv = hw_cpv(mpv->physent->phys_link); nmpv; nmpv = hw_cpv(nmpv->next)) {	/* Scan 'em all */

		if(nmpv->pmap != pmap) continue;		/* Skip all the rest if this is not the right pmap... */

		ova = ((((unsigned int)nmpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000;	/* Backward hash to the wrapped VADDR */
		ova = ova | ((nmpv->PTEv << 1) & 0xF0000000);	/* Move in the segment number */
		ova = ova | ((nmpv->PTEv << 22) & 0x0FC00000);	/* Add in the API for the top of the address */
		break;						/* We're done now, pass virtual address back */
	}

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	if(ova == -1) panic("vmm_get_page_mapping: could not back-map alternate va (%08X)\n", va);	/* We are bad wrong if we can't find it */

	return ova;
}
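/*
 * Usage sketch (hypothetical): recover the client address that backs an
 * alternate-space page. Per the note above, aliases make the answer
 * ambiguous, so monitors should avoid mapping the same page twice.
 *
 *	vm_offset_t cva;
 *
 *	cva = vmm_get_page_mapping(act, index, ava);
 *	if (cva != (vm_offset_t)-1) {
 *		// cva is the page-aligned client address behind ava
 *	}
 */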
/*-----------------------------------------------------------------------
** vmm_unmap_page
**
** This function unmaps a page from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	boolean_t		ret;
	kern_return_t		kern_result = KERN_SUCCESS;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	ret = mapping_remove(CEntry->vmmPmap, va);		/* Toss the mapping */

	return (ret ? KERN_SUCCESS : KERN_FAILURE);		/* Return... */
}
/*-----------------------------------------------------------------------
** vmm_unmap_all_pages
**
** This function unmaps all pages from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of context state
**
** Outputs:
**		none
**
** Note:
**		All pages are unmapped, but the address space (i.e., pmap) is still alive
-----------------------------------------------------------------------*/

void vmm_unmap_all_pages(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return;				/* Either this isn't a vmm thread or the index is bogus */

/*
 *	Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
 */
	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	return;
}
/*-----------------------------------------------------------------------
** vmm_get_page_dirty_flag
**
** This function returns the changed flag of the page
** and optionally clears the flag.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**		reset - clears dirty if true, leaves it untouched if not
**
** Outputs:
**		the dirty bit; the dirty bit in the pte is cleared if requested
**
** Note:
**		The RC bits are merged into the global physical entry
-----------------------------------------------------------------------*/

boolean_t vmm_get_page_dirty_flag(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	unsigned int		reset)
{
	vmmCntrlEntry		*CEntry;
	register mapping	*mpv, *mp;
	unsigned int		RC;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return 1;				/* Either this isn't a vmm thread or the index is bogus */
	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_dirty_flag: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return 1;					/* Bad hair day, return dirty... */
	}
	if(!mp) return 1;					/* Not mapped, return dirty... */

	RC = hw_test_rc(mp, reset);				/* Fetch the RC bits and clear if requested */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	return (RC & 1);					/* Return the change bit */
}
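/*
 * Usage sketch (hypothetical): a monitor scanning guest framebuffer pages,
 * clearing the change bit as it goes so only fresh writes show up on the
 * next pass. Note that an unmapped page conservatively reads back as dirty.
 *
 *	if (vmm_get_page_dirty_flag(act, index, va, 1)) {
 *		// page changed since the last scan; redraw it
 *	}
 */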
/*-----------------------------------------------------------------------
** vmm_get_float_state
**
** This function causes the current floating point state to
** be saved into the shared context area. It also clears the
** vmmFloatCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_float_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i;
	register struct savearea *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */
	act->mact.specFlags &= ~floatCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;	/* Clear the change indication */

	if(sv = (struct savearea *)CEntry->vmmFPU_pcb) {	/* Is there context yet? */
		bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[0].d), sizeof(vmm_processor_state_t));	/* 32 registers plus status and pad */
		return KERN_SUCCESS;
	}

	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[0] = 0;	/* Clear FPSCR */
	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[1] = 0;	/* Clear FPSCR */

	for(i = 0; i < 32; i++) {				/* Initialize floating points */
		CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit;	/* Initial value */
	}

	return KERN_SUCCESS;
}
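/*
 * Usage sketch (hypothetical): after requesting the float state, the monitor
 * reads registers straight out of the comm page it handed to
 * vmm_init_context; comm is the user-space alias of vmmContextKern.
 *
 *	double fr3;
 *
 *	// ...invoke the get-float-state dispatch for this index...
 *	fr3 = comm->vmm_proc_state.ppcFPRs[3].d;	// guest FPR3
 */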
/*-----------------------------------------------------------------------
** vmm_get_vector_state
**
** This function causes the current vector state to
** be saved into the shared context area. It also clears the
** vmmVectorCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_vector_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i, j;
	unsigned int		vrvalidwrk;
	register struct savearea *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	act->mact.specFlags &= ~vectorCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;	/* Clear the change indication */
	if(sv = (savearea *)CEntry->vmmVMX_pcb) {		/* Is there context yet? */

		vrvalidwrk = sv->save_vrvalid;			/* Get the valid flags */

		for(j = 0; j < 4; j++) {			/* Set value for vscr */
			CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j] = sv->save_vscr[j];
		}

		for(i = 0; i < 32; i++) {			/* Copy the saved registers and invalidate the others */
			if(vrvalidwrk & 0x80000000) {		/* Do we have a valid value here? */
				for(j = 0; j < 4; j++) {	/* If so, copy it over */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
				}
			}
			else {
				for(j = 0; j < 4; j++) {	/* Otherwise set to empty value */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
				}
			}

			vrvalidwrk = vrvalidwrk << 1;		/* Shift over to the next */
		}

		return KERN_SUCCESS;
	}

	for(j = 0; j < 4; j++) {				/* Initialize vscr to java mode */
		CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j] = 0;	/* Initial value */
	}

	for(i = 0; i < 32; i++) {				/* Initialize vector registers */
		for(j = 0; j < 4; j++) {			/* Do words */
			CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];	/* Initial value */
		}
	}

	return KERN_SUCCESS;
}
/*-----------------------------------------------------------------------
** vmm_set_timer
**
** This function causes a timer (in AbsoluteTime) for a specific time
** to be set. It also clears the vmmTimerPop flag if the timer is actually
** set; the timer is cleared otherwise.
**
** A timer is cleared by setting the time to 0. This will clear
** the vmmTimerPop bit. Simply setting the timer to earlier than the
** current time clears the internal timer request, but leaves the
** vmmTimerPop flag set.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**		timerhi - high order word of AbsoluteTime to pop
**		timerlo - low order word of AbsoluteTime to pop
**
** Outputs:
**		timer set, vmmTimerPop cleared
-----------------------------------------------------------------------*/

kern_return_t vmm_set_timer(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		timerhi,
	unsigned int		timerlo)
{
	vmmCntrlEntry		*CEntry;
	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	CEntry->vmmTimer.hi = timerhi;				/* Set the high order part */
	CEntry->vmmTimer.lo = timerlo;				/* Set the low order part */

	vmm_timer_pop(act);					/* Go adjust all of the timer stuff */
	return KERN_SUCCESS;					/* Leave now... */
}
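/*
 * Usage sketch (hypothetical): disarm a context's timer. Per the note
 * above, only a time of 0 clears vmmTimerPop; merely setting a time that
 * has already passed drops the request but leaves the flag set.
 *
 *	vmm_set_timer(act, index, 0, 0);	// clear timer and vmmTimerPop
 */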
/*-----------------------------------------------------------------------
** vmm_get_timer
**
** This function causes the timer for a specified VM to be
** returned in return_params[0] and return_params[1].
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Timer value set in return_params[0] and return_params[1].
**		Set to 0 if timer is not set.
-----------------------------------------------------------------------*/

kern_return_t vmm_get_timer(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	CEntry->vmmContextKern->return_params[0] = CEntry->vmmTimer.hi;	/* Return the high word of the timer */
	CEntry->vmmContextKern->return_params[1] = CEntry->vmmTimer.lo;	/* Return the low word of the timer */

	return KERN_SUCCESS;
}
/*-----------------------------------------------------------------------
** vmm_timer_pop
**
** This function causes all timers in the array of VMs to be updated.
** All appropriate flags are set or reset. If a VM is currently
** running and its timer expired, it is intercepted.
**
** The qactTimer value is set to the lowest unexpired timer. It is
** zeroed if all timers are expired or have been reset.
**
** Inputs:
**		act - pointer to current thread activation structure
**
** Outputs:
**		timers set, vmmTimerPop cleared or set
-----------------------------------------------------------------------*/

void vmm_timer_pop(
	thread_act_t		act)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi, any;
	AbsoluteTime		now, soonest;
	savearea		*sv;

	if(!((unsigned int)act->mact.vmmControl & 0xFFFFFFFE)) {	/* Are there any virtual machines? */
		panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
	}
	soonest.hi = 0xFFFFFFFF;				/* Max time */
	soonest.lo = 0xFFFFFFFF;				/* Max time */

	clock_get_uptime((AbsoluteTime *)&now);			/* What time is it? */

	CTable = act->mact.vmmControl;				/* Make this easier */
	any = 0;						/* Haven't found a running unexpired timer yet */
	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Cycle through all and check time now */

		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;	/* Do not check if the entry is empty */

		if(!(CTable->vmmc[cvi].vmmTimer.hi | CTable->vmmc[cvi].vmmTimer.lo)) {	/* Is the timer reset? */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Clear timer popped */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Clear timer popped */
			continue;				/* Check next */
		}

		if (CMP_ABSOLUTETIME(&CTable->vmmc[cvi].vmmTimer, &now) <= 0) {
			CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;	/* Set timer popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;	/* Set timer popped here */
			if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->mact.vmmCEntry) {	/* Is this the running VM? */
				sv = (savearea *)find_user_regs(act);	/* Get the user state registers */
				if(!sv) {			/* Did we find something? */
					panic("vmm_timer_pop: no user context; act = %08X\n", act);
				}
				sv->save_exception = T_IN_VAIN;	/* Indicate that this is a null exception */
				vmm_force_exit(act, sv);	/* Intercept a running VM */
			}
			continue;				/* Check the rest */
		}
		else {						/* It hasn't popped yet */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Set timer not popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Set timer not popped here */
		}

		any = 1;					/* Show we found an active unexpired timer */

		if (CMP_ABSOLUTETIME(&CTable->vmmc[cvi].vmmTimer, &soonest) < 0) {
			soonest.hi = CTable->vmmc[cvi].vmmTimer.hi;	/* Set high order lowest timer */
			soonest.lo = CTable->vmmc[cvi].vmmTimer.lo;	/* Set low order lowest timer */
		}
	}
	if(any) {						/* Did we find an active unexpired timer? */
		if (!(act->mact.qactTimer.hi | act->mact.qactTimer.lo) ||
			(CMP_ABSOLUTETIME(&soonest, &act->mact.qactTimer) <= 0)) {
			act->mact.qactTimer.hi = soonest.hi;	/* Set high order lowest timer */
			act->mact.qactTimer.lo = soonest.lo;	/* Set low order lowest timer */
		}
	}

	return;
}