/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** C routines that we are adding to the MacOS X kernel.
**
** Weird Apple PSL stuff goes here...
**
** Until then, Copyright 2000, Connectix
-----------------------------------------------------------------------*/
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread_act.h>
#include <ppc/pmap_internals.h>
#include <vm/vm_kern.h>

#include <ppc/vmachmon.h>
extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */
extern double FloatInit;
extern unsigned long QNaNbarbarian[4];
/*************************************************************************************
	Virtual Machine Monitor Internal Routines
**************************************************************************************/
/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function verifies and returns a vmm context entry index
**
** Inputs:
**		act - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/
vmmCntrlEntry *vmm_get_entry(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlTable		*CTable;
	vmmCntrlEntry		*CEntry;

	if (act->mact.vmmControl == 0) return NULL;			/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContextsPerThread) return NULL;	/* Index not in range */

	CTable = act->mact.vmmControl;					/* Make the address a bit more convenient */
	CEntry = &CTable->vmmc[index - 1];				/* Point to the entry */

	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;		/* See if the slot is actually in use */

	return CEntry;
}
/*************************************************************************************
	Virtual Machine Monitor Exported Functionality

	The following routines are used to implement a quick-switch mechanism for
	virtual machines that need to execute within their own processor environment
	(including register and MMU state).
**************************************************************************************/
/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for their needs.
**
** Outputs:
**		32-bit number representing major/minor version of
**		the Vmm module
-----------------------------------------------------------------------*/
int vmm_get_version(struct savearea *save)
{
	save->save_r3 = kVmmCurrentVersion;		/* Return the version */
	return 1;
}
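/*
 *	Illustrative sketch (an editorial addition, not original source): a
 *	client holding the version word returned in r3 could split it into its
 *	halves like this. The variable names here are hypothetical.
 *
 *		unsigned int version = ...;			// word returned in save_r3
 *		unsigned int major   = version >> 16;		// top 16 bits
 *		unsigned int minor   = version & 0xFFFF;	// bottom 16 bits
 */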
/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Outputs:
**		32-bit number representing functionality supported by this
**		version of the Vmm module
-----------------------------------------------------------------------*/
int vmm_get_features(struct savearea *save)
{
	save->save_r3 = kVmmCurrentFeatures;		/* Return the features */
	return 1;
}
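/*
 *	Illustrative sketch (editorial addition): a client could test a
 *	capability bit in the feature word before relying on it.
 *	kVmmFeature_XYZ is a placeholder name, not a real flag.
 *
 *		unsigned int features = ...;		// word returned in save_r3
 *		if (features & kVmmFeature_XYZ) {
 *			// safe to use the XYZ functionality on this system
 *		}
 */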
/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**		act - pointer to current thread activation
**		version - requested version of the Vmm interface (allowing
**			future versions of the interface to change, but still
**			support older clients)
**		vmm_user_state - pointer to a logical page within the
**			client's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/
int vmm_init_context(struct savearea *save)
{
	thread_act_t		act;
	vmm_version_t		version;
	vmm_state_page_t	*vmm_user_state;
	vmmCntrlTable		*CTable;
	vm_offset_t		conkern;
	vmm_state_page_t	*vks;
	vm_offset_t		conphys;
	kern_return_t		ret;
	pmap_t			new_pmap;
	int			cvi, i;
	task_t			task;
	thread_act_t		fact, gact;
	vmm_user_state = (vmm_state_page_t *)save->save_r4;	/* Get the user address of the comm area */
	if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {	/* Make sure the comm area is page aligned */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}
	/* Make sure that the version requested is supported */
	version = save->save_r3;				/* Pick up passed in version */
	if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	if((version & 0xFFFF) > kVmmCurMinorVersion) {		/* Check for valid minor */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}
	act = current_act();					/* Pick up our activation */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();					/* Figure out who we are */

	task_lock(task);					/* Lock our task */

	fact = (thread_act_t)task->thr_acts.next;		/* Get the first activation on task */
	gact = 0;						/* Pretend we didn't find it yet */
	for(i = 0; i < task->thr_act_count; i++) {		/* All of the activations */
		if(fact->mact.vmmControl) {			/* Is this a virtual machine monitor? */
			gact = fact;				/* Yeah... */
			break;					/* Bail the loop... */
		}
		fact = (thread_act_t)fact->thr_acts.next;	/* Go to the next one */
	}
/*
 *	We only allow one thread per task to be a virtual machine monitor right now. This solves
 *	a number of potential problems that I can't put my finger on right now.
 *
 *	Ultimately, I think we want to move the controls and make all this task based instead of
 *	thread based. That would allow an emulator architecture to spawn a kernel thread for each
 *	VM (if they want) rather than hand dispatch contexts.
 */
	if(gact && (gact != act)) {				/* Check if another thread is a vmm or trying to be */
		task_unlock(task);				/* Release task lock */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_FAILURE;			/* We must play alone... */
		return 1;
	}
	if(!gact) act->mact.vmmControl = (vmmCntrlTable *)1;	/* Temporarily mark that we are the vmm thread */

	task_unlock(task);					/* Safe to release now (because we've marked ourselves) */
	CTable = act->mact.vmmControl;				/* Get the control table address */
	if ((unsigned int)CTable == 1) {			/* If we are marked, try to allocate a new table, otherwise we have one */
		if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {	/* Get a fresh emulation control table */
			act->mact.vmmControl = 0;		/* Unmark us as vmm 'cause we failed */
			ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
			save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
			return 1;
		}

		bzero((void *)CTable, sizeof(vmmCntrlTable));	/* Clean it up */
		act->mact.vmmControl = CTable;			/* Initialize the table anchor */
	}
	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search to find a free slot */
		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;	/* Bail if we find an unused slot */
	}
	if(cvi >= kVmmMaxContextsPerThread) {			/* Did we find one? */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No empty slots... */
		return 1;
	}
	ret = vm_map_wire(					/* Wire the virtual machine monitor's context area */
		act->map,
		(vm_offset_t)vmm_user_state,
		(vm_offset_t)vmm_user_state + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if (ret != KERN_SUCCESS)				/* The wire failed, return the code */
		goto return_in_shame;
	/* Map the vmm state into the kernel's address space. */
	conphys = pmap_extract(act->map->pmap, (vm_offset_t)vmm_user_state);
	/* Find a virtual address to use. */
	ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {				/* Did we find an address? */
		(void) vm_map_unwire(act->map,			/* No, unwire the context area */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		goto return_in_shame;
	}
	/* Map it into the kernel's address space. */
	pmap_enter(kernel_pmap, conkern, conphys,
		VM_PROT_READ | VM_PROT_WRITE,
		VM_WIMG_USE_DEFAULT, TRUE);
	/* Clear the vmm state structure. */
	vks = (vmm_state_page_t *)conkern;
	bzero((char *)vks, PAGE_SIZE);
	/* Allocate a new pmap for the new vmm context. */
	new_pmap = pmap_create(0);
	if (new_pmap == PMAP_NULL) {
		(void) vm_map_unwire(act->map,			/* Couldn't get a pmap, unwire the user page */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		kmem_free(kernel_map, conkern, PAGE_SIZE);	/* Release the kernel address */
		goto return_in_shame;
	}
	/* We're home free now. Simply fill in the necessary info and return. */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = cvi + 1;				/* Tell the user the index for this virtual machine */

	CTable->vmmc[cvi].vmmFlags = vmmInUse;			/* Mark the slot in use and make sure the rest are clear */
	CTable->vmmc[cvi].vmmPmap = new_pmap;			/* Remember the pmap for this guy */
	CTable->vmmc[cvi].vmmContextKern = vks;			/* Remember the kernel address of comm area */
	CTable->vmmc[cvi].vmmContextPhys = (vmm_state_page_t *)conphys;	/* Remember the state page physical addr */
	CTable->vmmc[cvi].vmmContextUser = vmm_user_state;	/* Remember user address of comm area */

	CTable->vmmc[cvi].vmmFacCtx.FPUsave = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUlevel = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0;			/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXsave = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXlevel = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0;			/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.facAct = act;		/* Point back to the activation */

	hw_atomic_add((int *)&saveanchor.savetarget, 2);	/* Account for the number of extra saveareas we think we might "need" */
	if (!(act->map->pmap->vflags & pmapVMhost)) {
		simple_lock(&(act->map->pmap->lock));
		act->map->pmap->vflags |= pmapVMhost;
		simple_unlock(&(act->map->pmap->lock));
	}
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;

return_in_shame:
	if(!gact) kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table if we just allocated it */
	act->mact.vmmControl = 0;				/* Unmark us as vmm 'cause we failed */
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = ret;					/* Pass back return code... */
	return 1;
}
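/*
 *	Illustrative sketch (editorial addition): per the Inputs described
 *	above, a caller passes the requested interface version in r3 and a
 *	page-aligned user address for the comm area in r4. In rough pseudo-C,
 *	with hypothetical names:
 *
 *		vmm_state_page_t *state = valloc(PAGE_SIZE);	// page-aligned comm area
 *		save->save_r3 = kVmmCurrentVersion;		// requested version
 *		save->save_r4 = (unsigned int)state;		// user address of comm area
 *		// on success, state->thread_index holds the new one-based index
 */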
/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/
kern_return_t vmm_tear_down_context(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi;
	register savearea	*sv;
	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	hw_atomic_sub((int *)&saveanchor.savetarget, 2);	/* We don't need these extra saveareas anymore */
	if(CEntry->vmmFacCtx.FPUsave) {				/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {				/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}
	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	pmap_destroy(CEntry->vmmPmap);				/* Toss the pmap for this context */
	CEntry->vmmPmap = NULL;					/* Clean it up */
	(void) vm_map_unwire(					/* Unwire the user comm page */
		act->map,
		(vm_offset_t)CEntry->vmmContextUser,
		(vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
		FALSE);

	kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);	/* Remove kernel's view of the comm page */
	CEntry->vmmFlags = 0;					/* Clear out all of the flags for this entry including in use */
	CEntry->vmmPmap = 0;					/* Clear pmap pointer */
	CEntry->vmmContextKern = 0;				/* Clear the kernel address of comm area */
	CEntry->vmmContextUser = 0;				/* Clear the user address of comm area */

	CEntry->vmmFacCtx.FPUsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.facAct = 0;				/* Clear facility context control */
	CTable = act->mact.vmmControl;				/* Get the control table address */
	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Scan for any contexts still in use */
		if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {	/* Return if there are still some in use */
			ml_set_interrupts_enabled(FALSE);	/* No more interruptions */
			return KERN_SUCCESS;			/* Leave... */
		}
	}

	kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table because we just tossed the last context */
	act->mact.vmmControl = 0;				/* Unmark us as vmm */

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return KERN_SUCCESS;
}
/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**		activation to tear down
**
** Outputs:
**		All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/
void vmm_tear_down_all(thread_act_t act) {

	vmmCntrlTable		*CTable;
	int			cvi;
	kern_return_t		ret;
	savearea		*save;
	spl_t			s;
	if(act->mact.specFlags & runningVM) {			/* Are we actually in a context right now? */
		save = find_user_regs(act);			/* Find the user state context */
		if(!save) {					/* Did we find it? */
			panic("vmm_tear_down_all: runningVM marked but no user state context\n");
			return;
		}

		save->save_exception = kVmmBogusContext*4;	/* Indicate that this context is bogus now */
		s = splhigh();					/* Make sure interrupts are off */
		vmm_force_exit(act, save);			/* Force an exit from VM state */
		splx(s);					/* Restore interrupts */
	}
	if(CTable = act->mact.vmmControl) {			/* Do we have a vmm control block? */

		for(cvi = 1; cvi <= kVmmMaxContextsPerThread; cvi++) {	/* Look at all slots */
			if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) {	/* Is this one in use? */
				ret = vmm_tear_down_context(act, cvi);	/* Take down the found context */
				if(ret != KERN_SUCCESS) {	/* Did it go away? */
					panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
						ret, act, cvi);
				}
			}
		}
		if(act->mact.vmmControl) {			/* Did we find one? */
			panic("vmm_tear_down_all: control table did not get deallocated\n");	/* Table did not go away */
		}
	}

	return;
}
/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it and this mapping may disappear at any time, even before it gets used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		cva - virtual address within the client's address space
**		ava - virtual address within the alternate address space
**		prot - protection flags
**
** Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
** areas are not allowed and will fail. Same with directly mapped I/O areas.
**
** Input conditions:
**		Interrupts disabled (from fast trap)
**
** Outputs:
**		kernel return code indicating success or failure
**		if success, va resident and alternate mapping made
-----------------------------------------------------------------------*/
kern_return_t vmm_map_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		cva,
	vm_offset_t		ava,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;
	vm_offset_t		phys_addr;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	struct phys_entry	*pp;
	vm_map_t		map;
	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */
/*
 *	Find out if we have already mapped the address and toss it out if so.
 */
	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, ava);	/* See if there is already a mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_map_page: timeout locking physical entry for alternate virtual address (%08X)\n", ava);	/* Yeah, scream about it! */
		return KERN_FAILURE;				/* Bad hair day, return FALSE... */
	}
	if(mp) {						/* If it was there, toss it */
		mpv = hw_cpv(mp);				/* Convert mapping block to virtual */
		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
		(void)mapping_remove(CEntry->vmmPmap, ava);	/* Throw away the mapping, we're about to replace it */
	}

	map = current_act()->map;				/* Get the current map */
	while(1) {						/* Keep trying until we get it or until we fail */
		if(hw_cvp_blk(map->pmap, cva)) return KERN_FAILURE;	/* Make sure that there is no block map at this address */

		mp = hw_lock_phys_vir(map->pmap->space, cva);	/* Lock the physical entry for emulator's page */
		if((unsigned int)mp & 1) {			/* Did we timeout? */
			panic("vmm_map_page: timeout locking physical entry for emulator virtual address (%08X)\n", cva);	/* Yeah, scream about it! */
			return KERN_FAILURE;			/* Bad hair day, return FALSE... */
		}

		if(mp) {					/* We found it... */
			mpv = hw_cpv(mp);			/* Convert mapping block to virtual */

			if(!mpv->physent) return KERN_FAILURE;	/* If there is no physical entry (e.g., I/O area), we won't map it */

			if(!(mpv->PTEr & 1)) break;		/* If we are writable go ahead and map it... */

			hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the map before we try to fault the write bit on */
		}

		ml_set_interrupts_enabled(TRUE);		/* Enable interruptions */
		ret = vm_fault(map, trunc_page(cva), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0);	/* Didn't find it, try to fault it in read/write... */
		ml_set_interrupts_enabled(FALSE);		/* Disable interruptions */
		if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* There isn't a page there, return... */
	}
/*
 *	Now we make a mapping using all of the attributes of the source page except for protection.
 *	Also specify that the physical entry is locked.
 */
	nmpv = mapping_make(CEntry->vmmPmap, mpv->physent, (ava & -PAGE_SIZE),
		(mpv->physent->pte1 & -PAGE_SIZE), prot, ((mpv->physent->pte1 >> 3) & 0xF), 1);

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry now, we're done with it */

	CEntry->vmmLastMap = ava & -PAGE_SIZE;			/* Remember the last mapping we made */
	if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode))
		CEntry->vmmFlags |= vmmMapDone;			/* Set that we did a map operation */

	return KERN_SUCCESS;
}
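/*
 *	Editorial note with a sketch: as the header comment above warns, the
 *	reference/change (RC) bits survive only while a page stays mapped, so a
 *	client that needs dirty tracking would wire the client page before
 *	mapping it (hypothetical call sequence, reusing the in-kernel APIs seen
 *	in this file):
 *
 *		vm_map_wire(map, cva, cva + PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE, FALSE);
 *		vmm_map_page(act, index, cva, ava, prot);	// RC now reliable for this page
 */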
/*-----------------------------------------------------------------------
** vmm_map_execute
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context and then directly starts executing.
**
** See description of vmm_map_page for details.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/
vmm_return_code_t vmm_map_execute(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		cva,
	vm_offset_t		ava,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */

	if (CEntry == NULL) return kVmmBogusContext;		/* Return bogus context */
	if (((per_proc_info[cpu_number()].spcFlags) & FamVMmode) && (CEntry != act->mact.vmmCEntry))
		return kVmmBogusContext;			/* Yes, invalid index in Fam */
	ret = vmm_map_page(act, index, cva, ava, prot);		/* Go try to map the page on in */

	if(ret == KERN_SUCCESS) {
		CEntry->vmmFlags |= vmmMapDone;			/* Set that we did a map operation */
		vmm_execute_vm(act, index);			/* Return was ok, launch the VM */
	}

	return kVmmInvalidAddress;				/* We had trouble mapping in the page */
}
/*-----------------------------------------------------------------------
** vmm_map_list
**
** This function maps a list of pages into the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		count - number of pages to release
**		vmcpComm in the comm page contains up to kVmmMaxMapPages to map
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxMapPages is exceeded
**		or the vmm_map_page call fails.
-----------------------------------------------------------------------*/
kern_return_t vmm_map_list(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		cnt)
{
	vmmCntrlEntry		*CEntry;
	kern_return_t		ret;
	unsigned int		i;
	vmmMapList		*lst;
	vm_offset_t		cva;
	vm_offset_t		ava;
	vm_prot_t		prot;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return -1;				/* No good, failure... */
	if(cnt > kVmmMaxMapPages) return KERN_FAILURE;		/* They tried to map too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	lst = (vmmMapList *)(&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]);	/* Point to the first entry */
	for(i = 0; i < cnt; i++) {				/* Step through and map all pages in list */
		cva = lst[i].vmlva;				/* Get the actual address */
		ava = lst[i].vmlava & -vmlFlgs;			/* Get the alternate address */
		prot = lst[i].vmlava & vmlProt;			/* Get the protection bits */
		ret = vmm_map_page(act, index, cva, ava, prot);	/* Go try to map the page on in */
		if(ret != KERN_SUCCESS) return KERN_FAILURE;	/* Bail if any error */
	}

	return KERN_SUCCESS;					/* Return... */
}
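/*
 *	Illustrative sketch (editorial addition): each list entry packs the
 *	alternate address and the protection into the single vmlava word, which
 *	the loop above decodes with -vmlFlgs and vmlProt. Building an entry
 *	would look roughly like:
 *
 *		lst[i].vmlva  = cva;					// client virtual address
 *		lst[i].vmlava = (ava & -vmlFlgs) | (prot & vmlProt);	// alternate address plus flag bits
 */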
/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** This function determines whether the specified VMM
** virtual address is mapped.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the alternate's address space
**
** Outputs:
**		Non-alternate's virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**		If there are aliases to the page in the non-alternate address space,
**		this call could return the wrong one. Moral of the story: no aliases.
-----------------------------------------------------------------------*/
vm_offset_t vmm_get_page_mapping(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	vm_offset_t		ova;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	pmap_t			pmap;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return -1;				/* No good, failure... */
	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_mapping: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return -1;					/* Bad hair day, return FALSE... */
	}
	if(!mp) return -1;					/* Not mapped, return -1 */
	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	pmap = current_act()->map->pmap;			/* Get the current pmap */
	ova = -1;						/* Assume failure for now */

	for(nmpv = hw_cpv(mpv->physent->phys_link); nmpv; nmpv = hw_cpv(nmpv->next)) {	/* Scan 'em all */

		if(nmpv->pmap != pmap) continue;		/* Skip all the rest if this is not the right pmap... */

		ova = ((((unsigned int)nmpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000;	/* Backward hash to the wrapped VADDR */
		ova = ova | ((nmpv->PTEv << 1) & 0xF0000000);	/* Move in the segment number */
		ova = ova | ((nmpv->PTEv << 22) & 0x0FC00000);	/* Add in the API for the top of the address */
		break;						/* We're done now, pass virtual address back */
	}

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	if(ova == -1) panic("vmm_get_page_mapping: could not back-map alternate va (%08X)\n", va);	/* We are bad wrong if we can't find it */

	return ova;
}
/*-----------------------------------------------------------------------
** vmm_unmap_page
**
** This function unmaps a page from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/
kern_return_t vmm_unmap_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	boolean_t		ret;
	kern_return_t		kern_result = KERN_SUCCESS;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return -1;				/* No good, failure... */

	ret = mapping_remove(CEntry->vmmPmap, va);		/* Toss the mapping */

	return (ret ? KERN_SUCCESS : KERN_FAILURE);		/* Return... */
}
/*-----------------------------------------------------------------------
** vmm_unmap_list
**
** This function unmaps a list of pages from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		count - number of pages to release
**		vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
-----------------------------------------------------------------------*/
kern_return_t vmm_unmap_list(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		cnt)
{
	vmmCntrlEntry		*CEntry;
	kern_return_t		kern_result = KERN_SUCCESS;
	unsigned int		*pgaddr, i;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return -1;				/* No good, failure... */

	if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE;	/* They tried to unmap too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	pgaddr = &((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */

	for(i = 0; i < cnt; i++) {				/* Step and release all pages in list */

		(void)mapping_remove(CEntry->vmmPmap, pgaddr[i]);	/* Toss the mapping */
	}

	return KERN_SUCCESS;					/* Return... */
}
/*-----------------------------------------------------------------------
** vmm_unmap_all_pages
**
** This function unmaps all pages from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of context state
**
** Outputs:
**		All pages are unmapped, but the address space (i.e., pmap) is still alive
-----------------------------------------------------------------------*/
void vmm_unmap_all_pages(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return;				/* Either this isn't vmm thread or the index is bogus */

/*
 *	Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
 */
	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	return;
}
/*-----------------------------------------------------------------------
** vmm_get_page_dirty_flag
**
** This function returns the changed flag of the page
** and optionally clears the flag.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address space
**		reset - clears dirty if true, untouched if not
**
** Outputs:
**		the dirty bit
**		clears the dirty bit in the pte if requested
**
** Note:
**		The RC bits are merged into the global physical entry
-----------------------------------------------------------------------*/
boolean_t vmm_get_page_dirty_flag(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	unsigned int		reset)
{
	vmmCntrlEntry		*CEntry;
	register mapping	*mpv, *mp;
	unsigned int		RC;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return 1;				/* Either this isn't vmm thread or the index is bogus */
	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_dirty_flag: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return 1;					/* Bad hair day, return dirty... */
	}
	if(!mp) return 1;					/* Not mapped, return dirty... */

	RC = hw_test_rc(mp, reset);				/* Fetch the RC bits and clear if requested */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	return (RC & 1);					/* Return the change bit */
}
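/*
 *	Illustrative sketch (editorial addition): an emulator could poll the
 *	change bit to decide whether a guest page (say, a frame buffer page)
 *	needs redrawing, clearing the bit in the same call:
 *
 *		if (vmm_get_page_dirty_flag(act, index, va, 1)) {
 *			// page was modified since the last check; refresh it
 *		}
 */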
/*-----------------------------------------------------------------------
** vmm_protect_page
**
** This function sets the protection bits of a mapped page
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address space
**		prot - protection flags
**
** Outputs:
**		protection bits of the mapping are modified
-----------------------------------------------------------------------*/
kern_return_t vmm_protect_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	vm_prot_t		prot)
{
	vmmCntrlEntry		*CEntry;
	register mapping	*mpv, *mp;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */
	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_protect_page: timeout locking physical entry for virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return 1;					/* Bad hair day, return dirty... */
	}
	if(!mp) return KERN_SUCCESS;				/* Not mapped, just return... */

	hw_prot_virt(mp, prot);					/* Set the protection */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	CEntry->vmmLastMap = va & -PAGE_SIZE;			/* Remember the last mapping we changed */
	if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode))
		CEntry->vmmFlags |= vmmMapDone;			/* Set that we did a map operation */

	return KERN_SUCCESS;					/* Return */
}
/*-----------------------------------------------------------------------
** vmm_protect_execute
**
** This function sets the protection bits of a mapped page
** and then directly starts executing.
**
** See description of vmm_protect_page for details.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_protect_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/
vmm_return_code_t vmm_protect_execute(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */

	if (CEntry == NULL) return kVmmBogusContext;		/* Return bogus context */

	if (((per_proc_info[cpu_number()].spcFlags) & FamVMmode) && (CEntry != act->mact.vmmCEntry))
		return kVmmBogusContext;			/* Yes, invalid index in Fam */
	ret = vmm_protect_page(act, index, va, prot);		/* Go try to change access */

	if(ret == KERN_SUCCESS) {
		CEntry->vmmFlags |= vmmMapDone;			/* Set that we did a map operation */
		vmm_execute_vm(act, index);			/* Return was ok, launch the VM */
	}

	return kVmmInvalidAddress;				/* We had trouble of some kind (shouldn't happen) */
}
/*-----------------------------------------------------------------------
** vmm_get_float_state
**
** This function causes the current floating point state to
** be saved into the shared context area. It also clears the
** vmmFloatCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/
kern_return_t vmm_get_float_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i;
	register struct savearea_fpu *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */
	act->mact.specFlags &= ~floatCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;	/* Clear the change indication */

	fpu_save(&CEntry->vmmFacCtx);				/* Save context if live */

	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCRshadow.i[0] = CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[0];	/* Copy FPSCR */
	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCRshadow.i[1] = CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[1];	/* Copy FPSCR */

	if(sv = CEntry->vmmFacCtx.FPUsave) {			/* Is there context yet? */
		bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8);	/* 32 registers */
		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {				/* Initialize floating points */
		CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit;	/* Initial value */
	}

	return KERN_SUCCESS;
}
/*-----------------------------------------------------------------------
** vmm_get_vector_state
**
** This function causes the current vector state to
** be saved into the shared context area. It also clears the
** vmmVectorCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/
kern_return_t vmm_get_vector_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i, j;
	unsigned int		vrvalidwrk;
	register struct savearea_vec *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */
	vec_save(&CEntry->vmmFacCtx);				/* Save context if live */

	act->mact.specFlags &= ~vectorCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;	/* Clear the change indication */

	for(j = 0; j < 4; j++) {				/* Set value for vscr */
		CEntry->vmmContextKern->vmm_proc_state.ppcVSCRshadow.i[j] = CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j];
	}
	if(sv = CEntry->vmmFacCtx.VMXsave) {			/* Is there context yet? */

		vrvalidwrk = sv->save_vrvalid;			/* Get the valid flags */

		for(i = 0; i < 32; i++) {			/* Copy the saved registers and invalidate the others */
			if(vrvalidwrk & 0x80000000) {		/* Do we have a valid value here? */
				for(j = 0; j < 4; j++) {	/* If so, copy it over */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
				}
			}
			else {
				for(j = 0; j < 4; j++) {	/* Otherwise set to empty value */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
				}
			}

			vrvalidwrk = vrvalidwrk << 1;		/* Shift over to the next */
		}

		return KERN_SUCCESS;
	}
	for(i = 0; i < 32; i++) {				/* Initialize vector registers */
		for(j = 0; j < 4; j++) {			/* Do words */
			CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];	/* Initial value */
		}
	}

	return KERN_SUCCESS;
}
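/*
 *	Editorial sketch of the save_vrvalid convention used above: bit 31 of
 *	the mask corresponds to VR0 and the loop shifts left once per register,
 *	so a single register n could be tested as:
 *
 *		if (sv->save_vrvalid & (0x80000000 >> n)) {
 *			// VRn holds live data in the savearea
 *		}
 */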
/*-----------------------------------------------------------------------
** vmm_set_timer
**
** This function causes a timer (in AbsoluteTime) for a specific time
** to be set. It also clears the vmmTimerPop flag if the timer is actually
** set; it is cleared otherwise.
**
** A timer is cleared by setting the time to 0. This will clear
** the vmmTimerPop bit. Simply setting the timer to earlier than the
** current time clears the internal timer request, but leaves the
** vmmTimerPop flag set.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**		timerhi - high order word of AbsoluteTime to pop
**		timerlo - low order word of AbsoluteTime to pop
**
** Outputs:
**		timer set, vmmTimerPop cleared
-----------------------------------------------------------------------*/
kern_return_t vmm_set_timer(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		timerhi,
	unsigned int		timerlo)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo;

	vmm_timer_pop(act);					/* Go adjust all of the timer stuff */
	return KERN_SUCCESS;					/* Leave now... */
}
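/*
 *	Illustrative sketch (editorial addition): a caller holding a 64-bit
 *	AbsoluteTime deadline would split it into the two 32-bit words this
 *	routine expects:
 *
 *		uint64_t deadline = ...;			// absolute pop time
 *		unsigned int timerhi = deadline >> 32;		// high order word
 *		unsigned int timerlo = (uint32_t)deadline;	// low order word
 *		vmm_set_timer(act, index, timerhi, timerlo);
 */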
/*-----------------------------------------------------------------------
** vmm_get_timer
**
** This function causes the timer for a specified VM to be
** returned in return_params[0] and return_params[1].
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Timer value set in return_params[0] and return_params[1].
**		Set to 0 if timer is not set.
-----------------------------------------------------------------------*/
kern_return_t vmm_get_timer(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	CEntry->vmmContextKern->return_params[0] = (CEntry->vmmTimer >> 32);	/* Return the high-order half of the timer */
	CEntry->vmmContextKern->return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the low-order half of the timer */

	return KERN_SUCCESS;
}
/*-----------------------------------------------------------------------
** vmm_timer_pop
**
** This function causes all timers in the array of VMs to be updated.
** All appropriate flags are set or reset. If a VM is currently
** running and its timer expired, it is intercepted.
**
** The qactTimer value is set to the lowest unexpired timer. It is
** zeroed if all timers are expired or have been reset.
**
** Inputs:
**		act - pointer to current thread activation structure
**
** Outputs:
**		timers set, vmmTimerPop cleared or set
-----------------------------------------------------------------------*/
void vmm_timer_pop(thread_act_t act)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi, any;
	uint64_t		now, soonest;
	savearea		*sv;
	if(!((unsigned int)act->mact.vmmControl & 0xFFFFFFFE)) {	/* Are there any virtual machines? */
		panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
	}

	soonest = 0xFFFFFFFFFFFFFFFFULL;			/* Max time */
	clock_get_uptime(&now);					/* What time is it? */

	CTable = act->mact.vmmControl;				/* Make this easier */
	any = 0;						/* Haven't found a running unexpired timer yet */

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Cycle through all and check time now */

		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;	/* Do not check if the entry is empty */

		if(CTable->vmmc[cvi].vmmTimer == 0) {		/* Is the timer reset? */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Clear timer popped */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Clear timer popped */
			continue;				/* Check next */
		}

		if (CTable->vmmc[cvi].vmmTimer <= now) {
			CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;	/* Set timer popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;	/* Set timer popped here */
			if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->mact.vmmCEntry) {	/* Is this the running VM? */
				sv = find_user_regs(act);	/* Get the user state registers */
				if(!sv) {			/* Did we find something? */
					panic("vmm_timer_pop: no user context; act = %08X\n", act);
				}
				sv->save_exception = kVmmReturnNull*4;	/* Indicate that this is a null exception */
				vmm_force_exit(act, sv);	/* Intercept a running VM */
			}
			continue;				/* Check the rest */
		}
		else {						/* It hasn't popped yet */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Set timer not popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Set timer not popped here */
		}

		any = 1;					/* Show we found an active unexpired timer */

		if (CTable->vmmc[cvi].vmmTimer < soonest)
			soonest = CTable->vmmc[cvi].vmmTimer;
	}

	if(any) {
		if (act->mact.qactTimer == 0 || soonest <= act->mact.qactTimer)
			act->mact.qactTimer = soonest;		/* Set lowest timer */
	}

	return;
}
/*-----------------------------------------------------------------------
** vmm_stop_vm
**
** This function prevents the specified VM(s) from running.
** If any is currently executing, the execution is intercepted
** with a code of kVmmStopped. Note that execution of the VM is
** blocked until a vmmExecuteVM is called with the start flag set to 1.
** This provides the ability for a thread to stop execution of a VM and
** insure that it will not be run until the emulator has processed the
** "virtual" interruption.
**
** Inputs:
**		vmmask - 32 bit mask corresponding to the VMs to put in stop state
**		NOTE: if this mask is all 0s, any executing VM is intercepted with
**		a kVmmStopped (but not marked stopped), otherwise this is a no-op.
**		Also note that there is a potential race here and the VM may not stop.
**
** Outputs:
**		kernel return code indicating success
**		or if no VMs are enabled, an invalid syscall exception.
-----------------------------------------------------------------------*/
int vmm_stop_vm(struct savearea *save)
{
	thread_act_t		act;
	task_t			task;
	thread_act_t		fact;
	vmmCntrlTable		*CTable;
	int			cvi, i;
	unsigned int		vmmask;
	ReturnHandler		*stopapc;
	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();					/* Figure out who we are */

	task_lock(task);					/* Lock our task */

	fact = (thread_act_t)task->thr_acts.next;		/* Get the first activation on task */
	act = 0;						/* Pretend we didn't find it yet */
	for(i = 0; i < task->thr_act_count; i++) {		/* All of the activations */
		if(fact->mact.vmmControl) {			/* Is this a virtual machine monitor? */
			act = fact;				/* Yeah... */
			break;					/* Bail the loop... */
		}
		fact = (thread_act_t)fact->thr_acts.next;	/* Go to the next one */
	}
	if(!((unsigned int)act)) {				/* See if we have VMMs yet */
		task_unlock(task);				/* No, unlock the task */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;					/* Go generate a syscall exception */
	}
	act_lock_thread(act);					/* Make sure this stays 'round */
	task_unlock(task);					/* Safe to release now */

	CTable = act->mact.vmmControl;				/* Get the pointer to the table */
	if(!((unsigned int)CTable & -2)) {			/* Are there any all the way up yet? */
		act_unlock_thread(act);				/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;					/* Go generate a syscall exception */
	}

	if(!(vmmask = save->save_r3)) {				/* Get the stop mask and check if all zeros */
		act_unlock_thread(act);				/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Set success */
		return 1;					/* Return... */
	}
	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search slots */
		if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) {	/* See if we need to stop and if it is in use */
			hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop);	/* Set this one to stop */
		}
		vmmask = vmmask << 1;				/* Slide mask over */
	}
	if(hw_compare_and_store(0, 1, &act->mact.emPendRupts)) {	/* See if there is already a stop pending and lock out others if not */
		act_unlock_thread(act);				/* Already one pending, unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Say we did it... */
		return 1;					/* Leave */
	}
	if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) {	/* Get a return handler control block */
		act->mact.emPendRupts = 0;			/* No memory, say we have given up request */
		act_unlock_thread(act);				/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No storage... */
		return 1;					/* Return... */
	}
	ml_set_interrupts_enabled(FALSE);			/* Disable interruptions for now */

	stopapc->handler = vmm_interrupt;			/* Set interruption routine */

	stopapc->next = act->handlers;				/* Put our interrupt at the start of the list */
	act->handlers = stopapc;				/* Point to us */

	act_set_apc(act);					/* Set an APC AST */
	ml_set_interrupts_enabled(TRUE);			/* Enable interruptions now */

	act_unlock_thread(act);					/* Unlock the activation */
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;
}
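/*
 *	Illustrative sketch (editorial addition): the loop above tests bit
 *	0x80000000 first and shifts once per slot, so bit 31 of the mask passed
 *	in r3 corresponds to context index 1, bit 30 to index 2, and so on:
 *
 *		unsigned int vmmask = 0x80000000 >> (index - 1);	// stop just this one VM
 *		save->save_r3 = vmmask;					// mask goes in r3
 */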
/*-----------------------------------------------------------------------
** vmm_interrupt
**
** This function is executed asynchronously from an APC AST.
** It is to be used for anything that needs to interrupt a running VM.
** This includes any kind of interruption generation (other than timer pop)
** or entering the stopped state.
**
** Inputs:
**		ReturnHandler *rh - the return handler control block as required by the APC.
**		thread_act_t act - the activation
**
** Outputs:
**		Whatever needed to be done is done.
-----------------------------------------------------------------------*/
void vmm_interrupt(ReturnHandler *rh, thread_act_t act) {

	vmmCntrlTable		*CTable;
	savearea		*sv;
	boolean_t		inter;

	kfree((vm_offset_t)rh, sizeof(ReturnHandler));		/* Release the return handler block */

	inter = ml_set_interrupts_enabled(FALSE);		/* Disable interruptions for now */

	act->mact.emPendRupts = 0;				/* Say that there are no more interrupts pending */
	CTable = act->mact.vmmControl;				/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) return;		/* Leave if we aren't doing VMs any more... */

	if(act->mact.vmmCEntry && (act->mact.vmmCEntry->vmmFlags & vmmXStop)) {	/* Do we need to stop the running guy? */
		sv = find_user_regs(act);			/* Get the user state registers */
		if(!sv) {					/* Did we find something? */
			panic("vmm_interrupt: no user context; act = %08X\n", act);
		}
		sv->save_exception = kVmmStopped*4;		/* Set a "stopped" exception */
		vmm_force_exit(act, sv);			/* Intercept a running VM */
	}
	ml_set_interrupts_enabled(inter);			/* Put interrupts back to what they were */

	return;
}