2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
22 /*-----------------------------------------------------------------------
25 ** C routines that we are adding to the MacOS X kernel.
27 -----------------------------------------------------------------------*/
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/host_info.h>
32 #include <kern/kern_types.h>
33 #include <kern/host.h>
34 #include <kern/task.h>
35 #include <kern/thread.h>
36 #include <kern/thread_act.h>
37 #include <ppc/exception.h>
38 #include <ppc/mappings.h>
39 #include <ppc/thread_act.h>
40 #include <vm/vm_kern.h>
42 #include <ppc/vmachmon.h>
44 extern struct Saveanchor saveanchor
; /* Aligned savearea anchor */
45 extern double FloatInit
;
46 extern unsigned long QNaNbarbarian
[4];
48 /*************************************************************************************
49 Virtual Machine Monitor Internal Routines
50 **************************************************************************************/
52 /*-----------------------------------------------------------------------
55 ** This function verifies and return a vmm context entry index
58 ** act - pointer to current thread activation
59 ** index - index into vmm control table (this is a "one based" value)
62 ** address of a vmmCntrlEntry or 0 if not found
63 -----------------------------------------------------------------------*/
65 vmmCntrlEntry
*vmm_get_entry(
67 vmm_thread_index_t index
)
69 vmmCntrlTable
*CTable
;
70 vmmCntrlEntry
*CEntry
;
72 index
= index
& vmmTInum
; /* Clean up the index */
74 if (act
->mact
.vmmControl
== 0) return NULL
; /* No control table means no vmm */
75 if ((index
- 1) >= kVmmMaxContexts
) return NULL
; /* Index not in range */
77 CTable
= act
->mact
.vmmControl
; /* Make the address a bit more convienient */
78 CEntry
= &CTable
->vmmc
[index
- 1]; /* Point to the entry */
80 if (!(CEntry
->vmmFlags
& vmmInUse
)) return NULL
; /* See if the slot is actually in use */
85 /*-----------------------------------------------------------------------
88 ** This function verifies and returns the pmap for an address space.
89 ** If there is none and the request is valid, a pmap will be created.
92 ** act - pointer to current thread activation
93 ** index - index into vmm control table (this is a "one based" value)
96 ** address of a pmap or 0 if not found or could not be created
97 ** Note that if there is no pmap for the address space it will be created.
98 -----------------------------------------------------------------------*/
100 pmap_t
vmm_get_adsp(thread_act_t act
, vmm_thread_index_t index
)
104 if (act
->mact
.vmmControl
== 0) return NULL
; /* No control table means no vmm */
105 if ((index
- 1) >= kVmmMaxContexts
) return NULL
; /* Index not in range */
107 pmap
= act
->mact
.vmmControl
->vmmAdsp
[index
- 1]; /* Get the pmap */
108 if(pmap
) return pmap
; /* We've got it... */
110 pmap
= pmap_create(0); /* Make a fresh one */
111 act
->mact
.vmmControl
->vmmAdsp
[index
- 1] = pmap
; /* Remember it */
113 * Note that if the create fails, we will return a null.
115 return pmap
; /* Return it... */
120 /*************************************************************************************
121 Virtual Machine Monitor Exported Functionality
123 The following routines are used to implement a quick-switch mechanism for
124 ** virtual machines that need to execute within their own processor environment
125 (including register and MMU state).
126 **************************************************************************************/
128 /*-----------------------------------------------------------------------
131 ** This function returns the current version of the virtual machine
132 ** interface. It is divided into two portions. The top 16 bits
133 ** represent the major version number, and the bottom 16 bits
134 ** represent the minor version number. Clients using the Vmm
135 ** functionality should make sure they are using a version new
142 ** 32-bit number representing major/minor version of
144 -----------------------------------------------------------------------*/
146 int vmm_get_version(struct savearea
*save
)
148 save
->save_r3
= kVmmCurrentVersion
; /* Return the version */
153 /*-----------------------------------------------------------------------
156 ** This function returns a set of flags that represents the functionality
157 ** supported by the current version of the Vmm interface. Clients should
158 ** use this to determine whether they can run on this system.
164 ** 32-bit number representing functionality supported by this
165 ** version of the Vmm module
166 -----------------------------------------------------------------------*/
168 int vmm_get_features(struct savearea
*save
)
170 save
->save_r3
= kVmmCurrentFeatures
; /* Return the features */
171 if(per_proc_info
->pf
.Available
& pf64Bit
) {
172 save
->save_r3
&= ~kVmmFeature_LittleEndian
; /* No little endian here */
173 save
->save_r3
|= kVmmFeature_SixtyFourBit
; /* Set that we can do 64-bit */
179 /*-----------------------------------------------------------------------
182 ** This function returns the maximum addressable virtual address supported
185 ** Returns max address
186 -----------------------------------------------------------------------*/
188 addr64_t
vmm_max_addr(thread_act_t act
)
190 return vm_max_address
; /* Return the maximum address */
193 /*-----------------------------------------------------------------------
196 ** This function retrieves the eXtended Architecture flags for the specified VM.
198 ** We need to return the result in the return code rather than in the return parameters
199 ** because we need an architecture independent format so the results are actually
200 ** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
205 ** act - pointer to current thread activation structure
206 ** index - index returned by vmm_init_context
209 ** Return code is set to the XA flags. If the index is invalid or the
210 ** context has not been created, we return 0.
211 -----------------------------------------------------------------------*/
213 unsigned int vmm_get_XA(
215 vmm_thread_index_t index
)
217 vmmCntrlEntry
*CEntry
;
219 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
220 if (CEntry
== NULL
) return 0; /* Either this isn't a vmm or the index is bogus */
222 return CEntry
->vmmXAFlgs
; /* Return the flags */
225 /*-----------------------------------------------------------------------
228 ** This function initializes an emulation context. It allocates
229 ** a new pmap (address space) and fills in the initial processor
230 ** state within the specified structure. The structure, mapped
231 ** into the client's logical address space, must be page-aligned.
234 ** act - pointer to current thread activation
235 ** version - requested version of the Vmm interface (allowing
236 ** future versions of the interface to change, but still
237 ** support older clients)
238 ** vmm_user_state - pointer to a logical page within the
239 ** client's address space
242 ** kernel return code indicating success or failure
243 -----------------------------------------------------------------------*/
245 int vmm_init_context(struct savearea
*save
)
249 vmm_version_t version
;
250 vmm_state_page_t
* vmm_user_state
;
251 vmmCntrlTable
*CTable
;
253 vmm_state_page_t
* vks
;
259 thread_act_t fact
, gact
;
261 vmm_user_state
= CAST_DOWN(vmm_state_page_t
*, save
->save_r4
); /* Get the user address of the comm area */
262 if ((unsigned int)vmm_user_state
& (PAGE_SIZE
- 1)) { /* Make sure the comm area is page aligned */
263 save
->save_r3
= KERN_FAILURE
; /* Return failure */
267 /* Make sure that the version requested is supported */
268 version
= save
->save_r3
; /* Pick up passed in version */
269 if (((version
>> 16) < kVmmMinMajorVersion
) || ((version
>> 16) > (kVmmCurrentVersion
>> 16))) {
270 save
->save_r3
= KERN_FAILURE
; /* Return failure */
274 if((version
& 0xFFFF) > kVmmCurMinorVersion
) { /* Check for valid minor */
275 save
->save_r3
= KERN_FAILURE
; /* Return failure */
279 act
= current_act(); /* Pick up our activation */
281 ml_set_interrupts_enabled(TRUE
); /* This can take a bit of time so pass interruptions */
283 task
= current_task(); /* Figure out who we are */
285 task_lock(task
); /* Lock our task */
287 fact
= (thread_act_t
)task
->threads
.next
; /* Get the first activation on task */
288 gact
= 0; /* Pretend we didn't find it yet */
290 for(i
= 0; i
< task
->thread_count
; i
++) { /* All of the activations */
291 if(fact
->mact
.vmmControl
) { /* Is this a virtual machine monitor? */
292 gact
= fact
; /* Yeah... */
293 break; /* Bail the loop... */
295 fact
= (thread_act_t
)fact
->task_threads
.next
; /* Go to the next one */
300 * We only allow one thread per task to be a virtual machine monitor right now. This solves
301 * a number of potential problems that I can't put my finger on right now.
303 * Utlimately, I think we want to move the controls and make all this task based instead of
304 * thread based. That would allow an emulator architecture to spawn a kernel thread for each
305 * VM (if they want) rather than hand dispatch contexts.
308 if(gact
&& (gact
!= act
)) { /* Check if another thread is a vmm or trying to be */
309 task_unlock(task
); /* Release task lock */
310 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
311 save
->save_r3
= KERN_FAILURE
; /* We must play alone... */
315 if(!gact
) act
->mact
.vmmControl
= (vmmCntrlTable
*)1; /* Temporarily mark that we are the vmm thread */
317 task_unlock(task
); /* Safe to release now (because we've marked ourselves) */
319 CTable
= act
->mact
.vmmControl
; /* Get the control table address */
320 if ((unsigned int)CTable
== 1) { /* If we are marked, try to allocate a new table, otherwise we have one */
321 if(!(CTable
= (vmmCntrlTable
*)kalloc(sizeof(vmmCntrlTable
)))) { /* Get a fresh emulation control table */
322 act
->mact
.vmmControl
= 0; /* Unmark us as vmm 'cause we failed */
323 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
324 save
->save_r3
= KERN_RESOURCE_SHORTAGE
; /* No storage... */
328 bzero((void *)CTable
, sizeof(vmmCntrlTable
)); /* Clean it up */
329 act
->mact
.vmmControl
= CTable
; /* Initialize the table anchor */
332 for(cvi
= 0; cvi
< kVmmMaxContexts
; cvi
++) { /* Search to find a free slot */
333 if(!(CTable
->vmmc
[cvi
].vmmFlags
& vmmInUse
)) break; /* Bail if we find an unused slot */
336 if(cvi
>= kVmmMaxContexts
) { /* Did we find one? */
337 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
338 save
->save_r3
= KERN_RESOURCE_SHORTAGE
; /* No empty slots... */
342 ret
= vm_map_wire( /* Wire the virtual machine monitor's context area */
344 (vm_offset_t
)vmm_user_state
,
345 (vm_offset_t
)vmm_user_state
+ PAGE_SIZE
,
346 VM_PROT_READ
| VM_PROT_WRITE
,
349 if (ret
!= KERN_SUCCESS
) /* The wire failed, return the code */
350 goto return_in_shame
;
352 /* Map the vmm state into the kernel's address space. */
353 conphys
= pmap_find_phys(act
->map
->pmap
, (addr64_t
)((uintptr_t)vmm_user_state
));
355 /* Find a virtual address to use. */
356 ret
= kmem_alloc_pageable(kernel_map
, &conkern
, PAGE_SIZE
);
357 if (ret
!= KERN_SUCCESS
) { /* Did we find an address? */
358 (void) vm_map_unwire(act
->map
, /* No, unwire the context area */
359 (vm_offset_t
)vmm_user_state
,
360 (vm_offset_t
)vmm_user_state
+ PAGE_SIZE
,
362 goto return_in_shame
;
365 /* Map it into the kernel's address space. */
367 pmap_enter(kernel_pmap
, conkern
, conphys
,
368 VM_PROT_READ
| VM_PROT_WRITE
,
369 VM_WIMG_USE_DEFAULT
, TRUE
);
371 /* Clear the vmm state structure. */
372 vks
= (vmm_state_page_t
*)conkern
;
373 bzero((char *)vks
, PAGE_SIZE
);
376 /* We're home free now. Simply fill in the necessary info and return. */
378 vks
->interface_version
= version
; /* Set our version code */
379 vks
->thread_index
= cvi
+ 1; /* Tell the user the index for this virtual machine */
381 CTable
->vmmc
[cvi
].vmmFlags
= vmmInUse
; /* Mark the slot in use and make sure the rest are clear */
382 CTable
->vmmc
[cvi
].vmmContextKern
= vks
; /* Remember the kernel address of comm area */
383 CTable
->vmmc
[cvi
].vmmContextPhys
= (vmm_state_page_t
*)conphys
; /* Remember the state page physical addr */
384 CTable
->vmmc
[cvi
].vmmContextUser
= vmm_user_state
; /* Remember user address of comm area */
386 CTable
->vmmc
[cvi
].vmmFacCtx
.FPUsave
= 0; /* Clear facility context control */
387 CTable
->vmmc
[cvi
].vmmFacCtx
.FPUlevel
= 0; /* Clear facility context control */
388 CTable
->vmmc
[cvi
].vmmFacCtx
.FPUcpu
= 0; /* Clear facility context control */
389 CTable
->vmmc
[cvi
].vmmFacCtx
.VMXsave
= 0; /* Clear facility context control */
390 CTable
->vmmc
[cvi
].vmmFacCtx
.VMXlevel
= 0; /* Clear facility context control */
391 CTable
->vmmc
[cvi
].vmmFacCtx
.VMXcpu
= 0; /* Clear facility context control */
392 CTable
->vmmc
[cvi
].vmmFacCtx
.facAct
= act
; /* Point back to the activation */
394 hw_atomic_add((int *)&saveanchor
.savetarget
, 2); /* Account for the number of extra saveareas we think we might "need" */
396 if (!(act
->map
->pmap
->pmapFlags
& pmapVMhost
)) {
397 simple_lock(&(act
->map
->pmap
->lock
));
398 act
->map
->pmap
->pmapFlags
|= pmapVMhost
;
399 simple_unlock(&(act
->map
->pmap
->lock
));
402 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
403 save
->save_r3
= KERN_SUCCESS
; /* Hip, hip, horay... */
407 if(!gact
) kfree((vm_offset_t
)CTable
, sizeof(vmmCntrlTable
)); /* Toss the table if we just allocated it */
408 act
->mact
.vmmControl
= 0; /* Unmark us as vmm 'cause we failed */
409 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
410 save
->save_r3
= ret
; /* Pass back return code... */
416 /*-----------------------------------------------------------------------
417 ** vmm_tear_down_context
419 ** This function uninitializes an emulation context. It deallocates
420 ** internal resources associated with the context block.
423 ** act - pointer to current thread activation structure
424 ** index - index returned by vmm_init_context
427 ** kernel return code indicating success or failure
430 ** This call will also trash the address space with the same ID. While this
431 ** is really not too cool, we have to do it because we need to make
432 ** sure that old VMM users (not that we really have any) who depend upon
433 ** the address space going away with the context still work the same.
434 -----------------------------------------------------------------------*/
436 kern_return_t
vmm_tear_down_context(
438 vmm_thread_index_t index
)
440 vmmCntrlEntry
*CEntry
;
441 vmmCntrlTable
*CTable
;
443 register savearea
*sv
;
445 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
446 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't vmm thread or the index is bogus */
448 ml_set_interrupts_enabled(TRUE
); /* This can take a bit of time so pass interruptions */
450 hw_atomic_sub((int *)&saveanchor
.savetarget
, 2); /* We don't need these extra saveareas anymore */
452 if(CEntry
->vmmFacCtx
.FPUsave
) { /* Is there any floating point context? */
453 toss_live_fpu(&CEntry
->vmmFacCtx
); /* Get rid of any live context here */
454 save_release((savearea
*)CEntry
->vmmFacCtx
.FPUsave
); /* Release it */
457 if(CEntry
->vmmFacCtx
.VMXsave
) { /* Is there any vector context? */
458 toss_live_vec(&CEntry
->vmmFacCtx
); /* Get rid of any live context here */
459 save_release((savearea
*)CEntry
->vmmFacCtx
.VMXsave
); /* Release it */
462 CEntry
->vmmPmap
= 0; /* Remove this trace */
463 if(act
->mact
.vmmControl
->vmmAdsp
[index
- 1]) { /* Check if there is an address space assigned here */
464 mapping_remove(act
->mact
.vmmControl
->vmmAdsp
[index
- 1], 0xFFFFFFFFFFFFF000LL
); /* Remove final page explicitly because we might have mapped it */
465 pmap_remove(act
->mact
.vmmControl
->vmmAdsp
[index
- 1], 0, 0xFFFFFFFFFFFFF000LL
); /* Remove all entries from this map */
466 pmap_destroy(act
->mact
.vmmControl
->vmmAdsp
[index
- 1]); /* Toss the pmap for this context */
467 act
->mact
.vmmControl
->vmmAdsp
[index
- 1] = NULL
; /* Clean it up */
470 (void) vm_map_unwire( /* Unwire the user comm page */
472 (vm_offset_t
)CEntry
->vmmContextUser
,
473 (vm_offset_t
)CEntry
->vmmContextUser
+ PAGE_SIZE
,
476 kmem_free(kernel_map
, (vm_offset_t
)CEntry
->vmmContextKern
, PAGE_SIZE
); /* Remove kernel's view of the comm page */
478 CTable
= act
->mact
.vmmControl
; /* Get the control table address */
479 CTable
->vmmGFlags
= CTable
->vmmGFlags
& ~vmmLastAdSp
; /* Make sure we don't try to automap into this */
481 CEntry
->vmmFlags
= 0; /* Clear out all of the flags for this entry including in use */
482 CEntry
->vmmContextKern
= 0; /* Clear the kernel address of comm area */
483 CEntry
->vmmContextUser
= 0; /* Clear the user address of comm area */
485 CEntry
->vmmFacCtx
.FPUsave
= 0; /* Clear facility context control */
486 CEntry
->vmmFacCtx
.FPUlevel
= 0; /* Clear facility context control */
487 CEntry
->vmmFacCtx
.FPUcpu
= 0; /* Clear facility context control */
488 CEntry
->vmmFacCtx
.VMXsave
= 0; /* Clear facility context control */
489 CEntry
->vmmFacCtx
.VMXlevel
= 0; /* Clear facility context control */
490 CEntry
->vmmFacCtx
.VMXcpu
= 0; /* Clear facility context control */
491 CEntry
->vmmFacCtx
.facAct
= 0; /* Clear facility context control */
493 for(cvi
= 0; cvi
< kVmmMaxContexts
; cvi
++) { /* Search to find a free slot */
494 if(CTable
->vmmc
[cvi
].vmmFlags
& vmmInUse
) { /* Return if there are still some in use */
495 ml_set_interrupts_enabled(FALSE
); /* No more interruptions */
496 return KERN_SUCCESS
; /* Leave... */
501 * When we have tossed the last context, toss any address spaces left over before releasing
502 * the VMM control block
505 for(cvi
= 1; cvi
<= kVmmMaxContexts
; cvi
++) { /* Look at all slots */
506 if(!act
->mact
.vmmControl
->vmmAdsp
[index
- 1]) continue; /* Nothing to remove here */
507 mapping_remove(act
->mact
.vmmControl
->vmmAdsp
[index
- 1], 0xFFFFFFFFFFFFF000LL
); /* Remove final page explicitly because we might have mapped it */
508 pmap_remove(act
->mact
.vmmControl
->vmmAdsp
[index
- 1], 0, 0xFFFFFFFFFFFFF000LL
); /* Remove all entries from this map */
509 pmap_destroy(act
->mact
.vmmControl
->vmmAdsp
[index
- 1]); /* Toss the pmap for this context */
510 act
->mact
.vmmControl
->vmmAdsp
[index
- 1] = 0; /* Clear just in case */
513 kfree((vm_offset_t
)CTable
, sizeof(vmmCntrlTable
)); /* Toss the table because to tossed the last context */
514 act
->mact
.vmmControl
= 0; /* Unmark us as vmm */
516 ml_set_interrupts_enabled(FALSE
); /* No more interruptions */
522 /*-----------------------------------------------------------------------
525 ** This function sets the eXtended Architecture flags for the specified VM.
527 ** We need to return the result in the return code rather than in the return parameters
528 ** because we need an architecture independent format so the results are actually
529 ** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
532 ** Note that this function does a lot of the same stuff as vmm_tear_down_context
533 ** and vmm_init_context.
536 ** act - pointer to current thread activation structure
537 ** index - index returned by vmm_init_context
538 ** flags - the extended architecture flags
542 ** KERN_SUCCESS if vm is valid and initialized. KERN_FAILURE if not.
543 ** Also, the internal flags are set and, additionally, the VM is completely reset.
544 -----------------------------------------------------------------------*/
546 kern_return_t
vmm_set_XA(
548 vmm_thread_index_t index
,
549 unsigned int xaflags
)
551 vmmCntrlEntry
*CEntry
;
552 vmmCntrlTable
*CTable
;
553 vmm_state_page_t
*vks
;
554 vmm_version_t version
;
556 if(xaflags
& ~vmm64Bit
) return KERN_FAILURE
; /* We only support this one kind now */
558 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
559 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't a vmm or the index is bogus */
561 ml_set_interrupts_enabled(TRUE
); /* This can take a bit of time so pass interruptions */
563 if(CEntry
->vmmFacCtx
.FPUsave
) { /* Is there any floating point context? */
564 toss_live_fpu(&CEntry
->vmmFacCtx
); /* Get rid of any live context here */
565 save_release((savearea
*)CEntry
->vmmFacCtx
.FPUsave
); /* Release it */
568 if(CEntry
->vmmFacCtx
.VMXsave
) { /* Is there any vector context? */
569 toss_live_vec(&CEntry
->vmmFacCtx
); /* Get rid of any live context here */
570 save_release((savearea
*)CEntry
->vmmFacCtx
.VMXsave
); /* Release it */
573 CTable
= act
->mact
.vmmControl
; /* Get the control table address */
574 CTable
->vmmGFlags
= CTable
->vmmGFlags
& ~vmmLastAdSp
; /* Make sure we don't try to automap into this */
576 CEntry
->vmmFlags
&= vmmInUse
; /* Clear out all of the flags for this entry except in use */
577 CEntry
->vmmXAFlgs
= (xaflags
& vmm64Bit
) | (CEntry
->vmmXAFlgs
& ~vmm64Bit
); /* Set the XA flags */
578 CEntry
->vmmFacCtx
.FPUsave
= 0; /* Clear facility context control */
579 CEntry
->vmmFacCtx
.FPUlevel
= 0; /* Clear facility context control */
580 CEntry
->vmmFacCtx
.FPUcpu
= 0; /* Clear facility context control */
581 CEntry
->vmmFacCtx
.VMXsave
= 0; /* Clear facility context control */
582 CEntry
->vmmFacCtx
.VMXlevel
= 0; /* Clear facility context control */
583 CEntry
->vmmFacCtx
.VMXcpu
= 0; /* Clear facility context control */
585 vks
= CEntry
->vmmContextKern
; /* Get address of the context page */
586 version
= vks
->interface_version
; /* Save the version code */
587 bzero((char *)vks
, 4096); /* Clear all */
589 vks
->interface_version
= version
; /* Set our version code */
590 vks
->thread_index
= index
% vmmTInum
; /* Tell the user the index for this virtual machine */
592 ml_set_interrupts_enabled(FALSE
); /* No more interruptions */
594 return KERN_SUCCESS
; /* Return the flags */
598 /*-----------------------------------------------------------------------
601 ** This function uninitializes all emulation contexts. If there are
602 ** any vmm contexts, it calls vmm_tear_down_context for each one.
604 ** Note: this can also be called from normal thread termination. Because of
605 ** that, we will context switch out of an alternate if we are currently in it.
606 ** It will be terminated with no valid return code set because we don't expect
607 ** the activation to ever run again.
610 ** activation to tear down
613 ** All vmm contexts released and VMM shut down
614 -----------------------------------------------------------------------*/
615 void vmm_tear_down_all(thread_act_t act
) {
617 vmmCntrlTable
*CTable
;
623 if(act
->mact
.specFlags
& runningVM
) { /* Are we actually in a context right now? */
624 save
= find_user_regs(act
); /* Find the user state context */
625 if(!save
) { /* Did we find it? */
626 panic("vmm_tear_down_all: runningVM marked but no user state context\n");
630 save
->save_exception
= kVmmBogusContext
*4; /* Indicate that this context is bogus now */
631 s
= splhigh(); /* Make sure interrupts are off */
632 vmm_force_exit(act
, save
); /* Force and exit from VM state */
633 splx(s
); /* Restore interrupts */
636 if(CTable
= act
->mact
.vmmControl
) { /* Do we have a vmm control block? */
639 for(cvi
= 1; cvi
<= kVmmMaxContexts
; cvi
++) { /* Look at all slots */
640 if(CTable
->vmmc
[cvi
- 1].vmmFlags
& vmmInUse
) { /* Is this one in use */
641 ret
= vmm_tear_down_context(act
, cvi
); /* Take down the found context */
642 if(ret
!= KERN_SUCCESS
) { /* Did it go away? */
643 panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
650 * Note that all address apces should be gone here.
652 if(act
->mact
.vmmControl
) { /* Did we find one? */
653 panic("vmm_tear_down_all: control table did not get deallocated\n"); /* Table did not go away */
660 /*-----------------------------------------------------------------------
663 ** This function maps a page from within the client's logical
664 ** address space into the alternate address space.
666 ** The page need not be locked or resident. If not resident, it will be faulted
667 ** in by this code, which may take some time. Also, if the page is not locked,
668 ** it, and this mapping may disappear at any time, even before it gets used. Note also
669 ** that reference and change information is NOT preserved when a page is unmapped, either
670 ** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
671 ** space). This means that if RC is needed, the page MUST be wired.
673 ** Note that if there is already a mapping at the address, it is removed and all
674 ** information (including RC) is lost BEFORE an attempt is made to map it. Also,
675 ** if the map call fails, the old address is still unmapped..
678 ** act - pointer to current thread activation
679 ** index - index of address space to map into
680 ** va - virtual address within the client's address
682 ** ava - virtual address within the alternate address
684 ** prot - protection flags
686 ** Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
687 ** areas are not allowed and will fail. Same with directly mapped I/O areas.
690 ** Interrupts disabled (from fast trap)
693 ** kernel return code indicating success or failure
694 ** if success, va resident and alternate mapping made
695 -----------------------------------------------------------------------*/
697 kern_return_t
vmm_map_page(
705 vmmCntrlEntry
*CEntry
;
706 register mapping
*mp
;
707 struct phys_entry
*pp
;
709 addr64_t ova
, nextva
;
712 pmap
= vmm_get_adsp(act
, index
); /* Get the pmap for this address space */
713 if(!pmap
) return KERN_FAILURE
; /* Bogus address space, no VMs, or we can't make a pmap, failure... */
715 if(ava
> vm_max_address
) return kVmmInvalidAddress
; /* Does the machine support an address of this size? */
717 map
= current_act()->map
; /* Get the current map */
719 while(1) { /* Keep trying until we get it or until we fail */
721 mp
= mapping_find(map
->pmap
, cva
, &nextva
, 0); /* Find the mapping for this address */
723 if(mp
) break; /* We found it */
725 ml_set_interrupts_enabled(TRUE
); /* Enable interruptions */
726 ret
= vm_fault(map
, trunc_page_32((vm_offset_t
)cva
), VM_PROT_READ
| VM_PROT_WRITE
, FALSE
); /* Didn't find it, try to fault it in read/write... */
727 ml_set_interrupts_enabled(FALSE
); /* Disable interruptions */
728 if (ret
!= KERN_SUCCESS
) return KERN_FAILURE
; /* There isn't a page there, return... */
731 if(mp
->mpFlags
& (mpBlock
| mpNest
| mpSpecial
)) { /* If this is a block, a nest, or some other special thing, we can't map it */
732 mapping_drop_busy(mp
); /* We have everything we need from the mapping */
733 return KERN_FAILURE
; /* Leave in shame */
736 while(1) { /* Keep trying the enter until it goes in */
737 ova
= mapping_make(pmap
, ava
, mp
->mpPAddr
, 0, 1, prot
); /* Enter the mapping into the pmap */
738 if(!ova
) break; /* If there were no collisions, we are done... */
739 mapping_remove(pmap
, ova
); /* Remove the mapping that collided */
742 mapping_drop_busy(mp
); /* We have everything we need from the mapping */
744 if (!((per_proc_info
[cpu_number()].spcFlags
) & FamVMmode
)) {
745 act
->mact
.vmmControl
->vmmLastMap
= ava
& 0xFFFFFFFFFFFFF000ULL
; /* Remember the last mapping we made */
746 act
->mact
.vmmControl
->vmmGFlags
= (act
->mact
.vmmControl
->vmmGFlags
& ~vmmLastAdSp
) | index
; /* Remember last address space */
753 /*-----------------------------------------------------------------------
756 ** This function maps a page from within the client's logical
757 ** address space into the alternate address space of the
758 ** Virtual Machine Monitor context and then directly starts executing.
760 ** See description of vmm_map_page for details.
763 ** Index is used for both the context and the address space ID.
764 ** index[24:31] is the context id and index[16:23] is the address space.
765 ** if the address space ID is 0, the context ID is used for it.
768 ** Normal exit is to run the VM. Abnormal exit is triggered via a
769 ** non-KERN_SUCCESS return from vmm_map_page or later during the
770 ** attempt to transition into the VM.
771 -----------------------------------------------------------------------*/
773 vmm_return_code_t
vmm_map_execute(
775 vmm_thread_index_t index
,
781 vmmCntrlEntry
*CEntry
;
783 vmm_thread_index_t cndx
;
785 cndx
= index
& 0xFF; /* Clean it up */
787 CEntry
= vmm_get_entry(act
, cndx
); /* Get and validate the index */
788 if (CEntry
== NULL
) return kVmmBogusContext
; /* Return bogus context */
790 if (((per_proc_info
[cpu_number()].spcFlags
) & FamVMmode
) && (CEntry
!= act
->mact
.vmmCEntry
))
791 return kVmmBogusContext
; /* Yes, invalid index in Fam */
793 adsp
= (index
>> 8) & 0xFF; /* Get any requested address space */
794 if(!adsp
) adsp
= (index
& 0xFF); /* If 0, use context ID as address space ID */
796 ret
= vmm_map_page(act
, adsp
, cva
, ava
, prot
); /* Go try to map the page on in */
799 if(ret
== KERN_SUCCESS
) {
800 act
->mact
.vmmControl
->vmmLastMap
= ava
& 0xFFFFFFFFFFFFF000ULL
; /* Remember the last mapping we made */
801 act
->mact
.vmmControl
->vmmGFlags
= (act
->mact
.vmmControl
->vmmGFlags
& ~vmmLastAdSp
) | cndx
; /* Remember last address space */
802 vmm_execute_vm(act
, cndx
); /* Return was ok, launch the VM */
805 return ret
; /* We had trouble mapping in the page */
809 /*-----------------------------------------------------------------------
812 ** This function maps a list of pages into various address spaces
815 ** act - pointer to current thread activation
816 ** index - index of default address space (used if not specifed in list entry
817 ** count - number of pages to release
818 ** flavor - 0 if 32-bit version, 1 if 64-bit
819 ** vmcpComm in the comm page contains up to kVmmMaxMapPages to map
822 ** kernel return code indicating success or failure
823 ** KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
824 ** or the vmm_map_page call fails.
825 ** We return kVmmInvalidAddress if virtual address size is not supported
826 -----------------------------------------------------------------------*/
828 kern_return_t
vmm_map_list(
834 vmmCntrlEntry
*CEntry
;
844 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
845 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't a vmm or the index is bogus */
847 if(cnt
> kVmmMaxMapPages
) return KERN_FAILURE
; /* They tried to map too many */
848 if(!cnt
) return KERN_SUCCESS
; /* If they said none, we're done... */
850 lst
= (vmmMList
*)&((vmm_comm_page_t
*)CEntry
->vmmContextKern
)->vmcpComm
[0]; /* Point to the first entry */
851 lstx
= (vmmMList64
*)&((vmm_comm_page_t
*)CEntry
->vmmContextKern
)->vmcpComm
[0]; /* Point to the first entry */
853 for(i
= 0; i
< cnt
; i
++) { /* Step and release all pages in list */
854 if(flavor
) { /* Check if 32- or 64-bit addresses */
855 cva
= lstx
[i
].vmlva
; /* Get the 64-bit actual address */
856 ava
= lstx
[i
].vmlava
; /* Get the 64-bit guest address */
859 cva
= lst
[i
].vmlva
; /* Get the 32-bit actual address */
860 ava
= lst
[i
].vmlava
; /* Get the 32-bit guest address */
863 prot
= ava
& vmmlProt
; /* Extract the protection bits */
864 adsp
= (ava
& vmmlAdID
) >> 4; /* Extract an explicit address space request */
865 if(!adsp
) adsp
= index
- 1; /* If no explicit, use supplied default */
866 ava
= ava
&= 0xFFFFFFFFFFFFF000ULL
; /* Clean up the address */
868 ret
= vmm_map_page(act
, index
, cva
, ava
, prot
); /* Go try to map the page on in */
869 if(ret
!= KERN_SUCCESS
) return ret
; /* Bail if any error */
872 return KERN_SUCCESS
; /* Return... */
875 /*-----------------------------------------------------------------------
876 ** vmm_get_page_mapping
878 ** This function determines whether the specified VMM
879 ** virtual address is mapped.
882 ** act - pointer to current thread activation
883 ** index - index of vmm state for this page
884 ** va - virtual address within the alternate's address
888 ** Non-alternate's virtual address (page aligned) or -1 if not mapped or any failure
891 ** If there are aliases to the page in the non-alternate address space,
892 ** this call could return the wrong one. Moral of the story: no aliases.
893 -----------------------------------------------------------------------*/
895 addr64_t
vmm_get_page_mapping(
900 vmmCntrlEntry
*CEntry
;
901 register mapping
*mp
;
903 addr64_t nextva
, sva
;
906 pmap
= vmm_get_adsp(act
, index
); /* Get and validate the index */
907 if (!pmap
)return -1; /* No good, failure... */
909 mp
= mapping_find(pmap
, va
, &nextva
, 0); /* Find our page */
911 if(!mp
) return -1; /* Not mapped, return -1 */
913 pa
= mp
->mpPAddr
; /* Remember the page address */
915 mapping_drop_busy(mp
); /* Go ahead and relase the mapping now */
917 pmap
= current_act()->map
->pmap
; /* Get the current pmap */
918 sva
= mapping_p2v(pmap
, pa
); /* Now find the source virtual */
920 if(sva
!= 0) return sva
; /* We found it... */
922 panic("vmm_get_page_mapping: could not back-map alternate va (%016llX)\n", va
); /* We are bad wrong if we can't find it */
927 /*-----------------------------------------------------------------------
930 ** This function unmaps a page from the alternate's logical
934 ** act - pointer to current thread activation
935 ** index - index of vmm state for this page
936 ** va - virtual address within the vmm's address
940 ** kernel return code indicating success or failure
941 -----------------------------------------------------------------------*/
943 kern_return_t
vmm_unmap_page(
948 vmmCntrlEntry
*CEntry
;
951 kern_return_t kern_result
= KERN_SUCCESS
;
953 pmap
= vmm_get_adsp(act
, index
); /* Get and validate the index */
954 if (!pmap
)return -1; /* No good, failure... */
956 nadd
= mapping_remove(pmap
, va
); /* Toss the mapping */
958 return ((nadd
& 1) ? KERN_FAILURE
: KERN_SUCCESS
); /* Return... */
961 /*-----------------------------------------------------------------------
964 ** This function unmaps a list of pages from the alternate's logical
968 ** act - pointer to current thread activation
969 ** index - index of vmm state for this page
970 ** count - number of pages to release
971 ** flavor - 0 if 32-bit, 1 if 64-bit
972 ** vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
975 ** kernel return code indicating success or failure
976 ** KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
977 -----------------------------------------------------------------------*/
979 kern_return_t
vmm_unmap_list(
985 vmmCntrlEntry
*CEntry
;
987 kern_return_t kern_result
= KERN_SUCCESS
;
988 unsigned int *pgaddr
, i
;
995 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
996 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't a vmm or the index is bogus */
998 if(cnt
> kVmmMaxUnmapPages
) return KERN_FAILURE
; /* They tried to unmap too many */
999 if(!cnt
) return KERN_SUCCESS
; /* If they said none, we're done... */
1001 lst
= (vmmUMList
*)lstx
= (vmmUMList64
*) &((vmm_comm_page_t
*)CEntry
->vmmContextKern
)->vmcpComm
[0]; /* Point to the first entry */
1003 for(i
= 0; i
< cnt
; i
++) { /* Step and release all pages in list */
1004 if(flavor
) { /* Check if 32- or 64-bit addresses */
1005 gva
= lstx
[i
].vmlava
; /* Get the 64-bit guest address */
1008 gva
= lst
[i
].vmlava
; /* Get the 32-bit guest address */
1011 adsp
= (gva
& vmmlAdID
) >> 4; /* Extract an explicit address space request */
1012 if(!adsp
) adsp
= index
- 1; /* If no explicit, use supplied default */
1013 pmap
= act
->mact
.vmmControl
->vmmAdsp
[adsp
]; /* Get the pmap for this request */
1014 if(!pmap
) continue; /* Ain't nuthin' mapped here, no durn map... */
1016 gva
= gva
&= 0xFFFFFFFFFFFFF000ULL
; /* Clean up the address */
1017 (void)mapping_remove(pmap
, gva
); /* Toss the mapping */
1020 return KERN_SUCCESS
; /* Return... */
1023 /*-----------------------------------------------------------------------
1024 ** vmm_unmap_all_pages
1026 ** This function unmaps all pages from the alternates's logical
1030 ** act - pointer to current thread activation
1031 ** index - index of context state
1037 ** All pages are unmapped, but the address space (i.e., pmap) is still alive
1038 -----------------------------------------------------------------------*/
1040 void vmm_unmap_all_pages(
1042 vmm_adsp_id_t index
)
1044 vmmCntrlEntry
*CEntry
;
1047 pmap
= vmm_get_adsp(act
, index
); /* Convert index to entry */
1048 if (!pmap
) return; /* Either this isn't vmm thread or the index is bogus */
1051 * Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
1053 mapping_remove(pmap
, 0xFFFFFFFFFFFFF000LL
); /* Remove final page explicitly because we might have mapped it */
1054 pmap_remove(pmap
, 0, 0xFFFFFFFFFFFFF000LL
); /* Remove all entries from this map */
1059 /*-----------------------------------------------------------------------
1060 ** vmm_get_page_dirty_flag
1062 ** This function returns the changed flag of the page
1063 ** and optionally clears the flag.
1066 ** act - pointer to current thread activation
1067 ** index - index of vmm state for this page
1068 ** va - virtual address within the vmm's address
1070 ** reset - Clears dirty if true, untouched if not
1074 ** clears the dirty bit in the pte if requested
1077 ** The RC bits are merged into the global physical entry
1078 -----------------------------------------------------------------------*/
1080 boolean_t
vmm_get_page_dirty_flag(
1082 vmm_adsp_id_t index
,
1086 vmmCntrlEntry
*CEntry
;
1087 register mapping
*mpv
, *mp
;
1091 pmap
= vmm_get_adsp(act
, index
); /* Convert index to entry */
1092 if (!pmap
) return 1; /* Either this isn't vmm thread or the index is bogus */
1094 RC
= hw_test_rc(pmap
, (addr64_t
)va
, reset
); /* Fetch the RC bits and clear if requested */
1096 switch (RC
& mapRetCode
) { /* Decode return code */
1098 case mapRtOK
: /* Changed */
1099 return ((RC
& (unsigned int)mpC
) == (unsigned int)mpC
); /* Return if dirty or not */
1102 case mapRtNotFnd
: /* Didn't find it */
1103 return 1; /* Return dirty */
1107 panic("vmm_get_page_dirty_flag: hw_test_rc failed - rc = %d, pmap = %08X, va = %016llX\n", RC
, pmap
, va
);
1111 return 1; /* Return the change bit */
1115 /*-----------------------------------------------------------------------
1118 ** This function sets the protection bits of a mapped page
1121 ** act - pointer to current thread activation
1122 ** index - index of vmm state for this page
1123 ** va - virtual address within the vmm's address
1125 ** prot - Protection flags
1129 ** Protection bits of the mapping are modifed
1131 -----------------------------------------------------------------------*/
1133 kern_return_t
vmm_protect_page(
1135 vmm_adsp_id_t index
,
1139 vmmCntrlEntry
*CEntry
;
1144 pmap
= vmm_get_adsp(act
, index
); /* Convert index to entry */
1145 if (!pmap
) return KERN_FAILURE
; /* Either this isn't vmm thread or the index is bogus */
1147 ret
= hw_protect(pmap
, va
, prot
, &nextva
); /* Try to change the protect here */
1149 switch (ret
) { /* Decode return code */
1151 case mapRtOK
: /* All ok... */
1152 break; /* Outta here */
1154 case mapRtNotFnd
: /* Didn't find it */
1155 return KERN_SUCCESS
; /* Ok, return... */
1159 panic("vmm_protect_page: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret
, pmap
, (addr64_t
)va
);
1163 if (!((per_proc_info
[cpu_number()].spcFlags
) & FamVMmode
)) {
1164 act
->mact
.vmmControl
->vmmLastMap
= va
& 0xFFFFFFFFFFFFF000ULL
; /* Remember the last mapping we made */
1165 act
->mact
.vmmControl
->vmmGFlags
= (act
->mact
.vmmControl
->vmmGFlags
& ~vmmLastAdSp
) | index
; /* Remember last address space */
1168 return KERN_SUCCESS
; /* Return */
1172 /*-----------------------------------------------------------------------
1173 ** vmm_protect_execute
1175 ** This function sets the protection bits of a mapped page
1176 ** and then directly starts executing.
1178 ** See description of vmm_protect_page for details
1181 ** See vmm_protect_page and vmm_map_execute
1184 ** Normal exit is to run the VM. Abnormal exit is triggered via a
1185 ** non-KERN_SUCCESS return from vmm_map_page or later during the
1186 ** attempt to transition into the VM.
1187 -----------------------------------------------------------------------*/
1189 vmm_return_code_t
vmm_protect_execute(
1191 vmm_thread_index_t index
,
1196 vmmCntrlEntry
*CEntry
;
1198 vmm_thread_index_t cndx
;
1200 cndx
= index
& 0xFF; /* Clean it up */
1201 CEntry
= vmm_get_entry(act
, cndx
); /* Get and validate the index */
1202 if (CEntry
== NULL
) return kVmmBogusContext
; /* Return bogus context */
1204 adsp
= (index
>> 8) & 0xFF; /* Get any requested address space */
1205 if(!adsp
) adsp
= (index
& 0xFF); /* If 0, use context ID as address space ID */
1207 if (((per_proc_info
[cpu_number()].spcFlags
) & FamVMmode
) && (CEntry
!= act
->mact
.vmmCEntry
))
1208 return kVmmBogusContext
; /* Yes, invalid index in Fam */
1210 ret
= vmm_protect_page(act
, adsp
, va
, prot
); /* Go try to change access */
1212 if(ret
== KERN_SUCCESS
) {
1213 act
->mact
.vmmControl
->vmmLastMap
= va
& 0xFFFFFFFFFFFFF000ULL
; /* Remember the last mapping we made */
1214 act
->mact
.vmmControl
->vmmGFlags
= (act
->mact
.vmmControl
->vmmGFlags
& ~vmmLastAdSp
) | cndx
; /* Remember last address space */
1215 vmm_execute_vm(act
, cndx
); /* Return was ok, launch the VM */
1218 return ret
; /* We had trouble of some kind (shouldn't happen) */
1223 /*-----------------------------------------------------------------------
1224 ** vmm_get_float_state
1226 ** This function causes the current floating point state to
1227 ** be saved into the shared context area. It also clears the
1228 ** vmmFloatCngd changed flag.
1231 ** act - pointer to current thread activation structure
1232 ** index - index returned by vmm_init_context
1236 -----------------------------------------------------------------------*/
1238 kern_return_t
vmm_get_float_state(
1240 vmm_thread_index_t index
)
1242 vmmCntrlEntry
*CEntry
;
1243 vmmCntrlTable
*CTable
;
1245 register struct savearea_fpu
*sv
;
1247 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
1248 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't vmm thread or the index is bogus */
1250 act
->mact
.specFlags
&= ~floatCng
; /* Clear the special flag */
1251 CEntry
->vmmContextKern
->vmmStat
&= ~vmmFloatCngd
; /* Clear the change indication */
1253 fpu_save(&CEntry
->vmmFacCtx
); /* Save context if live */
1255 if(sv
= CEntry
->vmmFacCtx
.FPUsave
) { /* Is there context yet? */
1256 bcopy((char *)&sv
->save_fp0
, (char *)&(CEntry
->vmmContextKern
->vmm_proc_state
.ppcFPRs
), 32 * 8); /* 32 registers */
1257 return KERN_SUCCESS
;
1261 for(i
= 0; i
< 32; i
++) { /* Initialize floating points */
1262 CEntry
->vmmContextKern
->vmm_proc_state
.ppcFPRs
[i
].d
= FloatInit
; /* Initial value */
1265 return KERN_SUCCESS
;
1268 /*-----------------------------------------------------------------------
1269 ** vmm_get_vector_state
1271 ** This function causes the current vector state to
1272 ** be saved into the shared context area. It also clears the
1273 ** vmmVectorCngd changed flag.
1276 ** act - pointer to current thread activation structure
1277 ** index - index returned by vmm_init_context
1281 -----------------------------------------------------------------------*/
1283 kern_return_t
vmm_get_vector_state(
1285 vmm_thread_index_t index
)
1287 vmmCntrlEntry
*CEntry
;
1288 vmmCntrlTable
*CTable
;
1290 unsigned int vrvalidwrk
;
1291 register struct savearea_vec
*sv
;
1293 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
1294 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't vmm thread or the index is bogus */
1296 vec_save(&CEntry
->vmmFacCtx
); /* Save context if live */
1298 act
->mact
.specFlags
&= ~vectorCng
; /* Clear the special flag */
1299 CEntry
->vmmContextKern
->vmmStat
&= ~vmmVectCngd
; /* Clear the change indication */
1301 if(sv
= CEntry
->vmmFacCtx
.VMXsave
) { /* Is there context yet? */
1303 vrvalidwrk
= sv
->save_vrvalid
; /* Get the valid flags */
1305 for(i
= 0; i
< 32; i
++) { /* Copy the saved registers and invalidate the others */
1306 if(vrvalidwrk
& 0x80000000) { /* Do we have a valid value here? */
1307 for(j
= 0; j
< 4; j
++) { /* If so, copy it over */
1308 CEntry
->vmmContextKern
->vmm_proc_state
.ppcVRs
[i
].i
[j
] = ((unsigned int *)&(sv
->save_vr0
))[(i
* 4) + j
];
1312 for(j
= 0; j
< 4; j
++) { /* Otherwise set to empty value */
1313 CEntry
->vmmContextKern
->vmm_proc_state
.ppcVRs
[i
].i
[j
] = QNaNbarbarian
[j
];
1317 vrvalidwrk
= vrvalidwrk
<< 1; /* Shift over to the next */
1321 return KERN_SUCCESS
;
1324 for(i
= 0; i
< 32; i
++) { /* Initialize vector registers */
1325 for(j
=0; j
< 4; j
++) { /* Do words */
1326 CEntry
->vmmContextKern
->vmm_proc_state
.ppcVRs
[i
].i
[j
] = QNaNbarbarian
[j
]; /* Initial value */
1330 return KERN_SUCCESS
;
1333 /*-----------------------------------------------------------------------
1336 ** This function causes a timer (in AbsoluteTime) for a specific time
1337 ** to be set. It also clears the vmmTimerPop flag if the timer is actually
1338 ** set, it is cleared otherwise.
1340 ** A timer is cleared by setting the time to 0. This will clear
1341 ** the vmmTimerPop bit. Simply setting the timer to earlier than the
1342 ** current time clears the internal timer request, but leaves the
1343 ** vmmTimerPop flag set.
1347 ** act - pointer to current thread activation structure
1348 ** index - index returned by vmm_init_context
1349 ** timerhi - high order word of AbsoluteTime to pop
1350 ** timerlo - low order word of AbsoluteTime to pop
1353 ** timer set, vmmTimerPop cleared
1354 -----------------------------------------------------------------------*/
1356 kern_return_t
vmm_set_timer(
1358 vmm_thread_index_t index
,
1359 unsigned int timerhi
,
1360 unsigned int timerlo
)
1362 vmmCntrlEntry
*CEntry
;
1364 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
1365 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't vmm thread or the index is bogus */
1367 CEntry
->vmmTimer
= ((uint64_t)timerhi
<< 32) | timerlo
;
1369 vmm_timer_pop(act
); /* Go adjust all of the timer stuff */
1370 return KERN_SUCCESS
; /* Leave now... */
1374 /*-----------------------------------------------------------------------
1377 ** This function causes the timer for a specified VM to be
1378 ** returned in return_params[0] and return_params[1].
1379 ** Note that this is kind of funky for 64-bit VMs because we
1380 ** split the timer into two parts so that we still set parms 0 and 1.
1381 ** Obviously, we don't need to do this because the parms are 8 bytes
1386 ** act - pointer to current thread activation structure
1387 ** index - index returned by vmm_init_context
1390 ** Timer value set in return_params[0] and return_params[1].
1391 ** Set to 0 if timer is not set.
1392 -----------------------------------------------------------------------*/
1394 kern_return_t
vmm_get_timer(
1396 vmm_thread_index_t index
)
1398 vmmCntrlEntry
*CEntry
;
1399 vmmCntrlTable
*CTable
;
1401 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
1402 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't vmm thread or the index is bogus */
1404 if(CEntry
->vmmXAFlgs
& vmm64Bit
) { /* A 64-bit virtual machine? */
1405 CEntry
->vmmContextKern
->vmmRet
.vmmrp64
.return_params
[0] = (uint32_t)(CEntry
->vmmTimer
>> 32); /* Return the last timer value */
1406 CEntry
->vmmContextKern
->vmmRet
.vmmrp64
.return_params
[1] = (uint32_t)CEntry
->vmmTimer
; /* Return the last timer value */
1409 CEntry
->vmmContextKern
->vmmRet
.vmmrp32
.return_params
[0] = (CEntry
->vmmTimer
>> 32); /* Return the last timer value */
1410 CEntry
->vmmContextKern
->vmmRet
.vmmrp32
.return_params
[1] = (uint32_t)CEntry
->vmmTimer
; /* Return the last timer value */
1412 return KERN_SUCCESS
;
1416 /*-----------------------------------------------------------------------
1419 ** This function causes all timers in the array of VMs to be updated.
1420 ** All appropriate flags are set or reset. If a VM is currently
1421 ** running and its timer expired, it is intercepted.
1423 ** The qactTimer value is set to the lowest unexpired timer. It is
1424 ** zeroed if all timers are expired or have been reset.
1427 ** act - pointer to current thread activation structure
1430 ** timers set, vmmTimerPop cleared or set
1431 -----------------------------------------------------------------------*/
1436 vmmCntrlEntry
*CEntry
;
1437 vmmCntrlTable
*CTable
;
1439 uint64_t now
, soonest
;
1442 if(!((unsigned int)act
->mact
.vmmControl
& 0xFFFFFFFE)) { /* Are there any virtual machines? */
1443 panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act
);
1446 soonest
= 0xFFFFFFFFFFFFFFFFULL
; /* Max time */
1448 clock_get_uptime(&now
); /* What time is it? */
1450 CTable
= act
->mact
.vmmControl
; /* Make this easier */
1451 any
= 0; /* Haven't found a running unexpired timer yet */
1453 for(cvi
= 0; cvi
< kVmmMaxContexts
; cvi
++) { /* Cycle through all and check time now */
1455 if(!(CTable
->vmmc
[cvi
].vmmFlags
& vmmInUse
)) continue; /* Do not check if the entry is empty */
1457 if(CTable
->vmmc
[cvi
].vmmTimer
== 0) { /* Is the timer reset? */
1458 CTable
->vmmc
[cvi
].vmmFlags
&= ~vmmTimerPop
; /* Clear timer popped */
1459 CTable
->vmmc
[cvi
].vmmContextKern
->vmmStat
&= ~vmmTimerPop
; /* Clear timer popped */
1460 continue; /* Check next */
1463 if (CTable
->vmmc
[cvi
].vmmTimer
<= now
) {
1464 CTable
->vmmc
[cvi
].vmmFlags
|= vmmTimerPop
; /* Set timer popped here */
1465 CTable
->vmmc
[cvi
].vmmContextKern
->vmmStat
|= vmmTimerPop
; /* Set timer popped here */
1466 if((unsigned int)&CTable
->vmmc
[cvi
] == (unsigned int)act
->mact
.vmmCEntry
) { /* Is this the running VM? */
1467 sv
= find_user_regs(act
); /* Get the user state registers */
1468 if(!sv
) { /* Did we find something? */
1469 panic("vmm_timer_pop: no user context; act = %08X\n", act
);
1471 sv
->save_exception
= kVmmReturnNull
*4; /* Indicate that this is a null exception */
1472 vmm_force_exit(act
, sv
); /* Intercept a running VM */
1474 continue; /* Check the rest */
1476 else { /* It hasn't popped yet */
1477 CTable
->vmmc
[cvi
].vmmFlags
&= ~vmmTimerPop
; /* Set timer not popped here */
1478 CTable
->vmmc
[cvi
].vmmContextKern
->vmmStat
&= ~vmmTimerPop
; /* Set timer not popped here */
1481 any
= 1; /* Show we found an active unexpired timer */
1483 if (CTable
->vmmc
[cvi
].vmmTimer
< soonest
)
1484 soonest
= CTable
->vmmc
[cvi
].vmmTimer
;
1488 if (act
->mact
.qactTimer
== 0 || soonest
<= act
->mact
.qactTimer
)
1489 act
->mact
.qactTimer
= soonest
; /* Set lowest timer */
1497 /*-----------------------------------------------------------------------
1500 ** This function prevents the specified VM(s) from running.
1501 ** If any is currently executing, the execution is intercepted
1502 ** with a code of kVmmStopped. Note that execution of the VM is
1503 ** blocked until a vmmExecuteVM is called with the start flag set to 1.
1504 ** This provides the ability for a thread to stop execution of a VM and
1505 ** insure that it will not be run until the emulator has processed the
1506 ** "virtual" interruption.
1509 ** vmmask - 32 bit mask corresponding to the VMs to put in stop state
1510 ** NOTE: if this mask is all 0s, any executing VM is intercepted with
1511 ** a kVmmStopped (but not marked stopped), otherwise this is a no-op. Also
1512 ** note that there is a potential race here and the VM may not stop.
1515 ** kernel return code indicating success
1516 ** or if no VMs are enabled, an invalid syscall exception.
1517 -----------------------------------------------------------------------*/
1519 int vmm_stop_vm(struct savearea
*save
)
1523 vmmCntrlTable
*CTable
;
1527 unsigned int vmmask
;
1528 ReturnHandler
*stopapc
;
1530 ml_set_interrupts_enabled(TRUE
); /* This can take a bit of time so pass interruptions */
1532 task
= current_task(); /* Figure out who we are */
1534 task_lock(task
); /* Lock our task */
1536 fact
= (thread_act_t
)task
->threads
.next
; /* Get the first activation on task */
1537 act
= 0; /* Pretend we didn't find it yet */
1539 for(i
= 0; i
< task
->thread_count
; i
++) { /* All of the activations */
1540 if(fact
->mact
.vmmControl
) { /* Is this a virtual machine monitor? */
1541 act
= fact
; /* Yeah... */
1542 break; /* Bail the loop... */
1544 fact
= (thread_act_t
)fact
->task_threads
.next
; /* Go to the next one */
1547 if(!((unsigned int)act
)) { /* See if we have VMMs yet */
1548 task_unlock(task
); /* No, unlock the task */
1549 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
1550 return 0; /* Go generate a syscall exception */
1553 act_lock_thread(act
); /* Make sure this stays 'round */
1554 task_unlock(task
); /* Safe to release now */
1556 CTable
= act
->mact
.vmmControl
; /* Get the pointer to the table */
1558 if(!((unsigned int)CTable
& -2)) { /* Are there any all the way up yet? */
1559 act_unlock_thread(act
); /* Unlock the activation */
1560 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
1561 return 0; /* Go generate a syscall exception */
1564 if(!(vmmask
= save
->save_r3
)) { /* Get the stop mask and check if all zeros */
1565 act_unlock_thread(act
); /* Unlock the activation */
1566 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
1567 save
->save_r3
= KERN_SUCCESS
; /* Set success */
1568 return 1; /* Return... */
1571 for(cvi
= 0; cvi
< kVmmMaxContexts
; cvi
++) { /* Search slots */
1572 if((0x80000000 & vmmask
) && (CTable
->vmmc
[cvi
].vmmFlags
& vmmInUse
)) { /* See if we need to stop and if it is in use */
1573 hw_atomic_or(&CTable
->vmmc
[cvi
].vmmFlags
, vmmXStop
); /* Set this one to stop */
1575 vmmask
= vmmask
<< 1; /* Slide mask over */
1578 if(hw_compare_and_store(0, 1, &act
->mact
.emPendRupts
)) { /* See if there is already a stop pending and lock out others if not */
1579 act_unlock_thread(act
); /* Already one pending, unlock the activation */
1580 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
1581 save
->save_r3
= KERN_SUCCESS
; /* Say we did it... */
1582 return 1; /* Leave */
1585 if(!(stopapc
= (ReturnHandler
*)kalloc(sizeof(ReturnHandler
)))) { /* Get a return handler control block */
1586 act
->mact
.emPendRupts
= 0; /* No memory, say we have given up request */
1587 act_unlock_thread(act
); /* Unlock the activation */
1588 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
1589 save
->save_r3
= KERN_RESOURCE_SHORTAGE
; /* No storage... */
1590 return 1; /* Return... */
1593 ml_set_interrupts_enabled(FALSE
); /* Disable interruptions for now */
1595 stopapc
->handler
= vmm_interrupt
; /* Set interruption routine */
1597 stopapc
->next
= act
->handlers
; /* Put our interrupt at the start of the list */
1598 act
->handlers
= stopapc
; /* Point to us */
1600 act_set_apc(act
); /* Set an APC AST */
1601 ml_set_interrupts_enabled(TRUE
); /* Enable interruptions now */
1603 act_unlock_thread(act
); /* Unlock the activation */
1605 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
1606 save
->save_r3
= KERN_SUCCESS
; /* Hip, hip, horay... */
1610 /*-----------------------------------------------------------------------
1613 ** This function is executed asynchronously from an APC AST.
1614 ** It is to be used for anything that needs to interrupt a running VM.
1615 ** This include any kind of interruption generation (other than timer pop)
1616 ** or entering the stopped state.
1619 ** ReturnHandler *rh - the return handler control block as required by the APC.
1620 ** thread_act_t act - the activation
1623 ** Whatever needed to be done is done.
1624 -----------------------------------------------------------------------*/
1626 void vmm_interrupt(ReturnHandler
*rh
, thread_act_t act
) {
1628 vmmCntrlTable
*CTable
;
1634 kfree((vm_offset_t
)rh
, sizeof(ReturnHandler
)); /* Release the return handler block */
1636 inter
= ml_set_interrupts_enabled(FALSE
); /* Disable interruptions for now */
1638 act
->mact
.emPendRupts
= 0; /* Say that there are no more interrupts pending */
1639 CTable
= act
->mact
.vmmControl
; /* Get the pointer to the table */
1641 if(!((unsigned int)CTable
& -2)) return; /* Leave if we aren't doing VMs any more... */
1643 if(act
->mact
.vmmCEntry
&& (act
->mact
.vmmCEntry
->vmmFlags
& vmmXStop
)) { /* Do we need to stop the running guy? */
1644 sv
= find_user_regs(act
); /* Get the user state registers */
1645 if(!sv
) { /* Did we find something? */
1646 panic("vmm_interrupt: no user context; act = %08X\n", act
);
1648 sv
->save_exception
= kVmmStopped
*4; /* Set a "stopped" exception */
1649 vmm_force_exit(act
, sv
); /* Intercept a running VM */
1651 ml_set_interrupts_enabled(inter
); /* Put interrupts back to what they were */