2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
23 * @APPLE_LICENSE_HEADER_END@
25 /*-----------------------------------------------------------------------
28 ** C routines that we are adding to the MacOS X kernel.
30 -----------------------------------------------------------------------*/
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
34 #include <mach/host_info.h>
35 #include <kern/kern_types.h>
36 #include <kern/host.h>
37 #include <kern/task.h>
38 #include <kern/thread.h>
39 #include <kern/thread_act.h>
40 #include <ppc/exception.h>
41 #include <ppc/mappings.h>
42 #include <ppc/thread_act.h>
43 #include <vm/vm_kern.h>
45 #include <ppc/vmachmon.h>
/* Globals defined in PPC low-level code; declared here for use by the vmm routines. */
extern struct Saveanchor	saveanchor;				/* Aligned savearea anchor */
extern double				FloatInit;				/* Initial FP register fill pattern */
extern unsigned long		QNaNbarbarian[4];		/* Quiet NaN pattern used to poison registers */
51 /*************************************************************************************
52 Virtual Machine Monitor Internal Routines
53 **************************************************************************************/
55 /*-----------------------------------------------------------------------
58 ** This function verifies and return a vmm context entry index
61 ** act - pointer to current thread activation
62 ** index - index into vmm control table (this is a "one based" value)
65 ** address of a vmmCntrlEntry or 0 if not found
66 -----------------------------------------------------------------------*/
68 vmmCntrlEntry
*vmm_get_entry(
70 vmm_thread_index_t index
)
72 vmmCntrlTable
*CTable
;
73 vmmCntrlEntry
*CEntry
;
75 index
= index
& vmmTInum
; /* Clean up the index */
77 if (act
->mact
.vmmControl
== 0) return NULL
; /* No control table means no vmm */
78 if ((index
- 1) >= kVmmMaxContexts
) return NULL
; /* Index not in range */
80 CTable
= act
->mact
.vmmControl
; /* Make the address a bit more convienient */
81 CEntry
= &CTable
->vmmc
[index
- 1]; /* Point to the entry */
83 if (!(CEntry
->vmmFlags
& vmmInUse
)) return NULL
; /* See if the slot is actually in use */
88 /*-----------------------------------------------------------------------
91 ** This function verifies and returns the pmap for an address space.
92 ** If there is none and the request is valid, a pmap will be created.
95 ** act - pointer to current thread activation
96 ** index - index into vmm control table (this is a "one based" value)
99 ** address of a pmap or 0 if not found or could no be created
100 ** Note that if there is no pmap for the address space it will be created.
101 -----------------------------------------------------------------------*/
103 pmap_t
vmm_get_adsp(thread_act_t act
, vmm_thread_index_t index
)
107 if (act
->mact
.vmmControl
== 0) return NULL
; /* No control table means no vmm */
108 if ((index
- 1) >= kVmmMaxContexts
) return NULL
; /* Index not in range */
110 pmap
= act
->mact
.vmmControl
->vmmAdsp
[index
- 1]; /* Get the pmap */
111 if(pmap
) return pmap
; /* We've got it... */
113 pmap
= pmap_create(0); /* Make a fresh one */
114 act
->mact
.vmmControl
->vmmAdsp
[index
- 1] = pmap
; /* Remember it */
116 * Note that if the create fails, we will return a null.
118 return pmap
; /* Return it... */
123 /*************************************************************************************
124 Virtual Machine Monitor Exported Functionality
126 The following routines are used to implement a quick-switch mechanism for
127 virtual machines that need to execute within their own processor envinroment
128 (including register and MMU state).
129 **************************************************************************************/
131 /*-----------------------------------------------------------------------
134 ** This function returns the current version of the virtual machine
135 ** interface. It is divided into two portions. The top 16 bits
136 ** represent the major version number, and the bottom 16 bits
137 ** represent the minor version number. Clients using the Vmm
138 ** functionality should make sure they are using a verison new
145 ** 32-bit number representing major/minor version of
147 -----------------------------------------------------------------------*/
149 int vmm_get_version(struct savearea
*save
)
151 save
->save_r3
= kVmmCurrentVersion
; /* Return the version */
156 /*-----------------------------------------------------------------------
159 ** This function returns a set of flags that represents the functionality
160 ** supported by the current verison of the Vmm interface. Clients should
161 ** use this to determine whether they can run on this system.
167 ** 32-bit number representing functionality supported by this
168 ** version of the Vmm module
169 -----------------------------------------------------------------------*/
171 int vmm_get_features(struct savearea
*save
)
173 save
->save_r3
= kVmmCurrentFeatures
; /* Return the features */
174 if(per_proc_info
->pf
.Available
& pf64Bit
) {
175 save
->save_r3
&= ~kVmmFeature_LittleEndian
; /* No little endian here */
176 save
->save_r3
|= kVmmFeature_SixtyFourBit
; /* Set that we can do 64-bit */
182 /*-----------------------------------------------------------------------
185 ** This function returns the maximum addressable virtual address sported
188 ** Returns max address
189 -----------------------------------------------------------------------*/
191 addr64_t
vmm_max_addr(thread_act_t act
)
193 return vm_max_address
; /* Return the maximum address */
196 /*-----------------------------------------------------------------------
199 ** This function retrieves the eXtended Architecture flags for the specifed VM.
201 ** We need to return the result in the return code rather than in the return parameters
202 ** because we need an architecture independent format so the results are actually
203 ** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
208 ** act - pointer to current thread activation structure
209 ** index - index returned by vmm_init_context
212 ** Return code is set to the XA flags. If the index is invalid or the
213 ** context has not been created, we return 0.
214 -----------------------------------------------------------------------*/
216 unsigned int vmm_get_XA(
218 vmm_thread_index_t index
)
220 vmmCntrlEntry
*CEntry
;
222 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
223 if (CEntry
== NULL
) return 0; /* Either this isn't a vmm or the index is bogus */
225 return CEntry
->vmmXAFlgs
; /* Return the flags */
228 /*-----------------------------------------------------------------------
231 ** This function initializes an emulation context. It allocates
232 ** a new pmap (address space) and fills in the initial processor
233 ** state within the specified structure. The structure, mapped
234 ** into the client's logical address space, must be page-aligned.
237 ** act - pointer to current thread activation
238 ** version - requested version of the Vmm interface (allowing
239 ** future versions of the interface to change, but still
240 ** support older clients)
241 ** vmm_user_state - pointer to a logical page within the
242 ** client's address space
245 ** kernel return code indicating success or failure
246 -----------------------------------------------------------------------*/
248 int vmm_init_context(struct savearea
*save
)
252 vmm_version_t version
;
253 vmm_state_page_t
* vmm_user_state
;
254 vmmCntrlTable
*CTable
;
256 vmm_state_page_t
* vks
;
262 thread_act_t fact
, gact
;
264 vmm_user_state
= CAST_DOWN(vmm_state_page_t
*, save
->save_r4
); /* Get the user address of the comm area */
265 if ((unsigned int)vmm_user_state
& (PAGE_SIZE
- 1)) { /* Make sure the comm area is page aligned */
266 save
->save_r3
= KERN_FAILURE
; /* Return failure */
270 /* Make sure that the version requested is supported */
271 version
= save
->save_r3
; /* Pick up passed in version */
272 if (((version
>> 16) < kVmmMinMajorVersion
) || ((version
>> 16) > (kVmmCurrentVersion
>> 16))) {
273 save
->save_r3
= KERN_FAILURE
; /* Return failure */
277 if((version
& 0xFFFF) > kVmmCurMinorVersion
) { /* Check for valid minor */
278 save
->save_r3
= KERN_FAILURE
; /* Return failure */
282 act
= current_act(); /* Pick up our activation */
284 ml_set_interrupts_enabled(TRUE
); /* This can take a bit of time so pass interruptions */
286 task
= current_task(); /* Figure out who we are */
288 task_lock(task
); /* Lock our task */
290 fact
= (thread_act_t
)task
->threads
.next
; /* Get the first activation on task */
291 gact
= 0; /* Pretend we didn't find it yet */
293 for(i
= 0; i
< task
->thread_count
; i
++) { /* All of the activations */
294 if(fact
->mact
.vmmControl
) { /* Is this a virtual machine monitor? */
295 gact
= fact
; /* Yeah... */
296 break; /* Bail the loop... */
298 fact
= (thread_act_t
)fact
->task_threads
.next
; /* Go to the next one */
303 * We only allow one thread per task to be a virtual machine monitor right now. This solves
304 * a number of potential problems that I can't put my finger on right now.
306 * Utlimately, I think we want to move the controls and make all this task based instead of
307 * thread based. That would allow an emulator architecture to spawn a kernel thread for each
308 * VM (if they want) rather than hand dispatch contexts.
311 if(gact
&& (gact
!= act
)) { /* Check if another thread is a vmm or trying to be */
312 task_unlock(task
); /* Release task lock */
313 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
314 save
->save_r3
= KERN_FAILURE
; /* We must play alone... */
318 if(!gact
) act
->mact
.vmmControl
= (vmmCntrlTable
*)1; /* Temporarily mark that we are the vmm thread */
320 task_unlock(task
); /* Safe to release now (because we've marked ourselves) */
322 CTable
= act
->mact
.vmmControl
; /* Get the control table address */
323 if ((unsigned int)CTable
== 1) { /* If we are marked, try to allocate a new table, otherwise we have one */
324 if(!(CTable
= (vmmCntrlTable
*)kalloc(sizeof(vmmCntrlTable
)))) { /* Get a fresh emulation control table */
325 act
->mact
.vmmControl
= 0; /* Unmark us as vmm 'cause we failed */
326 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
327 save
->save_r3
= KERN_RESOURCE_SHORTAGE
; /* No storage... */
331 bzero((void *)CTable
, sizeof(vmmCntrlTable
)); /* Clean it up */
332 act
->mact
.vmmControl
= CTable
; /* Initialize the table anchor */
335 for(cvi
= 0; cvi
< kVmmMaxContexts
; cvi
++) { /* Search to find a free slot */
336 if(!(CTable
->vmmc
[cvi
].vmmFlags
& vmmInUse
)) break; /* Bail if we find an unused slot */
339 if(cvi
>= kVmmMaxContexts
) { /* Did we find one? */
340 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
341 save
->save_r3
= KERN_RESOURCE_SHORTAGE
; /* No empty slots... */
345 ret
= vm_map_wire( /* Wire the virtual machine monitor's context area */
347 (vm_offset_t
)vmm_user_state
,
348 (vm_offset_t
)vmm_user_state
+ PAGE_SIZE
,
349 VM_PROT_READ
| VM_PROT_WRITE
,
352 if (ret
!= KERN_SUCCESS
) /* The wire failed, return the code */
353 goto return_in_shame
;
355 /* Map the vmm state into the kernel's address space. */
356 conphys
= pmap_find_phys(act
->map
->pmap
, (addr64_t
)((uintptr_t)vmm_user_state
));
358 /* Find a virtual address to use. */
359 ret
= kmem_alloc_pageable(kernel_map
, &conkern
, PAGE_SIZE
);
360 if (ret
!= KERN_SUCCESS
) { /* Did we find an address? */
361 (void) vm_map_unwire(act
->map
, /* No, unwire the context area */
362 (vm_offset_t
)vmm_user_state
,
363 (vm_offset_t
)vmm_user_state
+ PAGE_SIZE
,
365 goto return_in_shame
;
368 /* Map it into the kernel's address space. */
370 pmap_enter(kernel_pmap
, conkern
, conphys
,
371 VM_PROT_READ
| VM_PROT_WRITE
,
372 VM_WIMG_USE_DEFAULT
, TRUE
);
374 /* Clear the vmm state structure. */
375 vks
= (vmm_state_page_t
*)conkern
;
376 bzero((char *)vks
, PAGE_SIZE
);
379 /* We're home free now. Simply fill in the necessary info and return. */
381 vks
->interface_version
= version
; /* Set our version code */
382 vks
->thread_index
= cvi
+ 1; /* Tell the user the index for this virtual machine */
384 CTable
->vmmc
[cvi
].vmmFlags
= vmmInUse
; /* Mark the slot in use and make sure the rest are clear */
385 CTable
->vmmc
[cvi
].vmmContextKern
= vks
; /* Remember the kernel address of comm area */
386 CTable
->vmmc
[cvi
].vmmContextPhys
= (vmm_state_page_t
*)conphys
; /* Remember the state page physical addr */
387 CTable
->vmmc
[cvi
].vmmContextUser
= vmm_user_state
; /* Remember user address of comm area */
389 CTable
->vmmc
[cvi
].vmmFacCtx
.FPUsave
= 0; /* Clear facility context control */
390 CTable
->vmmc
[cvi
].vmmFacCtx
.FPUlevel
= 0; /* Clear facility context control */
391 CTable
->vmmc
[cvi
].vmmFacCtx
.FPUcpu
= 0; /* Clear facility context control */
392 CTable
->vmmc
[cvi
].vmmFacCtx
.VMXsave
= 0; /* Clear facility context control */
393 CTable
->vmmc
[cvi
].vmmFacCtx
.VMXlevel
= 0; /* Clear facility context control */
394 CTable
->vmmc
[cvi
].vmmFacCtx
.VMXcpu
= 0; /* Clear facility context control */
395 CTable
->vmmc
[cvi
].vmmFacCtx
.facAct
= act
; /* Point back to the activation */
397 hw_atomic_add((int *)&saveanchor
.savetarget
, 2); /* Account for the number of extra saveareas we think we might "need" */
399 if (!(act
->map
->pmap
->pmapFlags
& pmapVMhost
)) {
400 simple_lock(&(act
->map
->pmap
->lock
));
401 act
->map
->pmap
->pmapFlags
|= pmapVMhost
;
402 simple_unlock(&(act
->map
->pmap
->lock
));
405 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
406 save
->save_r3
= KERN_SUCCESS
; /* Hip, hip, horay... */
410 if(!gact
) kfree((vm_offset_t
)CTable
, sizeof(vmmCntrlTable
)); /* Toss the table if we just allocated it */
411 act
->mact
.vmmControl
= 0; /* Unmark us as vmm 'cause we failed */
412 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
413 save
->save_r3
= ret
; /* Pass back return code... */
419 /*-----------------------------------------------------------------------
420 ** vmm_tear_down_context
422 ** This function uninitializes an emulation context. It deallocates
423 ** internal resources associated with the context block.
426 ** act - pointer to current thread activation structure
427 ** index - index returned by vmm_init_context
430 ** kernel return code indicating success or failure
433 ** This call will also trash the address space with the same ID. While this
434 ** is really not too cool, we have to do it because we need to make
435 ** sure that old VMM users (not that we really have any) who depend upon
436 ** the address space going away with the context still work the same.
437 -----------------------------------------------------------------------*/
439 kern_return_t
vmm_tear_down_context(
441 vmm_thread_index_t index
)
443 vmmCntrlEntry
*CEntry
;
444 vmmCntrlTable
*CTable
;
446 register savearea
*sv
;
448 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
449 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't vmm thread or the index is bogus */
451 ml_set_interrupts_enabled(TRUE
); /* This can take a bit of time so pass interruptions */
453 hw_atomic_sub((int *)&saveanchor
.savetarget
, 2); /* We don't need these extra saveareas anymore */
455 if(CEntry
->vmmFacCtx
.FPUsave
) { /* Is there any floating point context? */
456 toss_live_fpu(&CEntry
->vmmFacCtx
); /* Get rid of any live context here */
457 save_release((savearea
*)CEntry
->vmmFacCtx
.FPUsave
); /* Release it */
460 if(CEntry
->vmmFacCtx
.VMXsave
) { /* Is there any vector context? */
461 toss_live_vec(&CEntry
->vmmFacCtx
); /* Get rid of any live context here */
462 save_release((savearea
*)CEntry
->vmmFacCtx
.VMXsave
); /* Release it */
465 CEntry
->vmmPmap
= 0; /* Remove this trace */
466 if(act
->mact
.vmmControl
->vmmAdsp
[index
- 1]) { /* Check if there is an address space assigned here */
467 mapping_remove(act
->mact
.vmmControl
->vmmAdsp
[index
- 1], 0xFFFFFFFFFFFFF000LL
); /* Remove final page explicitly because we might have mapped it */
468 pmap_remove(act
->mact
.vmmControl
->vmmAdsp
[index
- 1], 0, 0xFFFFFFFFFFFFF000LL
); /* Remove all entries from this map */
469 pmap_destroy(act
->mact
.vmmControl
->vmmAdsp
[index
- 1]); /* Toss the pmap for this context */
470 act
->mact
.vmmControl
->vmmAdsp
[index
- 1] = NULL
; /* Clean it up */
473 (void) vm_map_unwire( /* Unwire the user comm page */
475 (vm_offset_t
)CEntry
->vmmContextUser
,
476 (vm_offset_t
)CEntry
->vmmContextUser
+ PAGE_SIZE
,
479 kmem_free(kernel_map
, (vm_offset_t
)CEntry
->vmmContextKern
, PAGE_SIZE
); /* Remove kernel's view of the comm page */
481 CTable
= act
->mact
.vmmControl
; /* Get the control table address */
482 CTable
->vmmGFlags
= CTable
->vmmGFlags
& ~vmmLastAdSp
; /* Make sure we don't try to automap into this */
484 CEntry
->vmmFlags
= 0; /* Clear out all of the flags for this entry including in use */
485 CEntry
->vmmContextKern
= 0; /* Clear the kernel address of comm area */
486 CEntry
->vmmContextUser
= 0; /* Clear the user address of comm area */
488 CEntry
->vmmFacCtx
.FPUsave
= 0; /* Clear facility context control */
489 CEntry
->vmmFacCtx
.FPUlevel
= 0; /* Clear facility context control */
490 CEntry
->vmmFacCtx
.FPUcpu
= 0; /* Clear facility context control */
491 CEntry
->vmmFacCtx
.VMXsave
= 0; /* Clear facility context control */
492 CEntry
->vmmFacCtx
.VMXlevel
= 0; /* Clear facility context control */
493 CEntry
->vmmFacCtx
.VMXcpu
= 0; /* Clear facility context control */
494 CEntry
->vmmFacCtx
.facAct
= 0; /* Clear facility context control */
496 for(cvi
= 0; cvi
< kVmmMaxContexts
; cvi
++) { /* Search to find a free slot */
497 if(CTable
->vmmc
[cvi
].vmmFlags
& vmmInUse
) { /* Return if there are still some in use */
498 ml_set_interrupts_enabled(FALSE
); /* No more interruptions */
499 return KERN_SUCCESS
; /* Leave... */
504 * When we have tossed the last context, toss any address spaces left over before releasing
505 * the VMM control block
508 for(cvi
= 1; cvi
<= kVmmMaxContexts
; cvi
++) { /* Look at all slots */
509 if(!act
->mact
.vmmControl
->vmmAdsp
[index
- 1]) continue; /* Nothing to remove here */
510 mapping_remove(act
->mact
.vmmControl
->vmmAdsp
[index
- 1], 0xFFFFFFFFFFFFF000LL
); /* Remove final page explicitly because we might have mapped it */
511 pmap_remove(act
->mact
.vmmControl
->vmmAdsp
[index
- 1], 0, 0xFFFFFFFFFFFFF000LL
); /* Remove all entries from this map */
512 pmap_destroy(act
->mact
.vmmControl
->vmmAdsp
[index
- 1]); /* Toss the pmap for this context */
513 act
->mact
.vmmControl
->vmmAdsp
[index
- 1] = 0; /* Clear just in case */
516 kfree((vm_offset_t
)CTable
, sizeof(vmmCntrlTable
)); /* Toss the table because to tossed the last context */
517 act
->mact
.vmmControl
= 0; /* Unmark us as vmm */
519 ml_set_interrupts_enabled(FALSE
); /* No more interruptions */
525 /*-----------------------------------------------------------------------
528 ** This function sets the eXtended Architecture flags for the specifed VM.
530 ** We need to return the result in the return code rather than in the return parameters
531 ** because we need an architecture independent format so the results are actually
532 ** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
535 ** Note that this function does a lot of the same stuff as vmm_tear_down_context
536 ** and vmm_init_context.
539 ** act - pointer to current thread activation structure
540 ** index - index returned by vmm_init_context
541 ** flags - the extended architecture flags
545 ** KERN_SUCCESS if vm is valid and initialized. KERN_FAILURE if not.
546 ** Also, the internal flags are set and, additionally, the VM is completely reset.
547 -----------------------------------------------------------------------*/
549 kern_return_t
vmm_set_XA(
551 vmm_thread_index_t index
,
552 unsigned int xaflags
)
554 vmmCntrlEntry
*CEntry
;
555 vmmCntrlTable
*CTable
;
556 vmm_state_page_t
*vks
;
557 vmm_version_t version
;
559 if(xaflags
& ~vmm64Bit
) return KERN_FAILURE
; /* We only support this one kind now */
561 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
562 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't a vmm or the index is bogus */
564 ml_set_interrupts_enabled(TRUE
); /* This can take a bit of time so pass interruptions */
566 if(CEntry
->vmmFacCtx
.FPUsave
) { /* Is there any floating point context? */
567 toss_live_fpu(&CEntry
->vmmFacCtx
); /* Get rid of any live context here */
568 save_release((savearea
*)CEntry
->vmmFacCtx
.FPUsave
); /* Release it */
571 if(CEntry
->vmmFacCtx
.VMXsave
) { /* Is there any vector context? */
572 toss_live_vec(&CEntry
->vmmFacCtx
); /* Get rid of any live context here */
573 save_release((savearea
*)CEntry
->vmmFacCtx
.VMXsave
); /* Release it */
576 CTable
= act
->mact
.vmmControl
; /* Get the control table address */
577 CTable
->vmmGFlags
= CTable
->vmmGFlags
& ~vmmLastAdSp
; /* Make sure we don't try to automap into this */
579 CEntry
->vmmFlags
&= vmmInUse
; /* Clear out all of the flags for this entry except in use */
580 CEntry
->vmmXAFlgs
= (xaflags
& vmm64Bit
) | (CEntry
->vmmXAFlgs
& ~vmm64Bit
); /* Set the XA flags */
581 CEntry
->vmmFacCtx
.FPUsave
= 0; /* Clear facility context control */
582 CEntry
->vmmFacCtx
.FPUlevel
= 0; /* Clear facility context control */
583 CEntry
->vmmFacCtx
.FPUcpu
= 0; /* Clear facility context control */
584 CEntry
->vmmFacCtx
.VMXsave
= 0; /* Clear facility context control */
585 CEntry
->vmmFacCtx
.VMXlevel
= 0; /* Clear facility context control */
586 CEntry
->vmmFacCtx
.VMXcpu
= 0; /* Clear facility context control */
588 vks
= CEntry
->vmmContextKern
; /* Get address of the context page */
589 version
= vks
->interface_version
; /* Save the version code */
590 bzero((char *)vks
, 4096); /* Clear all */
592 vks
->interface_version
= version
; /* Set our version code */
593 vks
->thread_index
= index
% vmmTInum
; /* Tell the user the index for this virtual machine */
595 ml_set_interrupts_enabled(FALSE
); /* No more interruptions */
597 return KERN_SUCCESS
; /* Return the flags */
601 /*-----------------------------------------------------------------------
604 ** This function uninitializes all emulation contexts. If there are
605 ** any vmm contexts, it calls vmm_tear_down_context for each one.
607 ** Note: this can also be called from normal thread termination. Because of
608 ** that, we will context switch out of an alternate if we are currenty in it.
609 ** It will be terminated with no valid return code set because we don't expect
610 ** the activation to ever run again.
613 ** activation to tear down
616 ** All vmm contexts released and VMM shut down
617 -----------------------------------------------------------------------*/
618 void vmm_tear_down_all(thread_act_t act
) {
620 vmmCntrlTable
*CTable
;
626 if(act
->mact
.specFlags
& runningVM
) { /* Are we actually in a context right now? */
627 save
= find_user_regs(act
); /* Find the user state context */
628 if(!save
) { /* Did we find it? */
629 panic("vmm_tear_down_all: runningVM marked but no user state context\n");
633 save
->save_exception
= kVmmBogusContext
*4; /* Indicate that this context is bogus now */
634 s
= splhigh(); /* Make sure interrupts are off */
635 vmm_force_exit(act
, save
); /* Force and exit from VM state */
636 splx(s
); /* Restore interrupts */
639 if(CTable
= act
->mact
.vmmControl
) { /* Do we have a vmm control block? */
642 for(cvi
= 1; cvi
<= kVmmMaxContexts
; cvi
++) { /* Look at all slots */
643 if(CTable
->vmmc
[cvi
- 1].vmmFlags
& vmmInUse
) { /* Is this one in use */
644 ret
= vmm_tear_down_context(act
, cvi
); /* Take down the found context */
645 if(ret
!= KERN_SUCCESS
) { /* Did it go away? */
646 panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
653 * Note that all address apces should be gone here.
655 if(act
->mact
.vmmControl
) { /* Did we find one? */
656 panic("vmm_tear_down_all: control table did not get deallocated\n"); /* Table did not go away */
663 /*-----------------------------------------------------------------------
666 ** This function maps a page from within the client's logical
667 ** address space into the alternate address space.
669 ** The page need not be locked or resident. If not resident, it will be faulted
670 ** in by this code, which may take some time. Also, if the page is not locked,
671 ** it, and this mapping may disappear at any time, even before it gets used. Note also
672 ** that reference and change information is NOT preserved when a page is unmapped, either
673 ** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
674 ** space). This means that if RC is needed, the page MUST be wired.
676 ** Note that if there is already a mapping at the address, it is removed and all
677 ** information (including RC) is lost BEFORE an attempt is made to map it. Also,
678 ** if the map call fails, the old address is still unmapped..
681 ** act - pointer to current thread activation
682 ** index - index of address space to map into
683 ** va - virtual address within the client's address
685 ** ava - virtual address within the alternate address
687 ** prot - protection flags
689 ** Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
690 ** areas are not allowed and will fail. Same with directly mapped I/O areas.
693 ** Interrupts disabled (from fast trap)
696 ** kernel return code indicating success or failure
697 ** if success, va resident and alternate mapping made
698 -----------------------------------------------------------------------*/
700 kern_return_t
vmm_map_page(
708 vmmCntrlEntry
*CEntry
;
709 register mapping
*mp
;
710 struct phys_entry
*pp
;
712 addr64_t ova
, nextva
;
715 pmap
= vmm_get_adsp(act
, index
); /* Get the pmap for this address space */
716 if(!pmap
) return KERN_FAILURE
; /* Bogus address space, no VMs, or we can't make a pmap, failure... */
718 if(ava
> vm_max_address
) return kVmmInvalidAddress
; /* Does the machine support an address of this size? */
720 map
= current_act()->map
; /* Get the current map */
722 while(1) { /* Keep trying until we get it or until we fail */
724 mp
= mapping_find(map
->pmap
, cva
, &nextva
, 0); /* Find the mapping for this address */
726 if(mp
) break; /* We found it */
728 ml_set_interrupts_enabled(TRUE
); /* Enable interruptions */
729 ret
= vm_fault(map
, trunc_page_32((vm_offset_t
)cva
), VM_PROT_READ
| VM_PROT_WRITE
, FALSE
); /* Didn't find it, try to fault it in read/write... */
730 ml_set_interrupts_enabled(FALSE
); /* Disable interruptions */
731 if (ret
!= KERN_SUCCESS
) return KERN_FAILURE
; /* There isn't a page there, return... */
734 if(mp
->mpFlags
& (mpBlock
| mpNest
| mpSpecial
)) { /* If this is a block, a nest, or some other special thing, we can't map it */
735 mapping_drop_busy(mp
); /* We have everything we need from the mapping */
736 return KERN_FAILURE
; /* Leave in shame */
739 while(1) { /* Keep trying the enter until it goes in */
740 ova
= mapping_make(pmap
, ava
, mp
->mpPAddr
, 0, 1, prot
); /* Enter the mapping into the pmap */
741 if(!ova
) break; /* If there were no collisions, we are done... */
742 mapping_remove(pmap
, ova
); /* Remove the mapping that collided */
745 mapping_drop_busy(mp
); /* We have everything we need from the mapping */
747 if (!((per_proc_info
[cpu_number()].spcFlags
) & FamVMmode
)) {
748 act
->mact
.vmmControl
->vmmLastMap
= ava
& 0xFFFFFFFFFFFFF000ULL
; /* Remember the last mapping we made */
749 act
->mact
.vmmControl
->vmmGFlags
= (act
->mact
.vmmControl
->vmmGFlags
& ~vmmLastAdSp
) | index
; /* Remember last address space */
756 /*-----------------------------------------------------------------------
759 ** This function maps a page from within the client's logical
760 ** address space into the alternate address space of the
761 ** Virtual Machine Monitor context and then directly starts executing.
763 ** See description of vmm_map_page for details.
766 ** Index is used for both the context and the address space ID.
767 ** index[24:31] is the context id and index[16:23] is the address space.
768 ** if the address space ID is 0, the context ID is used for it.
771 ** Normal exit is to run the VM. Abnormal exit is triggered via a
772 ** non-KERN_SUCCESS return from vmm_map_page or later during the
773 ** attempt to transition into the VM.
774 -----------------------------------------------------------------------*/
776 vmm_return_code_t
vmm_map_execute(
778 vmm_thread_index_t index
,
784 vmmCntrlEntry
*CEntry
;
786 vmm_thread_index_t cndx
;
788 cndx
= index
& 0xFF; /* Clean it up */
790 CEntry
= vmm_get_entry(act
, cndx
); /* Get and validate the index */
791 if (CEntry
== NULL
) return kVmmBogusContext
; /* Return bogus context */
793 if (((per_proc_info
[cpu_number()].spcFlags
) & FamVMmode
) && (CEntry
!= act
->mact
.vmmCEntry
))
794 return kVmmBogusContext
; /* Yes, invalid index in Fam */
796 adsp
= (index
>> 8) & 0xFF; /* Get any requested address space */
797 if(!adsp
) adsp
= (index
& 0xFF); /* If 0, use context ID as address space ID */
799 ret
= vmm_map_page(act
, adsp
, cva
, ava
, prot
); /* Go try to map the page on in */
802 if(ret
== KERN_SUCCESS
) {
803 act
->mact
.vmmControl
->vmmLastMap
= ava
& 0xFFFFFFFFFFFFF000ULL
; /* Remember the last mapping we made */
804 act
->mact
.vmmControl
->vmmGFlags
= (act
->mact
.vmmControl
->vmmGFlags
& ~vmmLastAdSp
) | cndx
; /* Remember last address space */
805 vmm_execute_vm(act
, cndx
); /* Return was ok, launch the VM */
808 return ret
; /* We had trouble mapping in the page */
812 /*-----------------------------------------------------------------------
815 ** This function maps a list of pages into various address spaces
818 ** act - pointer to current thread activation
819 ** index - index of default address space (used if not specifed in list entry
820 ** count - number of pages to release
821 ** flavor - 0 if 32-bit version, 1 if 64-bit
822 ** vmcpComm in the comm page contains up to kVmmMaxMapPages to map
825 ** kernel return code indicating success or failure
826 ** KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
827 ** or the vmm_map_page call fails.
828 ** We return kVmmInvalidAddress if virtual address size is not supported
829 -----------------------------------------------------------------------*/
831 kern_return_t
vmm_map_list(
837 vmmCntrlEntry
*CEntry
;
847 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
848 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't a vmm or the index is bogus */
850 if(cnt
> kVmmMaxMapPages
) return KERN_FAILURE
; /* They tried to map too many */
851 if(!cnt
) return KERN_SUCCESS
; /* If they said none, we're done... */
853 lst
= (vmmMList
*)&((vmm_comm_page_t
*)CEntry
->vmmContextKern
)->vmcpComm
[0]; /* Point to the first entry */
854 lstx
= (vmmMList64
*)&((vmm_comm_page_t
*)CEntry
->vmmContextKern
)->vmcpComm
[0]; /* Point to the first entry */
856 for(i
= 0; i
< cnt
; i
++) { /* Step and release all pages in list */
857 if(flavor
) { /* Check if 32- or 64-bit addresses */
858 cva
= lstx
[i
].vmlva
; /* Get the 64-bit actual address */
859 ava
= lstx
[i
].vmlava
; /* Get the 64-bit guest address */
862 cva
= lst
[i
].vmlva
; /* Get the 32-bit actual address */
863 ava
= lst
[i
].vmlava
; /* Get the 32-bit guest address */
866 prot
= ava
& vmmlProt
; /* Extract the protection bits */
867 adsp
= (ava
& vmmlAdID
) >> 4; /* Extract an explicit address space request */
868 if(!adsp
) adsp
= index
- 1; /* If no explicit, use supplied default */
869 ava
= ava
&= 0xFFFFFFFFFFFFF000ULL
; /* Clean up the address */
871 ret
= vmm_map_page(act
, index
, cva
, ava
, prot
); /* Go try to map the page on in */
872 if(ret
!= KERN_SUCCESS
) return ret
; /* Bail if any error */
875 return KERN_SUCCESS
; /* Return... */
878 /*-----------------------------------------------------------------------
879 ** vmm_get_page_mapping
881 ** This function determines whether the specified VMM
882 ** virtual address is mapped.
885 ** act - pointer to current thread activation
886 ** index - index of vmm state for this page
887 ** va - virtual address within the alternate's address
891 ** Non-alternate's virtual address (page aligned) or -1 if not mapped or any failure
894 ** If there are aliases to the page in the non-alternate address space,
895 ** this call could return the wrong one. Moral of the story: no aliases.
896 -----------------------------------------------------------------------*/
898 addr64_t
vmm_get_page_mapping(
903 vmmCntrlEntry
*CEntry
;
904 register mapping
*mp
;
906 addr64_t nextva
, sva
;
909 pmap
= vmm_get_adsp(act
, index
); /* Get and validate the index */
910 if (!pmap
)return -1; /* No good, failure... */
912 mp
= mapping_find(pmap
, va
, &nextva
, 0); /* Find our page */
914 if(!mp
) return -1; /* Not mapped, return -1 */
916 pa
= mp
->mpPAddr
; /* Remember the page address */
918 mapping_drop_busy(mp
); /* Go ahead and relase the mapping now */
920 pmap
= current_act()->map
->pmap
; /* Get the current pmap */
921 sva
= mapping_p2v(pmap
, pa
); /* Now find the source virtual */
923 if(sva
!= 0) return sva
; /* We found it... */
925 panic("vmm_get_page_mapping: could not back-map alternate va (%016llX)\n", va
); /* We are bad wrong if we can't find it */
930 /*-----------------------------------------------------------------------
933 ** This function unmaps a page from the alternate's logical
937 ** act - pointer to current thread activation
938 ** index - index of vmm state for this page
939 ** va - virtual address within the vmm's address
943 ** kernel return code indicating success or failure
944 -----------------------------------------------------------------------*/
946 kern_return_t
vmm_unmap_page(
951 vmmCntrlEntry
*CEntry
;
954 kern_return_t kern_result
= KERN_SUCCESS
;
956 pmap
= vmm_get_adsp(act
, index
); /* Get and validate the index */
957 if (!pmap
)return -1; /* No good, failure... */
959 nadd
= mapping_remove(pmap
, va
); /* Toss the mapping */
961 return ((nadd
& 1) ? KERN_FAILURE
: KERN_SUCCESS
); /* Return... */
964 /*-----------------------------------------------------------------------
967 ** This function unmaps a list of pages from the alternate's logical
971 ** act - pointer to current thread activation
972 ** index - index of vmm state for this page
973 ** count - number of pages to release
974 ** flavor - 0 if 32-bit, 1 if 64-bit
975 ** vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
978 ** kernel return code indicating success or failure
979 ** KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
980 -----------------------------------------------------------------------*/
982 kern_return_t
vmm_unmap_list(
988 vmmCntrlEntry
*CEntry
;
990 kern_return_t kern_result
= KERN_SUCCESS
;
991 unsigned int *pgaddr
, i
;
998 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
999 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't a vmm or the index is bogus */
1001 if(cnt
> kVmmMaxUnmapPages
) return KERN_FAILURE
; /* They tried to unmap too many */
1002 if(!cnt
) return KERN_SUCCESS
; /* If they said none, we're done... */
1004 lst
= (vmmUMList
*)lstx
= (vmmUMList64
*) &((vmm_comm_page_t
*)CEntry
->vmmContextKern
)->vmcpComm
[0]; /* Point to the first entry */
1006 for(i
= 0; i
< cnt
; i
++) { /* Step and release all pages in list */
1007 if(flavor
) { /* Check if 32- or 64-bit addresses */
1008 gva
= lstx
[i
].vmlava
; /* Get the 64-bit guest address */
1011 gva
= lst
[i
].vmlava
; /* Get the 32-bit guest address */
1014 adsp
= (gva
& vmmlAdID
) >> 4; /* Extract an explicit address space request */
1015 if(!adsp
) adsp
= index
- 1; /* If no explicit, use supplied default */
1016 pmap
= act
->mact
.vmmControl
->vmmAdsp
[adsp
]; /* Get the pmap for this request */
1017 if(!pmap
) continue; /* Ain't nuthin' mapped here, no durn map... */
1019 gva
= gva
&= 0xFFFFFFFFFFFFF000ULL
; /* Clean up the address */
1020 (void)mapping_remove(pmap
, gva
); /* Toss the mapping */
1023 return KERN_SUCCESS
; /* Return... */
1026 /*-----------------------------------------------------------------------
1027 ** vmm_unmap_all_pages
1029 ** This function unmaps all pages from the alternates's logical
1033 ** act - pointer to current thread activation
1034 ** index - index of context state
1040 ** All pages are unmapped, but the address space (i.e., pmap) is still alive
1041 -----------------------------------------------------------------------*/
1043 void vmm_unmap_all_pages(
1045 vmm_adsp_id_t index
)
1047 vmmCntrlEntry
*CEntry
;
1050 pmap
= vmm_get_adsp(act
, index
); /* Convert index to entry */
1051 if (!pmap
) return; /* Either this isn't vmm thread or the index is bogus */
1054 * Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
1056 mapping_remove(pmap
, 0xFFFFFFFFFFFFF000LL
); /* Remove final page explicitly because we might have mapped it */
1057 pmap_remove(pmap
, 0, 0xFFFFFFFFFFFFF000LL
); /* Remove all entries from this map */
1062 /*-----------------------------------------------------------------------
1063 ** vmm_get_page_dirty_flag
1065 ** This function returns the changed flag of the page
1066 ** and optionally clears the flag.
1069 ** act - pointer to current thread activation
1070 ** index - index of vmm state for this page
1071 ** va - virtual address within the vmm's address
1073 ** reset - Clears dirty if true, untouched if not
1077 ** clears the dirty bit in the pte if requested
1080 ** The RC bits are merged into the global physical entry
1081 -----------------------------------------------------------------------*/
1083 boolean_t
vmm_get_page_dirty_flag(
1085 vmm_adsp_id_t index
,
1089 vmmCntrlEntry
*CEntry
;
1090 register mapping
*mpv
, *mp
;
1094 pmap
= vmm_get_adsp(act
, index
); /* Convert index to entry */
1095 if (!pmap
) return 1; /* Either this isn't vmm thread or the index is bogus */
1097 RC
= hw_test_rc(pmap
, (addr64_t
)va
, reset
); /* Fetch the RC bits and clear if requested */
1099 switch (RC
& mapRetCode
) { /* Decode return code */
1101 case mapRtOK
: /* Changed */
1102 return ((RC
& (unsigned int)mpC
) == (unsigned int)mpC
); /* Return if dirty or not */
1105 case mapRtNotFnd
: /* Didn't find it */
1106 return 1; /* Return dirty */
1110 panic("vmm_get_page_dirty_flag: hw_test_rc failed - rc = %d, pmap = %08X, va = %016llX\n", RC
, pmap
, va
);
1114 return 1; /* Return the change bit */
1118 /*-----------------------------------------------------------------------
1121 ** This function sets the protection bits of a mapped page
1124 ** act - pointer to current thread activation
1125 ** index - index of vmm state for this page
1126 ** va - virtual address within the vmm's address
1128 ** prot - Protection flags
1132 ** Protection bits of the mapping are modifed
1134 -----------------------------------------------------------------------*/
1136 kern_return_t
vmm_protect_page(
1138 vmm_adsp_id_t index
,
1142 vmmCntrlEntry
*CEntry
;
1147 pmap
= vmm_get_adsp(act
, index
); /* Convert index to entry */
1148 if (!pmap
) return KERN_FAILURE
; /* Either this isn't vmm thread or the index is bogus */
1150 ret
= hw_protect(pmap
, va
, prot
, &nextva
); /* Try to change the protect here */
1152 switch (ret
) { /* Decode return code */
1154 case mapRtOK
: /* All ok... */
1155 break; /* Outta here */
1157 case mapRtNotFnd
: /* Didn't find it */
1158 return KERN_SUCCESS
; /* Ok, return... */
1162 panic("vmm_protect_page: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret
, pmap
, (addr64_t
)va
);
1166 if (!((per_proc_info
[cpu_number()].spcFlags
) & FamVMmode
)) {
1167 act
->mact
.vmmControl
->vmmLastMap
= va
& 0xFFFFFFFFFFFFF000ULL
; /* Remember the last mapping we made */
1168 act
->mact
.vmmControl
->vmmGFlags
= (act
->mact
.vmmControl
->vmmGFlags
& ~vmmLastAdSp
) | index
; /* Remember last address space */
1171 return KERN_SUCCESS
; /* Return */
1175 /*-----------------------------------------------------------------------
1176 ** vmm_protect_execute
1178 ** This function sets the protection bits of a mapped page
1179 ** and then directly starts executing.
1181 ** See description of vmm_protect_page for details
1184 ** See vmm_protect_page and vmm_map_execute
1187 ** Normal exit is to run the VM. Abnormal exit is triggered via a
1188 ** non-KERN_SUCCESS return from vmm_map_page or later during the
1189 ** attempt to transition into the VM.
1190 -----------------------------------------------------------------------*/
1192 vmm_return_code_t
vmm_protect_execute(
1194 vmm_thread_index_t index
,
1199 vmmCntrlEntry
*CEntry
;
1201 vmm_thread_index_t cndx
;
1203 cndx
= index
& 0xFF; /* Clean it up */
1204 CEntry
= vmm_get_entry(act
, cndx
); /* Get and validate the index */
1205 if (CEntry
== NULL
) return kVmmBogusContext
; /* Return bogus context */
1207 adsp
= (index
>> 8) & 0xFF; /* Get any requested address space */
1208 if(!adsp
) adsp
= (index
& 0xFF); /* If 0, use context ID as address space ID */
1210 if (((per_proc_info
[cpu_number()].spcFlags
) & FamVMmode
) && (CEntry
!= act
->mact
.vmmCEntry
))
1211 return kVmmBogusContext
; /* Yes, invalid index in Fam */
1213 ret
= vmm_protect_page(act
, adsp
, va
, prot
); /* Go try to change access */
1215 if(ret
== KERN_SUCCESS
) {
1216 act
->mact
.vmmControl
->vmmLastMap
= va
& 0xFFFFFFFFFFFFF000ULL
; /* Remember the last mapping we made */
1217 act
->mact
.vmmControl
->vmmGFlags
= (act
->mact
.vmmControl
->vmmGFlags
& ~vmmLastAdSp
) | cndx
; /* Remember last address space */
1218 vmm_execute_vm(act
, cndx
); /* Return was ok, launch the VM */
1221 return ret
; /* We had trouble of some kind (shouldn't happen) */
1226 /*-----------------------------------------------------------------------
1227 ** vmm_get_float_state
1229 ** This function causes the current floating point state to
1230 ** be saved into the shared context area. It also clears the
1231 ** vmmFloatCngd changed flag.
1234 ** act - pointer to current thread activation structure
1235 ** index - index returned by vmm_init_context
1239 -----------------------------------------------------------------------*/
1241 kern_return_t
vmm_get_float_state(
1243 vmm_thread_index_t index
)
1245 vmmCntrlEntry
*CEntry
;
1246 vmmCntrlTable
*CTable
;
1248 register struct savearea_fpu
*sv
;
1250 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
1251 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't vmm thread or the index is bogus */
1253 act
->mact
.specFlags
&= ~floatCng
; /* Clear the special flag */
1254 CEntry
->vmmContextKern
->vmmStat
&= ~vmmFloatCngd
; /* Clear the change indication */
1256 fpu_save(&CEntry
->vmmFacCtx
); /* Save context if live */
1258 if(sv
= CEntry
->vmmFacCtx
.FPUsave
) { /* Is there context yet? */
1259 bcopy((char *)&sv
->save_fp0
, (char *)&(CEntry
->vmmContextKern
->vmm_proc_state
.ppcFPRs
), 32 * 8); /* 32 registers */
1260 return KERN_SUCCESS
;
1264 for(i
= 0; i
< 32; i
++) { /* Initialize floating points */
1265 CEntry
->vmmContextKern
->vmm_proc_state
.ppcFPRs
[i
].d
= FloatInit
; /* Initial value */
1268 return KERN_SUCCESS
;
1271 /*-----------------------------------------------------------------------
1272 ** vmm_get_vector_state
1274 ** This function causes the current vector state to
1275 ** be saved into the shared context area. It also clears the
1276 ** vmmVectorCngd changed flag.
1279 ** act - pointer to current thread activation structure
1280 ** index - index returned by vmm_init_context
1284 -----------------------------------------------------------------------*/
1286 kern_return_t
vmm_get_vector_state(
1288 vmm_thread_index_t index
)
1290 vmmCntrlEntry
*CEntry
;
1291 vmmCntrlTable
*CTable
;
1293 unsigned int vrvalidwrk
;
1294 register struct savearea_vec
*sv
;
1296 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
1297 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't vmm thread or the index is bogus */
1299 vec_save(&CEntry
->vmmFacCtx
); /* Save context if live */
1301 act
->mact
.specFlags
&= ~vectorCng
; /* Clear the special flag */
1302 CEntry
->vmmContextKern
->vmmStat
&= ~vmmVectCngd
; /* Clear the change indication */
1304 if(sv
= CEntry
->vmmFacCtx
.VMXsave
) { /* Is there context yet? */
1306 vrvalidwrk
= sv
->save_vrvalid
; /* Get the valid flags */
1308 for(i
= 0; i
< 32; i
++) { /* Copy the saved registers and invalidate the others */
1309 if(vrvalidwrk
& 0x80000000) { /* Do we have a valid value here? */
1310 for(j
= 0; j
< 4; j
++) { /* If so, copy it over */
1311 CEntry
->vmmContextKern
->vmm_proc_state
.ppcVRs
[i
].i
[j
] = ((unsigned int *)&(sv
->save_vr0
))[(i
* 4) + j
];
1315 for(j
= 0; j
< 4; j
++) { /* Otherwise set to empty value */
1316 CEntry
->vmmContextKern
->vmm_proc_state
.ppcVRs
[i
].i
[j
] = QNaNbarbarian
[j
];
1320 vrvalidwrk
= vrvalidwrk
<< 1; /* Shift over to the next */
1324 return KERN_SUCCESS
;
1327 for(i
= 0; i
< 32; i
++) { /* Initialize vector registers */
1328 for(j
=0; j
< 4; j
++) { /* Do words */
1329 CEntry
->vmmContextKern
->vmm_proc_state
.ppcVRs
[i
].i
[j
] = QNaNbarbarian
[j
]; /* Initial value */
1333 return KERN_SUCCESS
;
1336 /*-----------------------------------------------------------------------
1339 ** This function causes a timer (in AbsoluteTime) for a specific time
1340 ** to be set It also clears the vmmTimerPop flag if the timer is actually
1341 ** set, it is cleared otherwise.
1343 ** A timer is cleared by setting setting the time to 0. This will clear
1344 ** the vmmTimerPop bit. Simply setting the timer to earlier than the
1345 ** current time clears the internal timer request, but leaves the
1346 ** vmmTimerPop flag set.
1350 ** act - pointer to current thread activation structure
1351 ** index - index returned by vmm_init_context
1352 ** timerhi - high order word of AbsoluteTime to pop
1353 ** timerlo - low order word of AbsoluteTime to pop
1356 ** timer set, vmmTimerPop cleared
1357 -----------------------------------------------------------------------*/
1359 kern_return_t
vmm_set_timer(
1361 vmm_thread_index_t index
,
1362 unsigned int timerhi
,
1363 unsigned int timerlo
)
1365 vmmCntrlEntry
*CEntry
;
1367 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
1368 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't vmm thread or the index is bogus */
1370 CEntry
->vmmTimer
= ((uint64_t)timerhi
<< 32) | timerlo
;
1372 vmm_timer_pop(act
); /* Go adjust all of the timer stuff */
1373 return KERN_SUCCESS
; /* Leave now... */
1377 /*-----------------------------------------------------------------------
1380 ** This function causes the timer for a specified VM to be
1381 ** returned in return_params[0] and return_params[1].
1382 ** Note that this is kind of funky for 64-bit VMs because we
1383 ** split the timer into two parts so that we still set parms 0 and 1.
1384 ** Obviously, we don't need to do this because the parms are 8 bytes
1389 ** act - pointer to current thread activation structure
1390 ** index - index returned by vmm_init_context
1393 ** Timer value set in return_params[0] and return_params[1].
1394 ** Set to 0 if timer is not set.
1395 -----------------------------------------------------------------------*/
1397 kern_return_t
vmm_get_timer(
1399 vmm_thread_index_t index
)
1401 vmmCntrlEntry
*CEntry
;
1402 vmmCntrlTable
*CTable
;
1404 CEntry
= vmm_get_entry(act
, index
); /* Convert index to entry */
1405 if (CEntry
== NULL
) return KERN_FAILURE
; /* Either this isn't vmm thread or the index is bogus */
1407 if(CEntry
->vmmXAFlgs
& vmm64Bit
) { /* A 64-bit virtual machine? */
1408 CEntry
->vmmContextKern
->vmmRet
.vmmrp64
.return_params
[0] = (uint32_t)(CEntry
->vmmTimer
>> 32); /* Return the last timer value */
1409 CEntry
->vmmContextKern
->vmmRet
.vmmrp64
.return_params
[1] = (uint32_t)CEntry
->vmmTimer
; /* Return the last timer value */
1412 CEntry
->vmmContextKern
->vmmRet
.vmmrp32
.return_params
[0] = (CEntry
->vmmTimer
>> 32); /* Return the last timer value */
1413 CEntry
->vmmContextKern
->vmmRet
.vmmrp32
.return_params
[1] = (uint32_t)CEntry
->vmmTimer
; /* Return the last timer value */
1415 return KERN_SUCCESS
;
1419 /*-----------------------------------------------------------------------
1422 ** This function causes all timers in the array of VMs to be updated.
1423 ** All appropriate flags are set or reset. If a VM is currently
1424 ** running and its timer expired, it is intercepted.
1426 ** The qactTimer value is set to the lowest unexpired timer. It is
1427 ** zeroed if all timers are expired or have been reset.
1430 ** act - pointer to current thread activation structure
1433 ** timers set, vmmTimerPop cleared or set
1434 -----------------------------------------------------------------------*/
1439 vmmCntrlEntry
*CEntry
;
1440 vmmCntrlTable
*CTable
;
1442 uint64_t now
, soonest
;
1445 if(!((unsigned int)act
->mact
.vmmControl
& 0xFFFFFFFE)) { /* Are there any virtual machines? */
1446 panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act
);
1449 soonest
= 0xFFFFFFFFFFFFFFFFULL
; /* Max time */
1451 clock_get_uptime(&now
); /* What time is it? */
1453 CTable
= act
->mact
.vmmControl
; /* Make this easier */
1454 any
= 0; /* Haven't found a running unexpired timer yet */
1456 for(cvi
= 0; cvi
< kVmmMaxContexts
; cvi
++) { /* Cycle through all and check time now */
1458 if(!(CTable
->vmmc
[cvi
].vmmFlags
& vmmInUse
)) continue; /* Do not check if the entry is empty */
1460 if(CTable
->vmmc
[cvi
].vmmTimer
== 0) { /* Is the timer reset? */
1461 CTable
->vmmc
[cvi
].vmmFlags
&= ~vmmTimerPop
; /* Clear timer popped */
1462 CTable
->vmmc
[cvi
].vmmContextKern
->vmmStat
&= ~vmmTimerPop
; /* Clear timer popped */
1463 continue; /* Check next */
1466 if (CTable
->vmmc
[cvi
].vmmTimer
<= now
) {
1467 CTable
->vmmc
[cvi
].vmmFlags
|= vmmTimerPop
; /* Set timer popped here */
1468 CTable
->vmmc
[cvi
].vmmContextKern
->vmmStat
|= vmmTimerPop
; /* Set timer popped here */
1469 if((unsigned int)&CTable
->vmmc
[cvi
] == (unsigned int)act
->mact
.vmmCEntry
) { /* Is this the running VM? */
1470 sv
= find_user_regs(act
); /* Get the user state registers */
1471 if(!sv
) { /* Did we find something? */
1472 panic("vmm_timer_pop: no user context; act = %08X\n", act
);
1474 sv
->save_exception
= kVmmReturnNull
*4; /* Indicate that this is a null exception */
1475 vmm_force_exit(act
, sv
); /* Intercept a running VM */
1477 continue; /* Check the rest */
1479 else { /* It hasn't popped yet */
1480 CTable
->vmmc
[cvi
].vmmFlags
&= ~vmmTimerPop
; /* Set timer not popped here */
1481 CTable
->vmmc
[cvi
].vmmContextKern
->vmmStat
&= ~vmmTimerPop
; /* Set timer not popped here */
1484 any
= 1; /* Show we found an active unexpired timer */
1486 if (CTable
->vmmc
[cvi
].vmmTimer
< soonest
)
1487 soonest
= CTable
->vmmc
[cvi
].vmmTimer
;
1491 if (act
->mact
.qactTimer
== 0 || soonest
<= act
->mact
.qactTimer
)
1492 act
->mact
.qactTimer
= soonest
; /* Set lowest timer */
1500 /*-----------------------------------------------------------------------
1503 ** This function prevents the specified VM(s) to from running.
1504 ** If any is currently executing, the execution is intercepted
1505 ** with a code of kVmmStopped. Note that execution of the VM is
1506 ** blocked until a vmmExecuteVM is called with the start flag set to 1.
1507 ** This provides the ability for a thread to stop execution of a VM and
1508 ** insure that it will not be run until the emulator has processed the
1509 ** "virtual" interruption.
1512 ** vmmask - 32 bit mask corresponding to the VMs to put in stop state
1513 ** NOTE: if this mask is all 0s, any executing VM is intercepted with
1514 ** a kVmmStopped (but not marked stopped), otherwise this is a no-op. Also
1515 ** note that there is a potential race here and the VM may not stop.
1518 ** kernel return code indicating success
1519 ** or if no VMs are enabled, an invalid syscall exception.
1520 -----------------------------------------------------------------------*/
1522 int vmm_stop_vm(struct savearea
*save
)
1526 vmmCntrlTable
*CTable
;
1530 unsigned int vmmask
;
1531 ReturnHandler
*stopapc
;
1533 ml_set_interrupts_enabled(TRUE
); /* This can take a bit of time so pass interruptions */
1535 task
= current_task(); /* Figure out who we are */
1537 task_lock(task
); /* Lock our task */
1539 fact
= (thread_act_t
)task
->threads
.next
; /* Get the first activation on task */
1540 act
= 0; /* Pretend we didn't find it yet */
1542 for(i
= 0; i
< task
->thread_count
; i
++) { /* All of the activations */
1543 if(fact
->mact
.vmmControl
) { /* Is this a virtual machine monitor? */
1544 act
= fact
; /* Yeah... */
1545 break; /* Bail the loop... */
1547 fact
= (thread_act_t
)fact
->task_threads
.next
; /* Go to the next one */
1550 if(!((unsigned int)act
)) { /* See if we have VMMs yet */
1551 task_unlock(task
); /* No, unlock the task */
1552 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
1553 return 0; /* Go generate a syscall exception */
1556 act_lock_thread(act
); /* Make sure this stays 'round */
1557 task_unlock(task
); /* Safe to release now */
1559 CTable
= act
->mact
.vmmControl
; /* Get the pointer to the table */
1561 if(!((unsigned int)CTable
& -2)) { /* Are there any all the way up yet? */
1562 act_unlock_thread(act
); /* Unlock the activation */
1563 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
1564 return 0; /* Go generate a syscall exception */
1567 if(!(vmmask
= save
->save_r3
)) { /* Get the stop mask and check if all zeros */
1568 act_unlock_thread(act
); /* Unlock the activation */
1569 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
1570 save
->save_r3
= KERN_SUCCESS
; /* Set success */
1571 return 1; /* Return... */
1574 for(cvi
= 0; cvi
< kVmmMaxContexts
; cvi
++) { /* Search slots */
1575 if((0x80000000 & vmmask
) && (CTable
->vmmc
[cvi
].vmmFlags
& vmmInUse
)) { /* See if we need to stop and if it is in use */
1576 hw_atomic_or(&CTable
->vmmc
[cvi
].vmmFlags
, vmmXStop
); /* Set this one to stop */
1578 vmmask
= vmmask
<< 1; /* Slide mask over */
1581 if(hw_compare_and_store(0, 1, &act
->mact
.emPendRupts
)) { /* See if there is already a stop pending and lock out others if not */
1582 act_unlock_thread(act
); /* Already one pending, unlock the activation */
1583 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
1584 save
->save_r3
= KERN_SUCCESS
; /* Say we did it... */
1585 return 1; /* Leave */
1588 if(!(stopapc
= (ReturnHandler
*)kalloc(sizeof(ReturnHandler
)))) { /* Get a return handler control block */
1589 act
->mact
.emPendRupts
= 0; /* No memory, say we have given up request */
1590 act_unlock_thread(act
); /* Unlock the activation */
1591 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
1592 save
->save_r3
= KERN_RESOURCE_SHORTAGE
; /* No storage... */
1593 return 1; /* Return... */
1596 ml_set_interrupts_enabled(FALSE
); /* Disable interruptions for now */
1598 stopapc
->handler
= vmm_interrupt
; /* Set interruption routine */
1600 stopapc
->next
= act
->handlers
; /* Put our interrupt at the start of the list */
1601 act
->handlers
= stopapc
; /* Point to us */
1603 act_set_apc(act
); /* Set an APC AST */
1604 ml_set_interrupts_enabled(TRUE
); /* Enable interruptions now */
1606 act_unlock_thread(act
); /* Unlock the activation */
1608 ml_set_interrupts_enabled(FALSE
); /* Set back interruptions */
1609 save
->save_r3
= KERN_SUCCESS
; /* Hip, hip, horay... */
1613 /*-----------------------------------------------------------------------
1616 ** This function is executed asynchronously from an APC AST.
1617 ** It is to be used for anything that needs to interrupt a running VM.
1618 ** This include any kind of interruption generation (other than timer pop)
1619 ** or entering the stopped state.
1622 ** ReturnHandler *rh - the return handler control block as required by the APC.
1623 ** thread_act_t act - the activation
1626 ** Whatever needed to be done is done.
1627 -----------------------------------------------------------------------*/
1629 void vmm_interrupt(ReturnHandler
*rh
, thread_act_t act
) {
1631 vmmCntrlTable
*CTable
;
1637 kfree((vm_offset_t
)rh
, sizeof(ReturnHandler
)); /* Release the return handler block */
1639 inter
= ml_set_interrupts_enabled(FALSE
); /* Disable interruptions for now */
1641 act
->mact
.emPendRupts
= 0; /* Say that there are no more interrupts pending */
1642 CTable
= act
->mact
.vmmControl
; /* Get the pointer to the table */
1644 if(!((unsigned int)CTable
& -2)) return; /* Leave if we aren't doing VMs any more... */
1646 if(act
->mact
.vmmCEntry
&& (act
->mact
.vmmCEntry
->vmmFlags
& vmmXStop
)) { /* Do we need to stop the running guy? */
1647 sv
= find_user_regs(act
); /* Get the user state registers */
1648 if(!sv
) { /* Did we find something? */
1649 panic("vmm_interrupt: no user context; act = %08X\n", act
);
1651 sv
->save_exception
= kVmmStopped
*4; /* Set a "stopped" exception */
1652 vmm_force_exit(act
, sv
); /* Intercept a running VM */
1654 ml_set_interrupts_enabled(inter
); /* Put interrupts back to what they were */