/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.c
**
** C routines that we are adding to the MacOS X kernel.
**
** Weird Apple PSL stuff goes here...
**
** Until then, Copyright 2000, Connectix
-----------------------------------------------------------------------*/

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread_act.h>
#include <ppc/pmap_internals.h>
#include <vm/vm_kern.h>

#include <ppc/vmachmon.h>

extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */
extern double FloatInit;
extern unsigned long QNaNbarbarian[4];

/*************************************************************************************
	Virtual Machine Monitor Internal Routines
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function verifies an index and returns the corresponding vmm
** context entry.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/

vmmCntrlEntry *vmm_get_entry(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlTable		*CTable;
	vmmCntrlEntry		*CEntry;

	if (act->mact.vmmControl == 0) return NULL;			/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContextsPerThread) return NULL;	/* Index not in range */

	CTable = act->mact.vmmControl;					/* Make the address a bit more convenient */
	CEntry = &CTable->vmmc[index - 1];				/* Point to the entry */

	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;		/* See if the slot is actually in use */

	return CEntry;
}
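
/*
 * Editor's sketch (not part of the original source): the interface uses
 * one-based indices, so index 1 selects vmmc[0]. Assuming vmm_thread_index_t
 * is an unsigned type (which the range check above relies on), index 0 wraps
 * to a huge value and is rejected by the same test:
 *
 *	vmm_get_entry(act, 0);	// (0 - 1) wraps, fails the range check
 *	vmm_get_entry(act, 1);	// first slot, &CTable->vmmc[0]
 */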


/*************************************************************************************
	Virtual Machine Monitor Exported Functionality

	The following routines are used to implement a quick-switch mechanism for
	virtual machines that need to execute within their own processor environment
	(including register and MMU state).
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for them.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing major/minor version of
**		the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_version(struct savearea *save)
{
	save->save_r3 = kVmmCurrentVersion;		/* Return the version */
	return 1;
}
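
/*
 * Editor's sketch (not part of the original source): a client is expected to
 * split the returned word into its halves before comparing. The variable
 * names here are hypothetical:
 *
 *	unsigned int vers  = ...;			// value handed back in r3
 *	unsigned int major = vers >> 16;		// top 16 bits
 *	unsigned int minor = vers & 0xFFFF;		// bottom 16 bits
 *	if (major != (kVmmCurrentVersion >> 16)) ...	// refuse to run
 */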


/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing functionality supported by this
**		version of the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_features(struct savearea *save)
{
	save->save_r3 = kVmmCurrentFeatures;		/* Return the features */
	return 1;
}


/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**		act - pointer to current thread activation
**		version - requested version of the Vmm interface (allowing
**			future versions of the interface to change, but still
**			support older clients)
**		vmm_user_state - pointer to a logical page within the
**			client's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

int vmm_init_context(struct savearea *save)
{

	thread_act_t		act;
	vmm_version_t		version;
	vmm_state_page_t *	vmm_user_state;
	vmmCntrlTable		*CTable;
	vm_offset_t		conkern;
	vmm_state_page_t *	vks;
	vm_offset_t		conphys;
	kern_return_t		ret;
	pmap_t			new_pmap;
	int			cvi, i;
	task_t			task;
	thread_act_t		fact, gact;

	vmm_user_state = (vmm_state_page_t *)save->save_r4;	/* Get the user address of the comm area */
	if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {	/* Make sure the comm area is page aligned */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	/* Make sure that the version requested is supported */
	version = save->save_r3;				/* Pick up passed in version */
	if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	if((version & 0xFFFF) > kVmmCurMinorVersion) {		/* Check for valid minor */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	act = current_act();					/* Pick up our activation */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();					/* Figure out who we are */

	task_lock(task);					/* Lock our task */

	fact = (thread_act_t)task->thr_acts.next;		/* Get the first activation on task */
	gact = 0;						/* Pretend we didn't find it yet */

	for(i = 0; i < task->thr_act_count; i++) {		/* All of the activations */
		if(fact->mact.vmmControl) {			/* Is this a virtual machine monitor? */
			gact = fact;				/* Yeah... */
			break;					/* Bail the loop... */
		}
		fact = (thread_act_t)fact->thr_acts.next;	/* Go to the next one */
	}


/*
 * We only allow one thread per task to be a virtual machine monitor right now. This solves
 * a number of potential problems that I can't put my finger on right now.
 *
 * Ultimately, I think we want to move the controls and make all this task based instead of
 * thread based. That would allow an emulator architecture to spawn a kernel thread for each
 * VM (if they want) rather than hand dispatch contexts.
 */

	if(gact && (gact != act)) {				/* Check if another thread is a vmm or trying to be */
		task_unlock(task);				/* Release task lock */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_FAILURE;			/* We must play alone... */
		return 1;
	}

	if(!gact) act->mact.vmmControl = (vmmCntrlTable *)1;	/* Temporarily mark that we are the vmm thread */

	task_unlock(task);					/* Safe to release now (because we've marked ourselves) */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	if ((unsigned int)CTable == 1) {			/* If we are marked, try to allocate a new table, otherwise we have one */
		if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {	/* Get a fresh emulation control table */
			act->mact.vmmControl = 0;		/* Unmark us as vmm 'cause we failed */
			ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
			save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
			return 1;
		}

		bzero((void *)CTable, sizeof(vmmCntrlTable));	/* Clean it up */
		act->mact.vmmControl = CTable;			/* Initialize the table anchor */
	}

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search to find a free slot */
		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;	/* Bail if we find an unused slot */
	}

	if(cvi >= kVmmMaxContextsPerThread) {			/* Did we find one? */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No empty slots... */
		return 1;
	}

	ret = vm_map_wire(					/* Wire the virtual machine monitor's context area */
		act->map,
		(vm_offset_t)vmm_user_state,
		(vm_offset_t)vmm_user_state + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if (ret != KERN_SUCCESS)				/* The wire failed, return the code */
		goto return_in_shame;

	/* Map the vmm state into the kernel's address space. */
	conphys = pmap_extract(act->map->pmap, (vm_offset_t)vmm_user_state);

	/* Find a virtual address to use. */
	ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {				/* Did we find an address? */
		(void) vm_map_unwire(act->map,			/* No, unwire the context area */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		goto return_in_shame;
	}

	/* Map it into the kernel's address space. */
	pmap_enter(kernel_pmap, conkern, conphys,
		VM_PROT_READ | VM_PROT_WRITE,
		VM_WIMG_USE_DEFAULT, TRUE);

	/* Clear the vmm state structure. */
	vks = (vmm_state_page_t *)conkern;
	bzero((char *)vks, PAGE_SIZE);

	/* Allocate a new pmap for the new vmm context. */
	new_pmap = pmap_create(0);
	if (new_pmap == PMAP_NULL) {
		(void) vm_map_unwire(act->map,			/* Couldn't get a pmap, unwire the user page */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);

		kmem_free(kernel_map, conkern, PAGE_SIZE);	/* Release the kernel address */
		goto return_in_shame;
	}

	/* We're home free now. Simply fill in the necessary info and return. */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = cvi + 1;				/* Tell the user the index for this virtual machine */

	CTable->vmmc[cvi].vmmFlags = vmmInUse;			/* Mark the slot in use and make sure the rest are clear */
	CTable->vmmc[cvi].vmmPmap = new_pmap;			/* Remember the pmap for this guy */
	CTable->vmmc[cvi].vmmContextKern = vks;			/* Remember the kernel address of comm area */
	CTable->vmmc[cvi].vmmContextPhys = (vmm_state_page_t *)conphys;	/* Remember the state page physical addr */
	CTable->vmmc[cvi].vmmContextUser = vmm_user_state;	/* Remember user address of comm area */

	CTable->vmmc[cvi].vmmFacCtx.FPUsave = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUlevel = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0;			/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXsave = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXlevel = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0;			/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.facAct = act;		/* Point back to the activation */

	hw_atomic_add((int *)&saveanchor.savetarget, 2);	/* Account for the number of extra saveareas we think we might "need" */

	if (!(act->map->pmap->vflags & pmapVMhost)) {
		simple_lock(&(act->map->pmap->lock));
		act->map->pmap->vflags |= pmapVMhost;
		simple_unlock(&(act->map->pmap->lock));
	}

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;

return_in_shame:
	if(!gact) kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table if we just allocated it */
	act->mact.vmmControl = 0;				/* Unmark us as vmm 'cause we failed */
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = ret;					/* Pass back return code... */
	return 1;

}
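
/*
 * Editor's sketch (not part of the original source): the comm area passed in
 * r4 must be page aligned or the call fails early above. A hypothetical
 * user-side allocation that satisfies the requirement, using the Mach
 * vm_allocate call (which returns page-aligned memory by construction):
 *
 *	vm_address_t comm = 0;
 *	kern_return_t kr = vm_allocate(mach_task_self(), &comm, PAGE_SIZE, TRUE);
 *	// (comm & (PAGE_SIZE - 1)) == 0 here, so the alignment check passes
 */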


/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_tear_down_context(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi;
	register savearea	*sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	hw_atomic_sub((int *)&saveanchor.savetarget, 2);	/* We don't need these extra saveareas anymore */

	if(CEntry->vmmFacCtx.FPUsave) {				/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {				/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}

	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	pmap_destroy(CEntry->vmmPmap);				/* Toss the pmap for this context */
	CEntry->vmmPmap = NULL;					/* Clean it up */

	(void) vm_map_unwire(					/* Unwire the user comm page */
		act->map,
		(vm_offset_t)CEntry->vmmContextUser,
		(vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
		FALSE);

	kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);	/* Remove kernel's view of the comm page */

	CEntry->vmmFlags = 0;					/* Clear out all of the flags for this entry including in use */
	CEntry->vmmPmap = 0;					/* Clear pmap pointer */
	CEntry->vmmContextKern = 0;				/* Clear the kernel address of comm area */
	CEntry->vmmContextUser = 0;				/* Clear the user address of comm area */

	CEntry->vmmFacCtx.FPUsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.facAct = 0;				/* Clear facility context control */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search to see if any contexts are still in use */
		if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {	/* Return if there are still some in use */
			ml_set_interrupts_enabled(FALSE);	/* No more interruptions */
			return KERN_SUCCESS;			/* Leave... */
		}
	}

	kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table because we tossed the last context */
	act->mact.vmmControl = 0;				/* Unmark us as vmm */

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**		activation to tear down
**
** Outputs:
**		All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/
void vmm_tear_down_all(thread_act_t act) {

	vmmCntrlTable		*CTable;
	int			cvi;
	kern_return_t		ret;
	savearea		*save;
	spl_t			s;

	if(act->mact.specFlags & runningVM) {			/* Are we actually in a context right now? */
		save = find_user_regs(act);			/* Find the user state context */
		if(!save) {					/* Did we find it? */
			panic("vmm_tear_down_all: runningVM marked but no user state context\n");
			return;
		}

		save->save_exception = kVmmBogusContext*4;	/* Indicate that this context is bogus now */
		s = splhigh();					/* Make sure interrupts are off */
		vmm_force_exit(act, save);			/* Force an exit from VM state */
		splx(s);					/* Restore interrupts */
	}

	if(CTable = act->mact.vmmControl) {			/* Do we have a vmm control block? */

		for(cvi = 1; cvi <= kVmmMaxContextsPerThread; cvi++) {	/* Look at all slots */
			if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) {	/* Is this one in use? */
				ret = vmm_tear_down_context(act, cvi);	/* Take down the found context */
				if(ret != KERN_SUCCESS) {	/* Did it go away? */
					panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
						ret, act, cvi);
				}
			}
		}
		if(act->mact.vmmControl) {			/* Did everything get deallocated? */
			panic("vmm_tear_down_all: control table did not get deallocated\n");	/* Table did not go away */
		}
	}

	return;
}

/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it and this mapping may disappear at any time, even before it gets used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the client's address
**			space
**		ava - virtual address within the alternate address
**			space
**		prot - protection flags
**
** Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
** areas are not allowed and will fail. Same with directly mapped I/O areas.
**
** Input conditions:
**		Interrupts disabled (from fast trap)
**
** Outputs:
**		kernel return code indicating success or failure
**		if success, va resident and alternate mapping made
-----------------------------------------------------------------------*/

kern_return_t vmm_map_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		cva,
	vm_offset_t		ava,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;
	vm_offset_t		phys_addr;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	struct phys_entry	*pp;
	pmap_t			mpmap;
	vm_map_t		map;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

/*
 * Find out if we have already mapped the address and toss it out if so.
 */
	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, ava);	/* See if there is already a mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_map_page: timeout locking physical entry for alternate virtual address (%08X)\n", ava);	/* Yeah, scream about it! */
		return KERN_FAILURE;				/* Bad hair day, return FALSE... */
	}
	if(mp) {						/* If it was there, toss it */
		mpv = hw_cpv(mp);				/* Convert mapping block to virtual */
		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
		(void)mapping_remove(CEntry->vmmPmap, ava);	/* Throw away the mapping. we're about to replace it */
	}

	map = current_act()->map;				/* Get the current map */

	while(1) {						/* Keep trying until we get it or until we fail */
		if(hw_cvp_blk(map->pmap, cva)) return KERN_FAILURE;	/* Make sure that there is no block map at this address */

		mp = hw_lock_phys_vir(map->pmap->space, cva);	/* Lock the physical entry for emulator's page */
		if((unsigned int)mp&1) {			/* Did we timeout? */
			panic("vmm_map_page: timeout locking physical entry for emulator virtual address (%08X)\n", cva);	/* Yeah, scream about it! */
			return KERN_FAILURE;			/* Bad hair day, return FALSE... */
		}

		if(mp) {					/* We found it... */
			mpv = hw_cpv(mp);			/* Convert mapping block to virtual */

			if(!mpv->physent) return KERN_FAILURE;	/* If there is no physical entry (e.g., I/O area), we won't map it */

			if(!(mpv->PTEr & 1)) break;		/* If we are writable go ahead and map it... */

			hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the map before we try to fault the write bit on */
		}

		ml_set_interrupts_enabled(TRUE);		/* Enable interruptions */
		ret = vm_fault(map, trunc_page(cva), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0);	/* Didn't find it, try to fault it in read/write... */
		ml_set_interrupts_enabled(FALSE);		/* Disable interruptions */
		if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* There isn't a page there, return... */
	}

/*
 * Now we make a mapping using all of the attributes of the source page except for protection.
 * Also specify that the physical entry is locked.
 */
	nmpv = mapping_make(CEntry->vmmPmap, mpv->physent, (ava & -PAGE_SIZE),
		(mpv->physent->pte1 & -PAGE_SIZE), prot, ((mpv->physent->pte1 >> 3) & 0xF), 1);

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry now, we're done with it */

	CEntry->vmmLastMap = ava & -PAGE_SIZE;			/* Remember the last mapping we made */
	if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode))
		CEntry->vmmFlags |= vmmMapDone;			/* Set that we did a map operation */

	return KERN_SUCCESS;
}
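
/*
 * Editor's note (not in the original source): the (ava & -PAGE_SIZE)
 * expressions above depend on PAGE_SIZE being a power of two; -PAGE_SIZE is
 * then a mask with the low-order page-offset bits clear, so the AND truncates
 * to a page boundary, the same result trunc_page() gives. With 4KB pages:
 *
 *	0x00012345 & -0x1000  ==  0x00012345 & 0xFFFFF000  ==  0x00012000
 */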


/*-----------------------------------------------------------------------
** vmm_map_execute
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context and then directly starts executing.
**
** See description of vmm_map_page for details.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_map_execute(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		cva,
	vm_offset_t		ava,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */

	if (CEntry == NULL) return kVmmBogusContext;		/* Return bogus context */

	if (((per_proc_info[cpu_number()].spcFlags) & FamVMmode) && (CEntry != act->mact.vmmCEntry))
		return kVmmBogusContext;			/* Yes, invalid index in Fam */

	ret = vmm_map_page(act, index, cva, ava, prot);		/* Go try to map the page on in */

	if(ret == KERN_SUCCESS) {
		CEntry->vmmFlags |= vmmMapDone;			/* Set that we did a map operation */
		vmm_execute_vm(act, index);			/* Return was ok, launch the VM */
	}

	return kVmmInvalidAddress;				/* We had trouble mapping in the page */

}

/*-----------------------------------------------------------------------
** vmm_map_list
**
** This function maps a list of pages into the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		count - number of pages to map
**		vmcpComm in the comm page contains up to kVmmMaxMapPages to map
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxMapPages is exceeded
**		or the vmm_map_page call fails.
-----------------------------------------------------------------------*/

kern_return_t vmm_map_list(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		cnt)
{
	vmmCntrlEntry		*CEntry;
	kern_return_t		ret;
	unsigned int		i;
	vmmMapList		*lst;
	vm_offset_t		cva;
	vm_offset_t		ava;
	vm_prot_t		prot;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	if(cnt > kVmmMaxMapPages) return KERN_FAILURE;		/* They tried to map too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	lst = (vmmMapList *)(&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]);	/* Point to the first entry */

	for(i = 0; i < cnt; i++) {				/* Step and map all pages in list */
		cva = lst[i].vmlva;				/* Get the actual address */
		ava = lst[i].vmlava & -vmlFlgs;			/* Get the alternate address */
		prot = lst[i].vmlava & vmlProt;			/* Get the protection bits */
		ret = vmm_map_page(act, index, cva, ava, prot);	/* Go try to map the page on in */
		if(ret != KERN_SUCCESS) return KERN_FAILURE;	/* Bail if any error */
	}

	return KERN_SUCCESS;					/* Return... */
}
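
/*
 * Editor's sketch (not part of the original source): as the extraction above
 * implies, each list entry carries the protection bits in the low-order
 * (vmlProt) bits of vmlava. A hypothetical client would fill the comm page
 * list like this before making the call:
 *
 *	vmmMapList *lst = (vmmMapList *)&commpage->vmcpComm[0];
 *	lst[i].vmlva  = source_va;				// emulator-side address
 *	lst[i].vmlava = (alt_va & -vmlFlgs) | (prot & vmlProt);	// alternate address + protection
 */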

/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** This function determines whether the specified VMM
** virtual address is mapped.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the alternate's address
**			space
**
** Outputs:
**		Non-alternate's virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**		If there are aliases to the page in the non-alternate address space,
**		this call could return the wrong one. Moral of the story: no aliases.
-----------------------------------------------------------------------*/

vm_offset_t vmm_get_page_mapping(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	vm_offset_t		ova;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	pmap_t			pmap;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return -1;				/* No good, failure... */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_mapping: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return -1;					/* Bad hair day, return FALSE... */
	}
	if(!mp) return -1;					/* Not mapped, return -1 */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	pmap = current_act()->map->pmap;			/* Get the current pmap */
	ova = -1;						/* Assume failure for now */

	for(nmpv = hw_cpv(mpv->physent->phys_link); nmpv; nmpv = hw_cpv(nmpv->next)) {	/* Scan 'em all */

		if(nmpv->pmap != pmap) continue;		/* Skip all the rest if this is not the right pmap... */

		ova = ((((unsigned int)nmpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000;	/* Backward hash to the wrapped VADDR */
		ova = ova | ((nmpv->PTEv << 1) & 0xF0000000);	/* Move in the segment number */
		ova = ova | ((nmpv->PTEv << 22) & 0x0FC00000);	/* Add in the API for the top of the address */
		break;						/* We're done now, pass virtual address back */
	}

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	if(ova == -1) panic("vmm_get_page_mapping: could not back-map alternate va (%08X)\n", va);	/* We are bad wrong if we can't find it */

	return ova;
}

/*-----------------------------------------------------------------------
** vmm_unmap_page
**
** This function unmaps a page from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	boolean_t		ret;
	kern_return_t		kern_result = KERN_SUCCESS;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	ret = mapping_remove(CEntry->vmmPmap, va);		/* Toss the mapping */

	return (ret ? KERN_SUCCESS : KERN_FAILURE);		/* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_list
**
** This function unmaps a list of pages from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		count - number of pages to release
**		vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_list(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		cnt)
{
	vmmCntrlEntry		*CEntry;
	boolean_t		ret;
	kern_return_t		kern_result = KERN_SUCCESS;
	unsigned int		*pgaddr, i;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE;	/* They tried to unmap too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	pgaddr = &((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */

	for(i = 0; i < cnt; i++) {				/* Step and release all pages in list */

		(void)mapping_remove(CEntry->vmmPmap, pgaddr[i]);	/* Toss the mapping */
	}

	return KERN_SUCCESS;					/* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_all_pages
**
** This function unmaps all pages from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of context state
**
** Outputs:
**		none
**
** Note:
**		All pages are unmapped, but the address space (i.e., pmap) is still alive
-----------------------------------------------------------------------*/

void vmm_unmap_all_pages(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return;				/* Either this isn't vmm thread or the index is bogus */

/*
 * Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
 */
	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	return;
}


/*-----------------------------------------------------------------------
** vmm_get_page_dirty_flag
**
** This function returns the changed flag of the page
** and optionally clears the flag.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**		reset - Clears dirty if true, untouched if not
**
** Outputs:
**		the dirty bit
**		clears the dirty bit in the pte if requested
**
** Note:
**		The RC bits are merged into the global physical entry
-----------------------------------------------------------------------*/

boolean_t vmm_get_page_dirty_flag(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	unsigned int		reset)
{
	vmmCntrlEntry		*CEntry;
	register mapping	*mpv, *mp;
	unsigned int		RC;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return 1;				/* Either this isn't vmm thread or the index is bogus */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_dirty_flag: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return 1;					/* Bad hair day, return dirty... */
	}
	if(!mp) return 1;					/* Not mapped, return dirty... */

	RC = hw_test_rc(mp, reset);				/* Fetch the RC bits and clear if requested */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	return (RC & 1);					/* Return the change bit */
}


/*-----------------------------------------------------------------------
** vmm_protect_page
**
** This function sets the protection bits of a mapped page
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**		prot - Protection flags
**
** Outputs:
**		none
**		Protection bits of the mapping are modified
**
-----------------------------------------------------------------------*/

kern_return_t vmm_protect_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	vm_prot_t		prot)
{
	vmmCntrlEntry		*CEntry;
	register mapping	*mpv, *mp;
	unsigned int		RC;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_protect_page: timeout locking physical entry for virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return KERN_FAILURE;				/* Bad hair day, return failure... */
	}
	if(!mp) return KERN_SUCCESS;				/* Not mapped, just return... */

	hw_prot_virt(mp, prot);					/* Set the protection */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	CEntry->vmmLastMap = va & -PAGE_SIZE;			/* Remember the last mapping we changed */
	if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode))
		CEntry->vmmFlags |= vmmMapDone;			/* Set that we did a map operation */

	return KERN_SUCCESS;					/* Return */
}


/*-----------------------------------------------------------------------
** vmm_protect_execute
**
** This function sets the protection bits of a mapped page
** and then directly starts executing.
**
** See description of vmm_protect_page for details.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_protect_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_protect_execute(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */

	if (CEntry == NULL) return kVmmBogusContext;		/* Return bogus context */

	if (((per_proc_info[cpu_number()].spcFlags) & FamVMmode) && (CEntry != act->mact.vmmCEntry))
		return kVmmBogusContext;			/* Yes, invalid index in Fam */

	ret = vmm_protect_page(act, index, va, prot);		/* Go try to change access */

	if(ret == KERN_SUCCESS) {
		CEntry->vmmFlags |= vmmMapDone;			/* Set that we did a map operation */
		vmm_execute_vm(act, index);			/* Return was ok, launch the VM */
	}

	return kVmmInvalidAddress;				/* We had trouble of some kind (shouldn't happen) */

}


/*-----------------------------------------------------------------------
** vmm_get_float_state
**
** This function causes the current floating point state to
** be saved into the shared context area. It also clears the
** vmmFloatCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_float_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i;
	register struct savearea_fpu *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	act->mact.specFlags &= ~floatCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;	/* Clear the change indication */

	fpu_save(&CEntry->vmmFacCtx);				/* Save context if live */

	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCRshadow.i[0] = CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[0];	/* Copy FPSCR */
	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCRshadow.i[1] = CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[1];	/* Copy FPSCR */

	if(sv = CEntry->vmmFacCtx.FPUsave) {			/* Is there context yet? */
		bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8);	/* 32 registers */
		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {				/* Initialize floating points */
		CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit;	/* Initial value */
	}

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_get_vector_state
**
** This function causes the current vector state to
** be saved into the shared context area. It also clears the
** vmmVectorCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_vector_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i, j;
	unsigned int		vrvalidwrk;
	register struct savearea_vec *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	vec_save(&CEntry->vmmFacCtx);				/* Save context if live */

	act->mact.specFlags &= ~vectorCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;	/* Clear the change indication */

	for(j = 0; j < 4; j++) {				/* Set value for vscr */
		CEntry->vmmContextKern->vmm_proc_state.ppcVSCRshadow.i[j] = CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j];
	}

	if(sv = CEntry->vmmFacCtx.VMXsave) {			/* Is there context yet? */

		vrvalidwrk = sv->save_vrvalid;			/* Get the valid flags */

		for(i = 0; i < 32; i++) {			/* Copy the saved registers and invalidate the others */
			if(vrvalidwrk & 0x80000000) {		/* Do we have a valid value here? */
				for(j = 0; j < 4; j++) {	/* If so, copy it over */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
				}
			}
			else {
				for(j = 0; j < 4; j++) {	/* Otherwise set to empty value */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
				}
			}

			vrvalidwrk = vrvalidwrk << 1;		/* Shift over to the next */

		}

		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {				/* Initialize vector registers */
		for(j = 0; j < 4; j++) {			/* Do words */
			CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];	/* Initial value */
		}
	}

	return KERN_SUCCESS;
}
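
/*
 * Editor's note (not in the original source): save_vrvalid is consumed above
 * as a bitmap with VR0 at the most significant bit, shifting left one bit per
 * register. An equivalent test for a single register i would be:
 *
 *	if (sv->save_vrvalid & (0x80000000U >> i)) ...	// VRi holds a live value
 */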

/*-----------------------------------------------------------------------
** vmm_set_timer
**
** This function causes a timer (in AbsoluteTime) to be set for a
** specific time. It also clears the vmmTimerPop flag if the timer
** is actually set; it is cleared otherwise.
**
** A timer is cleared by setting the time to 0. This will clear
** the vmmTimerPop bit. Simply setting the timer to earlier than the
** current time clears the internal timer request, but leaves the
** vmmTimerPop flag set.
**
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**		timerhi - high order word of AbsoluteTime to pop
**		timerlo - low order word of AbsoluteTime to pop
**
** Outputs:
**		timer set, vmmTimerPop cleared
-----------------------------------------------------------------------*/

kern_return_t vmm_set_timer(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		timerhi,
	unsigned int		timerlo)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo;

	vmm_timer_pop(act);					/* Go adjust all of the timer stuff */
	return KERN_SUCCESS;					/* Leave now... */
}
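
/*
 * Editor's sketch (not part of the original source): the two 32-bit arguments
 * are just the halves of a 64-bit AbsoluteTime deadline, mirroring the
 * combine above. A hypothetical caller splits its deadline like this:
 *
 *	uint64_t deadline = ...;			// absolute time to pop
 *	unsigned int timerhi = deadline >> 32;		// high order word
 *	unsigned int timerlo = (uint32_t)deadline;	// low order word
 *
 * Passing 0/0 clears the timer, as described in the header comment.
 */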


/*-----------------------------------------------------------------------
** vmm_get_timer
**
** This function causes the timer for a specified VM to be
** returned in return_params[0] and return_params[1].
**
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Timer value set in return_params[0] and return_params[1].
**		Set to 0 if timer is not set.
-----------------------------------------------------------------------*/

kern_return_t vmm_get_timer(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	CEntry->vmmContextKern->return_params[0] = (CEntry->vmmTimer >> 32);	/* Return the last timer value */
	CEntry->vmmContextKern->return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the last timer value */

	return KERN_SUCCESS;
}


/*-----------------------------------------------------------------------
** vmm_timer_pop
**
** This function causes all timers in the array of VMs to be updated.
** All appropriate flags are set or reset. If a VM is currently
** running and its timer expired, it is intercepted.
**
** The qactTimer value is set to the lowest unexpired timer. It is
** zeroed if all timers are expired or have been reset.
**
** Inputs:
**		act - pointer to current thread activation structure
**
** Outputs:
**		timers set, vmmTimerPop cleared or set
-----------------------------------------------------------------------*/

void vmm_timer_pop(
	thread_act_t		act)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi, any;
	uint64_t		now, soonest;
	savearea		*sv;

	if(!((unsigned int)act->mact.vmmControl & 0xFFFFFFFE)) {	/* Are there any virtual machines? */
		panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
	}

	soonest = 0xFFFFFFFFFFFFFFFFULL;			/* Max time */

	clock_get_uptime(&now);					/* What time is it? */

	CTable = act->mact.vmmControl;				/* Make this easier */
	any = 0;						/* Haven't found a running unexpired timer yet */

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Cycle through all and check time now */

		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;	/* Do not check if the entry is empty */

		if(CTable->vmmc[cvi].vmmTimer == 0) {		/* Is the timer reset? */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Clear timer popped */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Clear timer popped */
			continue;				/* Check next */
		}

		if (CTable->vmmc[cvi].vmmTimer <= now) {
			CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;	/* Set timer popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;	/* Set timer popped here */
			if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->mact.vmmCEntry) {	/* Is this the running VM? */
				sv = find_user_regs(act);	/* Get the user state registers */
				if(!sv) {			/* Did we find something? */
					panic("vmm_timer_pop: no user context; act = %08X\n", act);
				}
				sv->save_exception = kVmmReturnNull*4;	/* Indicate that this is a null exception */
				vmm_force_exit(act, sv);	/* Intercept a running VM */
			}
			continue;				/* Check the rest */
		}
		else {						/* It hasn't popped yet */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Set timer not popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Set timer not popped here */
		}

		any = 1;					/* Show we found an active unexpired timer */

		if (CTable->vmmc[cvi].vmmTimer < soonest)
			soonest = CTable->vmmc[cvi].vmmTimer;
	}

	if(any) {
		if (act->mact.qactTimer == 0 || soonest <= act->mact.qactTimer)
			act->mact.qactTimer = soonest;		/* Set lowest timer */
	}

	return;
}



/*-----------------------------------------------------------------------
** vmm_stop_vm
**
** This function prevents the specified VM(s) from running.
** If any is currently executing, the execution is intercepted
** with a code of kVmmStopped. Note that execution of the VM is
** blocked until a vmmExecuteVM is called with the start flag set to 1.
** This provides the ability for a thread to stop execution of a VM and
** ensure that it will not be run until the emulator has processed the
** "virtual" interruption.
**
** Inputs:
**		vmmask - 32 bit mask corresponding to the VMs to put in stop state
**		NOTE: if this mask is all 0s, any executing VM is intercepted with
**		a kVmmStopped (but not marked stopped), otherwise this is a no-op.
**		Also note that there is a potential race here and the VM may not stop.
**
** Outputs:
**		kernel return code indicating success
**		or if no VMs are enabled, an invalid syscall exception.
-----------------------------------------------------------------------*/

int vmm_stop_vm(struct savearea *save)
{

	thread_act_t		act;
	vmmCntrlTable		*CTable;
	int			cvi, i;
	task_t			task;
	thread_act_t		fact;
	unsigned int		vmmask;
	ReturnHandler		*stopapc;

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();					/* Figure out who we are */

	task_lock(task);					/* Lock our task */

	fact = (thread_act_t)task->thr_acts.next;		/* Get the first activation on task */
	act = 0;						/* Pretend we didn't find it yet */

	for(i = 0; i < task->thr_act_count; i++) {		/* All of the activations */
		if(fact->mact.vmmControl) {			/* Is this a virtual machine monitor? */
			act = fact;				/* Yeah... */
			break;					/* Bail the loop... */
		}
		fact = (thread_act_t)fact->thr_acts.next;	/* Go to the next one */
	}

	if(!((unsigned int)act)) {				/* See if we have VMMs yet */
		task_unlock(task);				/* No, unlock the task */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;					/* Go generate a syscall exception */
	}

	act_lock_thread(act);					/* Make sure this stays 'round */
	task_unlock(task);					/* Safe to release now */

	CTable = act->mact.vmmControl;				/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {			/* Are there any all the way up yet? */
		act_unlock_thread(act);				/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;					/* Go generate a syscall exception */
	}

	if(!(vmmask = save->save_r3)) {				/* Get the stop mask and check if all zeros */
		act_unlock_thread(act);				/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Set success */
		return 1;					/* Return... */
	}

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search slots */
		if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) {	/* See if we need to stop and if it is in use */
			hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop);	/* Set this one to stop */
		}
		vmmask = vmmask << 1;				/* Slide mask over */
	}

	if(hw_compare_and_store(0, 1, &act->mact.emPendRupts)) {	/* See if there is already a stop pending and lock out others if not */
		act_unlock_thread(act);				/* Already one pending, unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Say we did it... */
		return 1;					/* Leave */
	}

	if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) {	/* Get a return handler control block */
		act->mact.emPendRupts = 0;			/* No memory, say we have given up request */
		act_unlock_thread(act);				/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No storage... */
		return 1;					/* Return... */
	}

	ml_set_interrupts_enabled(FALSE);			/* Disable interruptions for now */

	stopapc->handler = vmm_interrupt;			/* Set interruption routine */

	stopapc->next = act->handlers;				/* Put our interrupt at the start of the list */
	act->handlers = stopapc;				/* Point to us */

	act_set_apc(act);					/* Set an APC AST */
	ml_set_interrupts_enabled(TRUE);			/* Enable interruptions now */

	act_unlock_thread(act);					/* Unlock the activation */

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;
}
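
/*
 * Editor's note (not in the original source): the stop mask is scanned from
 * bit 31 (0x80000000) downward, one bit per context slot, so bit 31
 * corresponds to one-based context index 1. A hypothetical mask to stop
 * context n (1 <= n <= 32) would be:
 *
 *	unsigned int vmmask = 0x80000000U >> (n - 1);
 */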

/*-----------------------------------------------------------------------
** vmm_interrupt
**
** This function is executed asynchronously from an APC AST.
** It is to be used for anything that needs to interrupt a running VM.
** This includes any kind of interruption generation (other than timer pop)
** or entering the stopped state.
**
** Inputs:
**		ReturnHandler *rh - the return handler control block as required by the APC.
**		thread_act_t act - the activation
**
** Outputs:
**		Whatever needed to be done is done.
-----------------------------------------------------------------------*/

void vmm_interrupt(ReturnHandler *rh, thread_act_t act) {

	vmmCntrlTable		*CTable;
	savearea		*sv;
	boolean_t		inter;



	kfree((vm_offset_t)rh, sizeof(ReturnHandler));		/* Release the return handler block */

	inter = ml_set_interrupts_enabled(FALSE);		/* Disable interruptions for now */

	act->mact.emPendRupts = 0;				/* Say that there are no more interrupts pending */
	CTable = act->mact.vmmControl;				/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) return;		/* Leave if we aren't doing VMs any more... */

	if(act->mact.vmmCEntry && (act->mact.vmmCEntry->vmmFlags & vmmXStop)) {	/* Do we need to stop the running guy? */
		sv = find_user_regs(act);			/* Get the user state registers */
		if(!sv) {					/* Did we find something? */
			panic("vmm_interrupt: no user context; act = %08X\n", act);
		}
		sv->save_exception = kVmmStopped*4;		/* Set a "stopped" exception */
		vmm_force_exit(act, sv);			/* Intercept a running VM */
	}
	ml_set_interrupts_enabled(inter);			/* Put interrupts back to what they were */

	return;
}