/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.c
**
** C routines that we are adding to the MacOS X kernel.
**
** Weird Apple PSL stuff goes here...
**
** Until then, Copyright 2000, Connectix
-----------------------------------------------------------------------*/

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread_act.h>
#include <ppc/pmap_internals.h>
#include <vm/vm_kern.h>

#include <ppc/vmachmon.h>

extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */
extern double FloatInit;
extern unsigned long QNaNbarbarian[4];

/*************************************************************************************
	Virtual Machine Monitor Internal Routines
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function verifies an index and returns a pointer to the
** corresponding vmm context entry.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/

vmmCntrlEntry *vmm_get_entry(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlTable		*CTable;
	vmmCntrlEntry		*CEntry;

	if (act->mact.vmmControl == 0) return NULL;		/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContextsPerThread) return NULL;	/* Index not in range */

	CTable = act->mact.vmmControl;				/* Make the address a bit more convenient */
	CEntry = &CTable->vmmc[index - 1];			/* Point to the entry */

	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;	/* See if the slot is actually in use */

	return CEntry;
}
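
/*
 *	Illustrative note (not part of the original file): the index is
 *	one-based, so the client-visible value maps to the table like this.
 *
 *		CEntry = vmm_get_entry(act, 1);		// first context, i.e. CTable->vmmc[0]
 *		CEntry = vmm_get_entry(act, 0);		// NULL: assuming vmm_thread_index_t is
 *							// unsigned, (0 - 1) wraps past the limit
 */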


/*************************************************************************************
	Virtual Machine Monitor Exported Functionality

	The following routines are used to implement a quick-switch mechanism for
	virtual machines that need to execute within their own processor environment
	(including register and MMU state).
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for them.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing major/minor version of
**		the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_version(struct savearea *save)
{
	save->save_r3 = kVmmCurrentVersion;		/* Return the version */
	return 1;
}
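
/*
 *	A minimal sketch of how a client might decode the value returned in r3
 *	(hypothetical client-side code, not part of this file):
 *
 *		unsigned int vers  = ...;		// value vmm_get_version left in save_r3
 *		unsigned int major = vers >> 16;	// top 16 bits: major version
 *		unsigned int minor = vers & 0xFFFF;	// bottom 16 bits: minor version
 */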


/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing functionality supported by this
**		version of the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_features(struct savearea *save)
{
	save->save_r3 = kVmmCurrentFeatures;		/* Return the features */
	return 1;
}
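
/*
 *	A minimal sketch of a client-side capability test (hypothetical; the
 *	feature bit name here is illustrative only, not a real constant):
 *
 *		unsigned int feats = ...;			// value vmm_get_features left in save_r3
 *		if (feats & kVmmFeature_SomeCapability) {	// test one feature flag
 *			// safe to rely on that capability
 *		}
 */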


/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**		act - pointer to current thread activation
**		version - requested version of the Vmm interface (allowing
**			future versions of the interface to change, but still
**			support older clients)
**		vmm_user_state - pointer to a logical page within the
**			client's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

int vmm_init_context(struct savearea *save)
{

	thread_act_t		act;
	vmm_version_t		version;
	vmm_state_page_t	*vmm_user_state;
	vmmCntrlTable		*CTable;
	vm_offset_t		conkern;
	vmm_state_page_t	*vks;
	vm_offset_t		conphys;
	kern_return_t		ret;
	pmap_t			new_pmap;
	int			cvi, i;
	task_t			task;
	thread_act_t		fact, gact;

	vmm_user_state = (vmm_state_page_t *)save->save_r4;	/* Get the user address of the comm area */
	if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {	/* Make sure the comm area is page aligned */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	/* Make sure that the version requested is supported */
	version = save->save_r3;				/* Pick up passed in version */
	if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	if((version & 0xFFFF) > kVmmCurMinorVersion) {		/* Check for valid minor */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	act = current_act();					/* Pick up our activation */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();					/* Figure out who we are */

	task_lock(task);					/* Lock our task */

	fact = (thread_act_t)task->thr_acts.next;		/* Get the first activation on task */
	gact = 0;						/* Pretend we didn't find it yet */

	for(i = 0; i < task->thr_act_count; i++) {		/* All of the activations */
		if(fact->mact.vmmControl) {			/* Is this a virtual machine monitor? */
			gact = fact;				/* Yeah... */
			break;					/* Bail the loop... */
		}
		fact = (thread_act_t)fact->thr_acts.next;	/* Go to the next one */
	}


	/*
	 *	We only allow one thread per task to be a virtual machine monitor right now. This solves
	 *	a number of potential problems that I can't put my finger on right now.
	 *
	 *	Ultimately, I think we want to move the controls and make all this task based instead of
	 *	thread based. That would allow an emulator architecture to spawn a kernel thread for each
	 *	VM (if they want) rather than hand dispatch contexts.
	 */

	if(gact && (gact != act)) {				/* Check if another thread is a vmm or trying to be */
		task_unlock(task);				/* Release task lock */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_FAILURE;			/* We must play alone... */
		return 1;
	}

	if(!gact) act->mact.vmmControl = (vmmCntrlTable *)1;	/* Temporarily mark that we are the vmm thread */

	task_unlock(task);					/* Safe to release now (because we've marked ourselves) */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	if ((unsigned int)CTable == 1) {			/* If we are marked, try to allocate a new table, otherwise we have one */
		if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {	/* Get a fresh emulation control table */
			act->mact.vmmControl = 0;		/* Unmark us as vmm 'cause we failed */
			ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
			save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
			return 1;
		}

		bzero((void *)CTable, sizeof(vmmCntrlTable));	/* Clean it up */
		act->mact.vmmControl = CTable;			/* Initialize the table anchor */
	}

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search to find a free slot */
		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;	/* Bail if we find an unused slot */
	}

	if(cvi >= kVmmMaxContextsPerThread) {			/* Did we find one? */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No empty slots... */
		return 1;
	}

	ret = vm_map_wire(					/* Wire the virtual machine monitor's context area */
		act->map,
		(vm_offset_t)vmm_user_state,
		(vm_offset_t)vmm_user_state + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if (ret != KERN_SUCCESS)				/* The wire failed, return the code */
		goto return_in_shame;

	/* Map the vmm state into the kernel's address space. */
	conphys = pmap_extract(act->map->pmap, (vm_offset_t)vmm_user_state);

	/* Find a virtual address to use. */
	ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {				/* Did we find an address? */
		(void) vm_map_unwire(act->map,			/* No, unwire the context area */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		goto return_in_shame;
	}

	/* Map it into the kernel's address space. */
	pmap_enter(kernel_pmap, conkern, conphys,
		VM_PROT_READ | VM_PROT_WRITE,
		VM_WIMG_USE_DEFAULT, TRUE);

	/* Clear the vmm state structure. */
	vks = (vmm_state_page_t *)conkern;
	bzero((char *)vks, PAGE_SIZE);

	/* Allocate a new pmap for the new vmm context. */
	new_pmap = pmap_create(0);
	if (new_pmap == PMAP_NULL) {
		(void) vm_map_unwire(act->map,			/* Couldn't get a pmap, unwire the user page */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);

		kmem_free(kernel_map, conkern, PAGE_SIZE);	/* Release the kernel address */
		goto return_in_shame;
	}

	/* We're home free now. Simply fill in the necessary info and return. */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = cvi + 1;				/* Tell the user the index for this virtual machine */

	CTable->vmmc[cvi].vmmFlags = vmmInUse;			/* Mark the slot in use and make sure the rest are clear */
	CTable->vmmc[cvi].vmmPmap = new_pmap;			/* Remember the pmap for this guy */
	CTable->vmmc[cvi].vmmContextKern = vks;			/* Remember the kernel address of comm area */
	CTable->vmmc[cvi].vmmContextPhys = (vmm_state_page_t *)conphys;	/* Remember the state page physical addr */
	CTable->vmmc[cvi].vmmContextUser = vmm_user_state;	/* Remember user address of comm area */

	CTable->vmmc[cvi].vmmFacCtx.FPUsave = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUlevel = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0;			/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXsave = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXlevel = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0;			/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.facAct = act;		/* Point back to the activation */

	hw_atomic_add((int *)&saveanchor.savetarget, 2);	/* Account for the number of extra saveareas we think we might "need" */

	if (!(act->map->pmap->vflags & pmapVMhost)) {
		simple_lock(&(act->map->pmap->lock));
		act->map->pmap->vflags |= pmapVMhost;
		simple_unlock(&(act->map->pmap->lock));
	}

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;

return_in_shame:
	if(!gact) kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table if we just allocated it */
	act->mact.vmmControl = 0;				/* Unmark us as vmm 'cause we failed */
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = ret;					/* Pass back return code... */
	return 1;

}
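
/*
 *	Sketch of the calling convention this routine expects, as seen from the
 *	emulator side (hypothetical illustration; an actual client traps into
 *	the kernel with the version in r3 and the comm-area address in r4):
 *
 *		vmm_state_page_t *comm = ...;	// a page-aligned, page-sized buffer
 *		// r3 = kVmmCurrentVersion, r4 = (vm_offset_t)comm, then trap;
 *		// on success, comm->thread_index holds the one-based index to use
 *		// in all subsequent vmm calls for this context.
 */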


/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_tear_down_context(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			cvi;
	register savearea	*sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	hw_atomic_sub((int *)&saveanchor.savetarget, 2);	/* We don't need these extra saveareas anymore */

	if(CEntry->vmmFacCtx.FPUsave) {				/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {				/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}

	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	pmap_destroy(CEntry->vmmPmap);				/* Toss the pmap for this context */
	CEntry->vmmPmap = NULL;					/* Clean it up */

	(void) vm_map_unwire(					/* Unwire the user comm page */
		act->map,
		(vm_offset_t)CEntry->vmmContextUser,
		(vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
		FALSE);

	kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);	/* Remove kernel's view of the comm page */

	CEntry->vmmFlags = 0;					/* Clear out all of the flags for this entry including in use */
	CEntry->vmmPmap = 0;					/* Clear pmap pointer */
	CEntry->vmmContextKern = 0;				/* Clear the kernel address of comm area */
	CEntry->vmmContextUser = 0;				/* Clear the user address of comm area */

	CEntry->vmmFacCtx.FPUsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.facAct = 0;				/* Clear facility context control */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* See if any slots are still in use */
		if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {	/* Return if there are still some in use */
			ml_set_interrupts_enabled(FALSE);	/* No more interruptions */
			return KERN_SUCCESS;			/* Leave... */
		}
	}

	kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table because we tossed the last context */
	act->mact.vmmControl = 0;				/* Unmark us as vmm */

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**		activation to tear down
**
** Outputs:
**		All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/
void vmm_tear_down_all(thread_act_t act) {

	vmmCntrlTable		*CTable;
	int			cvi;
	kern_return_t		ret;
	savearea		*save;
	spl_t			s;

	if(act->mact.specFlags & runningVM) {			/* Are we actually in a context right now? */
		save = find_user_regs(act);			/* Find the user state context */
		if(!save) {					/* Did we find it? */
			panic("vmm_tear_down_all: runningVM marked but no user state context\n");
			return;
		}

		save->save_exception = kVmmBogusContext*4;	/* Indicate that this context is bogus now */
		s = splhigh();					/* Make sure interrupts are off */
		vmm_force_exit(act, save);			/* Force an exit from VM state */
		splx(s);					/* Restore interrupts */
	}

	if(CTable = act->mact.vmmControl) {			/* Do we have a vmm control block? */

		for(cvi = 1; cvi <= kVmmMaxContextsPerThread; cvi++) {	/* Look at all slots */
			if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) {	/* Is this one in use? */
				ret = vmm_tear_down_context(act, cvi);	/* Take down the found context */
				if(ret != KERN_SUCCESS) {	/* Did it go away? */
					panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
						ret, act, cvi);
				}
			}
		}
		if(act->mact.vmmControl) {			/* Did the table go away? */
			panic("vmm_tear_down_all: control table did not get deallocated\n");	/* Table did not go away */
		}
	}

	return;
}

/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it, and this mapping, may disappear at any time, even before it gets used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the client's address
**			space
**		ava - virtual address within the alternate address
**			space
**		prot - protection flags
**
** Note that attempted mappings of areas in nested pmaps (shared libraries) or block mapped
** areas are not allowed and will fail. Same with directly mapped I/O areas.
**
** Input conditions:
**		Interrupts disabled (from fast trap)
**
** Outputs:
**		kernel return code indicating success or failure
**		if success, va resident and alternate mapping made
-----------------------------------------------------------------------*/

kern_return_t vmm_map_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		cva,
	vm_offset_t		ava,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;
	vm_offset_t		phys_addr;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	struct phys_entry	*pp;
	pmap_t			mpmap;
	vm_map_t		map;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	/*
	 *	Find out if we have already mapped the address and toss it out if so.
	 */
	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, ava);	/* See if there is already a mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_map_page: timeout locking physical entry for alternate virtual address (%08X)\n", ava);	/* Yeah, scream about it! */
		return KERN_FAILURE;				/* Bad hair day, return FALSE... */
	}
	if(mp) {						/* If it was there, toss it */
		mpv = hw_cpv(mp);				/* Convert mapping block to virtual */
		hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */
		(void)mapping_remove(CEntry->vmmPmap, ava);	/* Throw away the mapping; we're about to replace it */
	}

	map = current_act()->map;				/* Get the current map */

	while(1) {						/* Keep trying until we get it or until we fail */
		if(hw_cvp_blk(map->pmap, cva)) return KERN_FAILURE;	/* Make sure that there is no block map at this address */

		mp = hw_lock_phys_vir(map->pmap->space, cva);	/* Lock the physical entry for emulator's page */
		if((unsigned int)mp&1) {			/* Did we timeout? */
			panic("vmm_map_page: timeout locking physical entry for emulator virtual address (%08X)\n", cva);	/* Yeah, scream about it! */
			return KERN_FAILURE;			/* Bad hair day, return FALSE... */
		}

		if(mp) {					/* We found it... */
			mpv = hw_cpv(mp);			/* Convert mapping block to virtual */

			if(!mpv->physent) return KERN_FAILURE;	/* If there is no physical entry (e.g., I/O area), we won't map it */

			if(!(mpv->PTEr & 1)) break;		/* If we are writable go ahead and map it... */

			hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the map before we try to fault the write bit on */
		}

		ml_set_interrupts_enabled(TRUE);		/* Enable interruptions */
		ret = vm_fault(map, trunc_page(cva), VM_PROT_READ | VM_PROT_WRITE, FALSE, NULL, 0);	/* Didn't find it, try to fault it in read/write... */
		ml_set_interrupts_enabled(FALSE);		/* Disable interruptions */
		if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* There isn't a page there, return... */
	}

	/*
	 *	Now we make a mapping using all of the attributes of the source page except for protection.
	 *	Also specify that the physical entry is locked.
	 */
	nmpv = mapping_make(CEntry->vmmPmap, mpv->physent, (ava & -PAGE_SIZE),
		(mpv->physent->pte1 & -PAGE_SIZE), prot, ((mpv->physent->pte1 >> 3) & 0xF), 1);

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* Unlock the physical entry now, we're done with it */

	CEntry->vmmLastMap = ava & -PAGE_SIZE;			/* Remember the last mapping we made */
	if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode))
		CEntry->vmmFlags |= vmmMapDone;			/* Set that we did a map operation */

	return KERN_SUCCESS;
}
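
/*
 *	Note on the masking idiom above (explanatory comment, not original code):
 *	since PAGE_SIZE is a power of two, (ava & -PAGE_SIZE) is the same as
 *	(ava & ~(PAGE_SIZE - 1)), i.e., ava rounded down to a page boundary.
 */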


/*-----------------------------------------------------------------------
** vmm_map_execute
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context and then directly starts executing.
**
** See description of vmm_map_page for details.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_map_execute(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		cva,
	vm_offset_t		ava,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */

	if (CEntry == NULL) return kVmmBogusContext;		/* Return bogus context */

	if (((per_proc_info[cpu_number()].spcFlags) & FamVMmode) && (CEntry != act->mact.vmmCEntry))
		return kVmmBogusContext;			/* Yes, invalid index in Fam */

	ret = vmm_map_page(act, index, cva, ava, prot);		/* Go try to map the page on in */

	if(ret == KERN_SUCCESS) {
		CEntry->vmmFlags |= vmmMapDone;			/* Set that we did a map operation */
		vmm_execute_vm(act, index);			/* Return was ok, launch the VM */
	}

	return kVmmInvalidAddress;				/* We had trouble mapping in the page */

}

/*-----------------------------------------------------------------------
** vmm_map_list
**
** This function maps a list of pages into the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		count - number of pages to map
**		vmcpComm in the comm page contains up to kVmmMaxMapPages to map
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxMapPages is exceeded
**		or the vmm_map_page call fails.
-----------------------------------------------------------------------*/

kern_return_t vmm_map_list(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		cnt)
{
	vmmCntrlEntry		*CEntry;
	boolean_t		ret;
	unsigned int		i;
	vmmMapList		*lst;
	vm_offset_t		cva;
	vm_offset_t		ava;
	vm_prot_t		prot;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	if(cnt > kVmmMaxMapPages) return KERN_FAILURE;		/* They tried to map too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	lst = (vmmMapList *)(&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]);	/* Point to the first entry */

	for(i = 0; i < cnt; i++) {				/* Step through and map all pages in the list */
		cva = lst[i].vmlva;				/* Get the actual address */
		ava = lst[i].vmlava & -vmlFlgs;			/* Get the alternate address */
		prot = lst[i].vmlava & vmlProt;			/* Get the protection bits */
		ret = vmm_map_page(act, index, cva, ava, prot);	/* Go try to map the page on in */
		if(ret != KERN_SUCCESS) return KERN_FAILURE;	/* Bail if any error */
	}

	return KERN_SUCCESS;					/* Return... */
}
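
/*
 *	Sketch of how a client might pack an entry in vmcpComm, mirroring the
 *	unpacking above (hypothetical client-side code; assumes ava is page
 *	aligned so the protection fits in the low vmlProt flag bits):
 *
 *		lst[i].vmlva  = cva;		// emulator-side source address
 *		lst[i].vmlava = ava | prot;	// alternate address with protection in the flag bits
 */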

/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** This function determines whether the specified VMM
** virtual address is mapped.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the alternate's address
**			space
**
** Outputs:
**		Non-alternate's virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**		If there are aliases to the page in the non-alternate address space,
**		this call could return the wrong one. Moral of the story: no aliases.
-----------------------------------------------------------------------*/

vm_offset_t vmm_get_page_mapping(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	vm_offset_t		ova;
	register mapping	*mpv, *mp, *nmpv, *nmp;
	pmap_t			pmap;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return -1;				/* No good, failure... */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_mapping: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return -1;					/* Bad hair day, return -1... */
	}
	if(!mp) return -1;					/* Not mapped, return -1 */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	pmap = current_act()->map->pmap;			/* Get the current pmap */
	ova = -1;						/* Assume failure for now */

	for(nmpv = hw_cpv(mpv->physent->phys_link); nmpv; nmpv = hw_cpv(nmpv->next)) {	/* Scan 'em all */

		if(nmpv->pmap != pmap) continue;		/* Skip all the rest if this is not the right pmap... */

		ova = ((((unsigned int)nmpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000;	/* Backward hash to the wrapped VADDR */
		ova = ova | ((nmpv->PTEv << 1) & 0xF0000000);	/* Move in the segment number */
		ova = ova | ((nmpv->PTEv << 22) & 0x0FC00000);	/* Add in the API for the top of the address */
		break;						/* We're done now, pass virtual address back */
	}

	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	if(ova == -1) panic("vmm_get_page_mapping: could not back-map alternate va (%08X)\n", va);	/* We are bad wrong if we can't find it */

	return ova;
}

/*-----------------------------------------------------------------------
** vmm_unmap_page
**
** This function unmaps a page from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va)
{
	vmmCntrlEntry		*CEntry;
	boolean_t		ret;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	ret = mapping_remove(CEntry->vmmPmap, va);		/* Toss the mapping */

	return (ret ? KERN_SUCCESS : KERN_FAILURE);		/* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_list
**
** This function unmaps a list of pages from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		count - number of pages to release
**		vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_list(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		cnt)
{
	vmmCntrlEntry		*CEntry;
	unsigned int		*pgaddr, i;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */
	if (CEntry == NULL) return KERN_FAILURE;		/* No good, failure... */

	if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE;	/* They tried to unmap too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	pgaddr = &((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */

	for(i = 0; i < cnt; i++) {				/* Step through and release all pages in the list */

		(void)mapping_remove(CEntry->vmmPmap, pgaddr[i]);	/* Toss the mapping */
	}

	return KERN_SUCCESS;					/* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_all_pages
**
** This function unmaps all pages from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of context state
**
** Outputs:
**		none
**
** Note:
**		All pages are unmapped, but the address space (i.e., pmap) is still alive
-----------------------------------------------------------------------*/

void vmm_unmap_all_pages(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return;				/* Either this isn't vmm thread or the index is bogus */

	/*
	 *	Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
	 */
	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);		/* Remove final page explicitly because we might have mapped it */
	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);		/* Remove all entries from this map */
	return;
}


/*-----------------------------------------------------------------------
** vmm_get_page_dirty_flag
**
** This function returns the changed flag of the page
** and optionally clears the flag.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**		reset - clears dirty if true, leaves it untouched if not
**
** Outputs:
**		the dirty bit
**		clears the dirty bit in the pte if requested
**
** Note:
**		The RC bits are merged into the global physical entry
-----------------------------------------------------------------------*/

boolean_t vmm_get_page_dirty_flag(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	unsigned int		reset)
{
	vmmCntrlEntry		*CEntry;
	register mapping	*mpv, *mp;
	unsigned int		RC;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return 1;				/* Either this isn't vmm thread or the index is bogus */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_get_page_dirty_flag: timeout locking physical entry for alternate virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return 1;					/* Bad hair day, return dirty... */
	}
	if(!mp) return 1;					/* Not mapped, return dirty... */

	RC = hw_test_rc(mp, reset);				/* Fetch the RC bits and clear if requested */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	return (RC & 1);					/* Return the change bit */
}


/*-----------------------------------------------------------------------
** vmm_protect_page
**
** This function sets the protection bits of a mapped page
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**		prot - protection flags
**
** Outputs:
**		none
**		Protection bits of the mapping are modified
**
-----------------------------------------------------------------------*/

kern_return_t vmm_protect_page(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	vm_prot_t		prot)
{
	vmmCntrlEntry		*CEntry;
	register mapping	*mpv, *mp;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va);	/* Look up the mapping */
	if((unsigned int)mp & 1) {				/* Did we timeout? */
		panic("vmm_protect_page: timeout locking physical entry for virtual address (%08X)\n", va);	/* Yeah, scream about it! */
		return KERN_FAILURE;				/* Bad hair day, return failure... */
	}
	if(!mp) return KERN_SUCCESS;				/* Not mapped, just return... */

	hw_prot_virt(mp, prot);					/* Set the protection */

	mpv = hw_cpv(mp);					/* Convert mapping block to virtual */
	hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);	/* We're done, unlock the physical entry */

	CEntry->vmmLastMap = va & -PAGE_SIZE;			/* Remember the last mapping we changed */
	if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode))
		CEntry->vmmFlags |= vmmMapDone;			/* Set that we did a map operation */

	return KERN_SUCCESS;					/* Return */
}


/*-----------------------------------------------------------------------
** vmm_protect_execute
**
** This function sets the protection bits of a mapped page
** and then directly starts executing.
**
** See description of vmm_protect_page for details.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_protect_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_protect_execute(
	thread_act_t		act,
	vmm_thread_index_t	index,
	vm_offset_t		va,
	vm_prot_t		prot)
{
	kern_return_t		ret;
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Get and validate the index */

	if (CEntry == NULL) return kVmmBogusContext;		/* Return bogus context */

	if (((per_proc_info[cpu_number()].spcFlags) & FamVMmode) && (CEntry != act->mact.vmmCEntry))
		return kVmmBogusContext;			/* Yes, invalid index in Fam */

	ret = vmm_protect_page(act, index, va, prot);		/* Go try to change access */

	if(ret == KERN_SUCCESS) {
		CEntry->vmmFlags |= vmmMapDone;			/* Set that we did a map operation */
		vmm_execute_vm(act, index);			/* Return was ok, launch the VM */
	}

	return kVmmInvalidAddress;				/* We had trouble of some kind (shouldn't happen) */

}


/*-----------------------------------------------------------------------
** vmm_get_float_state
**
** This function causes the current floating point state to
** be saved into the shared context area. It also clears the
** vmmFloatCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_float_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i;
	register struct savearea_fpu *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	act->mact.specFlags &= ~floatCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;	/* Clear the change indication */

	fpu_save(&CEntry->vmmFacCtx);				/* Save context if live */

	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCRshadow.i[0] = CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[0];	/* Copy FPSCR */
	CEntry->vmmContextKern->vmm_proc_state.ppcFPSCRshadow.i[1] = CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[1];	/* Copy FPSCR */

	if(sv = CEntry->vmmFacCtx.FPUsave) {			/* Is there context yet? */
		bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8);	/* 32 registers */
		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {				/* Initialize floating points */
		CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit;	/* Initial value */
	}

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_get_vector_state
**
** This function causes the current vector state to
** be saved into the shared context area. It also clears the
** vmmVectorCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_vector_state(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int			i, j;
	unsigned int		vrvalidwrk;
	register struct savearea_vec *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	vec_save(&CEntry->vmmFacCtx);				/* Save context if live */

	act->mact.specFlags &= ~vectorCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;	/* Clear the change indication */

	for(j = 0; j < 4; j++) {				/* Set value for vscr */
		CEntry->vmmContextKern->vmm_proc_state.ppcVSCRshadow.i[j] = CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j];
	}

	if(sv = CEntry->vmmFacCtx.VMXsave) {			/* Is there context yet? */

		vrvalidwrk = sv->save_vrvalid;			/* Get the valid flags */

		for(i = 0; i < 32; i++) {			/* Copy the saved registers and invalidate the others */
			if(vrvalidwrk & 0x80000000) {		/* Do we have a valid value here? */
				for(j = 0; j < 4; j++) {	/* If so, copy it over */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
				}
			}
			else {
				for(j = 0; j < 4; j++) {	/* Otherwise set to empty value */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
				}
			}

			vrvalidwrk = vrvalidwrk << 1;		/* Shift over to the next */

		}

		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {				/* Initialize vector registers */
		for(j = 0; j < 4; j++) {			/* Do words */
			CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];	/* Initial value */
		}
	}

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_set_timer
**
** This function causes a timer (in AbsoluteTime) to be set for a specific
** time. It also clears the vmmTimerPop flag if the timer is actually set;
** it is cleared otherwise.
**
** A timer is cleared by setting the time to 0. This will clear
** the vmmTimerPop bit. Simply setting the timer to earlier than the
** current time clears the internal timer request, but leaves the
** vmmTimerPop flag set.
**
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**		timerhi - high order word of AbsoluteTime to pop
**		timerlo - low order word of AbsoluteTime to pop
**
** Outputs:
**		timer set, vmmTimerPop cleared
-----------------------------------------------------------------------*/

kern_return_t vmm_set_timer(
	thread_act_t		act,
	vmm_thread_index_t	index,
	unsigned int		timerhi,
	unsigned int		timerlo)
{
	vmmCntrlEntry		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo;

	vmm_timer_pop(act);					/* Go adjust all of the timer stuff */
	return KERN_SUCCESS;					/* Leave now... */
}
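
/*
 *	Illustrative use (hypothetical values, not part of this file):
 *
 *		vmm_set_timer(act, index, hi, lo);	// arm a pop at ((uint64_t)hi << 32) | lo
 *		vmm_set_timer(act, index, 0, 0);	// clear the timer and the vmmTimerPop flag
 */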


/*-----------------------------------------------------------------------
** vmm_get_timer
**
** This function causes the timer for a specified VM to be
** returned in return_params[0] and return_params[1].
**
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Timer value set in return_params[0] and return_params[1].
**		Set to 0 if timer is not set.
-----------------------------------------------------------------------*/

kern_return_t vmm_get_timer(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't vmm thread or the index is bogus */

	CEntry->vmmContextKern->return_params[0] = (CEntry->vmmTimer >> 32);	/* Return the high half of the timer */
	CEntry->vmmContextKern->return_params[1] = (uint32_t)CEntry->vmmTimer;	/* Return the low half of the timer */

	return KERN_SUCCESS;
}


/*-----------------------------------------------------------------------
** vmm_timer_pop
**
** This function causes all timers in the array of VMs to be updated.
** All appropriate flags are set or reset. If a VM is currently
** running and its timer expired, it is intercepted.
**
** The qactTimer value is set to the lowest unexpired timer. It is
** zeroed if all timers are expired or have been reset.
**
** Inputs:
**		act - pointer to current thread activation structure
**
** Outputs:
**		timers set, vmmTimerPop cleared or set
-----------------------------------------------------------------------*/

void vmm_timer_pop(
	thread_act_t		act)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	int					cvi, any;
	uint64_t			now, soonest;
	savearea			*sv;

	if(!((unsigned int)act->mact.vmmControl & 0xFFFFFFFE)) {	/* Are there any virtual machines? */
		panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
	}

	soonest = 0xFFFFFFFFFFFFFFFFULL;				/* Max time */

	clock_get_uptime(&now);							/* What time is it? */

	CTable = act->mact.vmmControl;					/* Make this easier */
	any = 0;										/* Haven't found a running unexpired timer yet */

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Cycle through all and check time now */

		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;	/* Do not check if the entry is empty */

		if(CTable->vmmc[cvi].vmmTimer == 0) {		/* Is the timer reset? */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;					/* Clear timer popped */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Clear timer popped */
			continue;								/* Check next */
		}

		if (CTable->vmmc[cvi].vmmTimer <= now) {	/* Has this timer expired? */
			CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;					/* Set timer popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;	/* Set timer popped here */
			if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->mact.vmmCEntry) {	/* Is this the running VM? */
				sv = find_user_regs(act);			/* Get the user state registers */
				if(!sv) {							/* Did we find something? */
					panic("vmm_timer_pop: no user context; act = %08X\n", act);
				}
				sv->save_exception = kVmmReturnNull*4;	/* Indicate that this is a null exception */
				vmm_force_exit(act, sv);			/* Intercept a running VM */
			}
			continue;								/* Check the rest */
		}
		else {										/* It hasn't popped yet */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;					/* Set timer not popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Set timer not popped here */
		}

		any = 1;									/* Show we found an active unexpired timer */

		if (CTable->vmmc[cvi].vmmTimer < soonest)
			soonest = CTable->vmmc[cvi].vmmTimer;	/* Remember the earliest pop time */
	}

	if(any) {
		if (act->mact.qactTimer == 0 || soonest <= act->mact.qactTimer)
			act->mact.qactTimer = soonest;			/* Set lowest timer */
	}

	return;
}
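
/*
 *	Illustrative sketch (not part of the original source): since a vmmTimer
 *	value of 0 means "timer reset", a caller that wants an immediate pop
 *	should arm a nonzero deadline. The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static kern_return_t example_arm_immediate_pop(
	thread_act_t		act,
	vmm_thread_index_t	index)
{
	uint64_t now;

	clock_get_uptime(&now);							/* Current AbsoluteTime */
	if(now == 0) now = 1;							/* 0 would read back as "timer reset" */

	return vmm_set_timer(act, index,
		(unsigned int)(now >> 32), (unsigned int)now);	/* Pops on the next timer check */
}
#endif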


/*-----------------------------------------------------------------------
** vmm_stop_vm
**
** This function prevents the specified VM(s) from running.
** If any is currently executing, the execution is intercepted
** with a code of kVmmStopped. Note that execution of the VM is
** blocked until a vmmExecuteVM is called with the start flag set to 1.
** This provides the ability for a thread to stop execution of a VM and
** ensure that it will not be run until the emulator has processed the
** "virtual" interruption.
**
** Inputs:
**		vmmask - 32 bit mask corresponding to the VMs to put in stop state
**		NOTE: if this mask is all 0s, any executing VM is intercepted with
**		a kVmmStopped (but not marked stopped); otherwise this is a no-op.
**		Also note that there is a potential race here and the VM may not stop.
**
** Outputs:
**		kernel return code indicating success,
**		or if no VMs are enabled, an invalid syscall exception.
-----------------------------------------------------------------------*/

int vmm_stop_vm(struct savearea *save)
{

	thread_act_t		act;
	vmmCntrlTable		*CTable;
	int					cvi, i;
	task_t				task;
	thread_act_t		fact;
	unsigned int		vmmask;
	ReturnHandler		*stopapc;

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();						/* Figure out who we are */

	task_lock(task);							/* Lock our task */

	fact = (thread_act_t)task->thr_acts.next;	/* Get the first activation on task */
	act = 0;									/* Pretend we didn't find it yet */

	for(i = 0; i < task->thr_act_count; i++) {	/* All of the activations */
		if(fact->mact.vmmControl) {				/* Is this a virtual machine monitor? */
			act = fact;							/* Yeah... */
			break;								/* Bail the loop... */
		}
		fact = (thread_act_t)fact->thr_acts.next;	/* Go to the next one */
	}

	if(!((unsigned int)act)) {					/* See if we have VMMs yet */
		task_unlock(task);						/* No, unlock the task */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;								/* Go generate a syscall exception */
	}

	act_lock_thread(act);						/* Make sure this stays 'round */
	task_unlock(task);							/* Safe to release now */

	CTable = act->mact.vmmControl;				/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {			/* Are there any all the way up yet? */
		act_unlock_thread(act);					/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;								/* Go generate a syscall exception */
	}

	if(!(vmmask = save->save_r3)) {				/* Get the stop mask and check if all zeros */
		act_unlock_thread(act);					/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Set success */
		return 1;								/* Return... */
	}

	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search slots */
		if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) {	/* See if we need to stop and if it is in use */
			hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop);	/* Set this one to stop */
		}
		vmmask = vmmask << 1;					/* Slide mask over */
	}

	if(hw_compare_and_store(0, 1, &act->mact.emPendRupts)) {	/* See if there is already a stop pending and lock out others if not */
		act_unlock_thread(act);					/* Already one pending, unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Say we did it... */
		return 1;								/* Leave */
	}

	if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) {	/* Get a return handler control block */
		act->mact.emPendRupts = 0;				/* No memory, say we have given up request */
		act_unlock_thread(act);					/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
		return 1;								/* Return... */
	}

	ml_set_interrupts_enabled(FALSE);			/* Disable interruptions for now */

	stopapc->handler = vmm_interrupt;			/* Set interruption routine */

	stopapc->next = act->handlers;				/* Put our interrupt at the start of the list */
	act->handlers = stopapc;					/* Point to us */

	act_set_apc(act);							/* Set an APC AST */
	ml_set_interrupts_enabled(TRUE);			/* Enable interruptions now */

	act_unlock_thread(act);						/* Unlock the activation */

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;
}
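
/*
 *	Illustrative sketch (not part of the original source): building the stop
 *	mask scanned above. The loop tests 0x80000000 for slot 0 and shifts the
 *	mask left once per slot, so the most significant bit selects the first
 *	context. Assumes one-based vmm indices; the helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static unsigned int example_stop_mask(vmm_thread_index_t index)
{
	return 0x80000000U >> (index - 1);			/* One-based index -> mask bit */
}
#endif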

/*-----------------------------------------------------------------------
** vmm_interrupt
**
** This function is executed asynchronously from an APC AST.
** It is to be used for anything that needs to interrupt a running VM.
** This includes any kind of interruption generation (other than timer pop)
** or entering the stopped state.
**
** Inputs:
**		ReturnHandler *rh - the return handler control block as required by the APC.
**		thread_act_t act - the activation
**
** Outputs:
**		Whatever needed to be done is done.
-----------------------------------------------------------------------*/

void vmm_interrupt(ReturnHandler *rh, thread_act_t act) {

	vmmCntrlTable		*CTable;
	savearea			*sv;
	boolean_t			inter;

	kfree((vm_offset_t)rh, sizeof(ReturnHandler));	/* Release the return handler block */

	inter = ml_set_interrupts_enabled(FALSE);	/* Disable interruptions for now */

	act->mact.emPendRupts = 0;					/* Say that there are no more interrupts pending */
	CTable = act->mact.vmmControl;				/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {			/* Are we still doing VMs? */
		ml_set_interrupts_enabled(inter);		/* Restore interruptions before bailing */
		return;									/* Leave if we aren't doing VMs any more... */
	}

	if(act->mact.vmmCEntry && (act->mact.vmmCEntry->vmmFlags & vmmXStop)) {	/* Do we need to stop the running guy? */
		sv = find_user_regs(act);				/* Get the user state registers */
		if(!sv) {								/* Did we find something? */
			panic("vmm_interrupt: no user context; act = %08X\n", act);
		}
		sv->save_exception = kVmmStopped*4;		/* Set a "stopped" exception */
		vmm_force_exit(act, sv);				/* Intercept a running VM */
	}
	ml_set_interrupts_enabled(inter);			/* Put interrupts back to what they were */

	return;
}