/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.c
**
** C routines that we are adding to the MacOS X kernel.
**
-----------------------------------------------------------------------*/

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/host_info.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <ppc/exception.h>
#include <ppc/mappings.h>
#include <ppc/thread_act.h>
#include <vm/vm_kern.h>

#include <ppc/vmachmon.h>

extern struct Saveanchor saveanchor;			/* Aligned savearea anchor */
extern double FloatInit;
extern unsigned long QNaNbarbarian[4];

/*************************************************************************************
	Virtual Machine Monitor Internal Routines
**************************************************************************************/
/*-----------------------------------------------------------------------
** vmm_get_entry
**
** This function verifies and returns a vmm context entry index.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a vmmCntrlEntry or 0 if not found
-----------------------------------------------------------------------*/

vmmCntrlEntry *vmm_get_entry(
	thread_act_t 		act,
	vmm_thread_index_t 	index)
{
	vmmCntrlTable		*CTable;
	vmmCntrlEntry		*CEntry;

	index = index & vmmTInum;						/* Clean up the index */

	if (act->mact.vmmControl == 0) return NULL;		/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContexts) return NULL;	/* Index not in range */

	CTable = act->mact.vmmControl;					/* Make the address a bit more convenient */
	CEntry = &CTable->vmmc[index - 1];				/* Point to the entry */

	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;	/* See if the slot is actually in use */

	return CEntry;
}
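
/*
 *	Illustrative sketch (hypothetical helper, not in the original source):
 *	the one-based index convention that vmm_get_entry() enforces above.
 *	kVmmMaxContexts comes from vmachmon.h; the helper name is made up.
 */
static inline int vmm_example_index_in_range(vmm_thread_index_t index)
{
	return (index >= 1) && (index <= kVmmMaxContexts);	/* Valid context indices run 1..kVmmMaxContexts */
}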

/*-----------------------------------------------------------------------
** vmm_get_adsp
**
** This function verifies and returns the pmap for an address space.
** If there is none and the request is valid, a pmap will be created.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index into vmm control table (this is a "one based" value)
**
** Outputs:
**		address of a pmap or 0 if not found or could not be created
**		Note that if there is no pmap for the address space it will be created.
-----------------------------------------------------------------------*/

pmap_t vmm_get_adsp(thread_act_t act, vmm_thread_index_t index)
{
	pmap_t pmap;

	if (act->mact.vmmControl == 0) return NULL;		/* No control table means no vmm */
	if ((index - 1) >= kVmmMaxContexts) return NULL;	/* Index not in range */

	pmap = act->mact.vmmControl->vmmAdsp[index - 1];	/* Get the pmap */
	if(pmap) return pmap;							/* We've got it... */

	pmap = pmap_create(0);							/* Make a fresh one */
	act->mact.vmmControl->vmmAdsp[index - 1] = pmap;	/* Remember it */
/*
 *	Note that if the create fails, we will return a null.
 */
	return pmap;									/* Return it... */
}


/*************************************************************************************
	Virtual Machine Monitor Exported Functionality

	The following routines are used to implement a quick-switch mechanism for
	virtual machines that need to execute within their own processor environment
	(including register and MMU state).
**************************************************************************************/

/*-----------------------------------------------------------------------
** vmm_get_version
**
** This function returns the current version of the virtual machine
** interface. It is divided into two portions. The top 16 bits
** represent the major version number, and the bottom 16 bits
** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
** enough for them.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing major/minor version of
**		the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_version(struct savearea *save)
{
	save->save_r3 = kVmmCurrentVersion;		/* Return the version */
	return 1;
}
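
/*
 *	Illustrative sketch (hypothetical helpers, not in the original source):
 *	splitting the version word returned above into its major (top 16 bits)
 *	and minor (bottom 16 bits) parts, as described in the block comment.
 */
static inline unsigned int vmm_example_major_version(unsigned int version)
{
	return version >> 16;					/* Major version lives in the top halfword */
}

static inline unsigned int vmm_example_minor_version(unsigned int version)
{
	return version & 0xFFFF;				/* Minor version lives in the bottom halfword */
}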


/*-----------------------------------------------------------------------
** vmm_get_features
**
** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
** use this to determine whether they can run on this system.
**
** Inputs:
**		none
**
** Outputs:
**		32-bit number representing functionality supported by this
**		version of the Vmm module
-----------------------------------------------------------------------*/

int vmm_get_features(struct savearea *save)
{
	save->save_r3 = kVmmCurrentFeatures;		/* Return the features */
	if(per_proc_info->pf.Available & pf64Bit) {
		save->save_r3 &= ~kVmmFeature_LittleEndian;	/* No little endian here */
		save->save_r3 |= kVmmFeature_SixtyFourBit;	/* Set that we can do 64-bit */
	}
	return 1;
}
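
/*
 *	Illustrative sketch (hypothetical helper, not in the original source):
 *	how a client might test the feature word returned above before relying
 *	on 64-bit support. kVmmFeature_SixtyFourBit comes from vmachmon.h.
 */
static inline int vmm_example_has_64bit(unsigned int features)
{
	return (features & kVmmFeature_SixtyFourBit) != 0;	/* Set on 64-bit capable processors */
}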


/*-----------------------------------------------------------------------
** vmm_max_addr
**
** This function returns the maximum addressable virtual address supported.
**
** Outputs:
**		Returns max address
-----------------------------------------------------------------------*/

addr64_t vmm_max_addr(thread_act_t act)
{
	return vm_max_address;					/* Return the maximum address */
}

/*-----------------------------------------------------------------------
** vmm_get_XA
**
** This function retrieves the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		Return code is set to the XA flags. If the index is invalid or the
**		context has not been created, we return 0.
-----------------------------------------------------------------------*/

unsigned int vmm_get_XA(
	thread_act_t 		act,
	vmm_thread_index_t 	index)
{
	vmmCntrlEntry 		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return 0;				/* Either this isn't a vmm or the index is bogus */

	return CEntry->vmmXAFlgs;					/* Return the flags */
}
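
/*
 *	Illustrative sketch (hypothetical helper, not in the original source):
 *	testing the XA flag word returned above. vmm64Bit comes from vmachmon.h
 *	and is currently the only architected flag.
 */
static inline int vmm_example_vm_is_64bit(unsigned int xaflags)
{
	return (xaflags & vmm64Bit) != 0;			/* Nonzero if the VM runs in 64-bit mode */
}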

/*-----------------------------------------------------------------------
** vmm_init_context
**
** This function initializes an emulation context. It allocates
** a new pmap (address space) and fills in the initial processor
** state within the specified structure. The structure, mapped
** into the client's logical address space, must be page-aligned.
**
** Inputs:
**		act - pointer to current thread activation
**		version - requested version of the Vmm interface (allowing
**			future versions of the interface to change, but still
**			support older clients)
**		vmm_user_state - pointer to a logical page within the
**			client's address space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

int vmm_init_context(struct savearea *save)
{

	thread_act_t		act;
	vmm_version_t 		version;
	vmm_state_page_t *	vmm_user_state;
	vmmCntrlTable		*CTable;
	vm_offset_t			conkern;
	vmm_state_page_t *	vks;
	ppnum_t				conphys;
	kern_return_t 		ret;
	pmap_t				new_pmap;
	int					cvi, i;
	task_t				task;
	thread_act_t		fact, gact;

	vmm_user_state = CAST_DOWN(vmm_state_page_t *, save->save_r4);	/* Get the user address of the comm area */
	if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {	/* Make sure the comm area is page aligned */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	/* Make sure that the version requested is supported */
	version = save->save_r3;					/* Pick up passed in version */
	if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	if((version & 0xFFFF) > kVmmCurMinorVersion) {	/* Check for valid minor */
		save->save_r3 = KERN_FAILURE;			/* Return failure */
		return 1;
	}

	act = current_act();						/* Pick up our activation */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	task = current_task();						/* Figure out who we are */

	task_lock(task);							/* Lock our task */

	fact = (thread_act_t)task->threads.next;	/* Get the first activation on task */
	gact = 0;									/* Pretend we didn't find it yet */

	for(i = 0; i < task->thread_count; i++) {	/* All of the activations */
		if(fact->mact.vmmControl) {				/* Is this a virtual machine monitor? */
			gact = fact;						/* Yeah... */
			break;								/* Bail the loop... */
		}
		fact = (thread_act_t)fact->task_threads.next;	/* Go to the next one */
	}


/*
 *	We only allow one thread per task to be a virtual machine monitor right now. This solves
 *	a number of potential problems that I can't put my finger on right now.
 *
 *	Ultimately, I think we want to move the controls and make all this task based instead of
 *	thread based. That would allow an emulator architecture to spawn a kernel thread for each
 *	VM (if they want) rather than hand dispatch contexts.
 */

	if(gact && (gact != act)) {					/* Check if another thread is a vmm or trying to be */
		task_unlock(task);						/* Release task lock */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_FAILURE;			/* We must play alone... */
		return 1;
	}

	if(!gact) act->mact.vmmControl = (vmmCntrlTable *)1;	/* Temporarily mark that we are the vmm thread */

	task_unlock(task);							/* Safe to release now (because we've marked ourselves) */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	if ((unsigned int)CTable == 1) {			/* If we are marked, try to allocate a new table, otherwise we have one */
		if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {	/* Get a fresh emulation control table */
			act->mact.vmmControl = 0;			/* Unmark us as vmm 'cause we failed */
			ml_set_interrupts_enabled(FALSE);	/* Set back interruptions */
			save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
			return 1;
		}

		bzero((void *)CTable, sizeof(vmmCntrlTable));	/* Clean it up */
		act->mact.vmmControl = CTable;			/* Initialize the table anchor */
	}

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search to find a free slot */
		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;	/* Bail if we find an unused slot */
	}

	if(cvi >= kVmmMaxContexts) {				/* Did we find one? */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No empty slots... */
		return 1;
	}

	ret = vm_map_wire(							/* Wire the virtual machine monitor's context area */
		act->map,
		(vm_offset_t)vmm_user_state,
		(vm_offset_t)vmm_user_state + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if (ret != KERN_SUCCESS)					/* The wire failed, return the code */
		goto return_in_shame;

	/* Map the vmm state into the kernel's address space. */
	conphys = pmap_find_phys(act->map->pmap, (addr64_t)((uintptr_t)vmm_user_state));

	/* Find a virtual address to use. */
	ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
	if (ret != KERN_SUCCESS) {					/* Did we find an address? */
		(void) vm_map_unwire(act->map,			/* No, unwire the context area */
			(vm_offset_t)vmm_user_state,
			(vm_offset_t)vmm_user_state + PAGE_SIZE,
			TRUE);
		goto return_in_shame;
	}

	/* Map it into the kernel's address space. */

	pmap_enter(kernel_pmap, conkern, conphys,
		VM_PROT_READ | VM_PROT_WRITE,
		VM_WIMG_USE_DEFAULT, TRUE);

	/* Clear the vmm state structure. */
	vks = (vmm_state_page_t *)conkern;
	bzero((char *)vks, PAGE_SIZE);


	/* We're home free now. Simply fill in the necessary info and return. */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = cvi + 1;				/* Tell the user the index for this virtual machine */

	CTable->vmmc[cvi].vmmFlags = vmmInUse;		/* Mark the slot in use and make sure the rest are clear */
	CTable->vmmc[cvi].vmmContextKern = vks;		/* Remember the kernel address of comm area */
	CTable->vmmc[cvi].vmmContextPhys = (vmm_state_page_t *)conphys;	/* Remember the state page physical addr */
	CTable->vmmc[cvi].vmmContextUser = vmm_user_state;	/* Remember user address of comm area */

	CTable->vmmc[cvi].vmmFacCtx.FPUsave = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUlevel = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXsave = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXlevel = 0;	/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0;		/* Clear facility context control */
	CTable->vmmc[cvi].vmmFacCtx.facAct = act;	/* Point back to the activation */

	hw_atomic_add((int *)&saveanchor.savetarget, 2);	/* Account for the number of extra saveareas we think we might "need" */

	if (!(act->map->pmap->pmapFlags & pmapVMhost)) {
		simple_lock(&(act->map->pmap->lock));
		act->map->pmap->pmapFlags |= pmapVMhost;
		simple_unlock(&(act->map->pmap->lock));
	}

	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, hooray... */
	return 1;

return_in_shame:
	if(!gact) kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table if we just allocated it */
	act->mact.vmmControl = 0;					/* Unmark us as vmm 'cause we failed */
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = ret;						/* Pass back return code... */
	return 1;

}
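
/*
 *	Illustrative sketch (hypothetical helper, not in the original source):
 *	the page-alignment requirement that vmm_init_context() enforces on the
 *	comm area, expressed as the check a client could make beforehand.
 */
static inline int vmm_example_comm_area_ok(vm_offset_t user_state)
{
	return ((user_state & (PAGE_SIZE - 1)) == 0);	/* Comm area must start on a page boundary */
}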


/*-----------------------------------------------------------------------
** vmm_tear_down_context
**
** This function uninitializes an emulation context. It deallocates
** internal resources associated with the context block.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		kernel return code indicating success or failure
**
** Strangeness note:
**		This call will also trash the address space with the same ID. While this
**		is really not too cool, we have to do it because we need to make
**		sure that old VMM users (not that we really have any) who depend upon
**		the address space going away with the context still work the same.
-----------------------------------------------------------------------*/

kern_return_t vmm_tear_down_context(
	thread_act_t 		act,
	vmm_thread_index_t 	index)
{
	vmmCntrlEntry 		*CEntry;
	vmmCntrlTable		*CTable;
	int					cvi;
	register savearea 	*sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	hw_atomic_sub((int *)&saveanchor.savetarget, 2);	/* We don't need these extra saveareas anymore */

	if(CEntry->vmmFacCtx.FPUsave) {				/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {				/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}

	CEntry->vmmPmap = 0;						/* Remove this trace */
	if(act->mact.vmmControl->vmmAdsp[index - 1]) {	/* Check if there is an address space assigned here */
		mapping_remove(act->mact.vmmControl->vmmAdsp[index - 1], 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
		pmap_remove(act->mact.vmmControl->vmmAdsp[index - 1], 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
		pmap_destroy(act->mact.vmmControl->vmmAdsp[index - 1]);	/* Toss the pmap for this context */
		act->mact.vmmControl->vmmAdsp[index - 1] = NULL;	/* Clean it up */
	}

	(void) vm_map_unwire(						/* Unwire the user comm page */
		act->map,
		(vm_offset_t)CEntry->vmmContextUser,
		(vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
		FALSE);

	kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);	/* Remove kernel's view of the comm page */

	CTable = act->mact.vmmControl;				/* Get the control table address */
	CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;	/* Make sure we don't try to automap into this */

	CEntry->vmmFlags = 0;						/* Clear out all of the flags for this entry including in use */
	CEntry->vmmContextKern = 0;					/* Clear the kernel address of comm area */
	CEntry->vmmContextUser = 0;					/* Clear the user address of comm area */

	CEntry->vmmFacCtx.FPUsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.facAct = 0;				/* Clear facility context control */

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search to find a free slot */
		if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {	/* Return if there are still some in use */
			ml_set_interrupts_enabled(FALSE);	/* No more interruptions */
			return KERN_SUCCESS;				/* Leave... */
		}
	}

/*
 *	When we have tossed the last context, toss any address spaces left over before releasing
 *	the VMM control block
 */

	for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {	/* Look at all slots */
		if(!act->mact.vmmControl->vmmAdsp[cvi - 1]) continue;	/* Nothing to remove here */
		mapping_remove(act->mact.vmmControl->vmmAdsp[cvi - 1], 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
		pmap_remove(act->mact.vmmControl->vmmAdsp[cvi - 1], 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
		pmap_destroy(act->mact.vmmControl->vmmAdsp[cvi - 1]);	/* Toss the pmap for this context */
		act->mact.vmmControl->vmmAdsp[cvi - 1] = 0;	/* Clear just in case */
	}

	kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table because we tossed the last context */
	act->mact.vmmControl = 0;					/* Unmark us as vmm */

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return KERN_SUCCESS;
}


/*-----------------------------------------------------------------------
** vmm_set_XA
**
** This function sets the eXtended Architecture flags for the specified VM.
**
** We need to return the result in the return code rather than in the return parameters
** because we need an architecture independent format so the results are actually
** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
** 4 for 32-bit.
**
** Note that this function does a lot of the same stuff as vmm_tear_down_context
** and vmm_init_context.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**		flags - the extended architecture flags
**
**
** Outputs:
**		KERN_SUCCESS if vm is valid and initialized. KERN_FAILURE if not.
**		Also, the internal flags are set and, additionally, the VM is completely reset.
-----------------------------------------------------------------------*/

kern_return_t vmm_set_XA(
	thread_act_t 		act,
	vmm_thread_index_t 	index,
	unsigned int 		xaflags)
{
	vmmCntrlEntry 		*CEntry;
	vmmCntrlTable		*CTable;
	vmm_state_page_t 	*vks;
	vmm_version_t 		version;

	if(xaflags & ~vmm64Bit) return KERN_FAILURE;	/* We only support this one kind now */

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */

	if(CEntry->vmmFacCtx.FPUsave) {				/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {				/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);		/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}

	CTable = act->mact.vmmControl;				/* Get the control table address */
	CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;	/* Make sure we don't try to automap into this */

	CEntry->vmmFlags &= vmmInUse;				/* Clear out all of the flags for this entry except in use */
	CEntry->vmmXAFlgs = (xaflags & vmm64Bit) | (CEntry->vmmXAFlgs & ~vmm64Bit);	/* Set the XA flags */
	CEntry->vmmFacCtx.FPUsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;				/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;				/* Clear facility context control */

	vks = CEntry->vmmContextKern;				/* Get address of the context page */
	version = vks->interface_version;			/* Save the version code */
	bzero((char *)vks, 4096);					/* Clear all */

	vks->interface_version = version;			/* Set our version code */
	vks->thread_index = index % vmmTInum;		/* Tell the user the index for this virtual machine */

	ml_set_interrupts_enabled(FALSE);			/* No more interruptions */

	return KERN_SUCCESS;						/* Return the flags */
}


/*-----------------------------------------------------------------------
** vmm_tear_down_all
**
** This function uninitializes all emulation contexts. If there are
** any vmm contexts, it calls vmm_tear_down_context for each one.
**
** Note: this can also be called from normal thread termination. Because of
** that, we will context switch out of an alternate if we are currently in it.
** It will be terminated with no valid return code set because we don't expect
** the activation to ever run again.
**
** Inputs:
**		activation to tear down
**
** Outputs:
**		All vmm contexts released and VMM shut down
-----------------------------------------------------------------------*/
void vmm_tear_down_all(thread_act_t act) {

	vmmCntrlTable		*CTable;
	int					cvi;
	kern_return_t		ret;
	savearea			*save;
	spl_t				s;

	if(act->mact.specFlags & runningVM) {		/* Are we actually in a context right now? */
		save = find_user_regs(act);				/* Find the user state context */
		if(!save) {								/* Did we find it? */
			panic("vmm_tear_down_all: runningVM marked but no user state context\n");
			return;
		}

		save->save_exception = kVmmBogusContext*4;	/* Indicate that this context is bogus now */
		s = splhigh();							/* Make sure interrupts are off */
		vmm_force_exit(act, save);				/* Force an exit from VM state */
		splx(s);								/* Restore interrupts */
	}

	if(CTable = act->mact.vmmControl) {			/* Do we have a vmm control block? */

		for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {	/* Look at all slots */
			if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) {	/* Is this one in use */
				ret = vmm_tear_down_context(act, cvi);	/* Take down the found context */
				if(ret != KERN_SUCCESS) {		/* Did it go away? */
					panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
						ret, act, cvi);
				}
			}
		}

/*
 *	Note that all address spaces should be gone here.
 */
		if(act->mact.vmmControl) {				/* Did we find one? */
			panic("vmm_tear_down_all: control table did not get deallocated\n");	/* Table did not go away */
		}
	}

	return;
}

/*-----------------------------------------------------------------------
** vmm_map_page
**
** This function maps a page from within the client's logical
** address space into the alternate address space.
**
** The page need not be locked or resident. If not resident, it will be faulted
** in by this code, which may take some time. Also, if the page is not locked,
** it, and this mapping, may disappear at any time, even before it gets used. Note also
** that reference and change information is NOT preserved when a page is unmapped, either
** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
** space). This means that if RC is needed, the page MUST be wired.
**
** Note that if there is already a mapping at the address, it is removed and all
** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of address space to map into
**		va - virtual address within the client's address
**			space
**		ava - virtual address within the alternate address
**			space
**		prot - protection flags
**
** Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
** areas are not allowed and will fail. Same with directly mapped I/O areas.
**
** Input conditions:
**		Interrupts disabled (from fast trap)
**
** Outputs:
**		kernel return code indicating success or failure
**		if success, va resident and alternate mapping made
-----------------------------------------------------------------------*/

kern_return_t vmm_map_page(
	thread_act_t 		act,
	vmm_adsp_id_t 		index,
	addr64_t	 		cva,
	addr64_t	 		ava,
	vm_prot_t 			prot)
{
	kern_return_t		ret;
	vmmCntrlEntry 		*CEntry;
	register mapping 	*mp;
	struct phys_entry 	*pp;
	vm_map_t 			map;
	addr64_t			ova, nextva;
	pmap_t				pmap;

	pmap = vmm_get_adsp(act, index);			/* Get the pmap for this address space */
	if(!pmap) return KERN_FAILURE;				/* Bogus address space, no VMs, or we can't make a pmap, failure... */

	if(ava > vm_max_address) return kVmmInvalidAddress;	/* Does the machine support an address of this size? */

	map = current_act()->map;					/* Get the current map */

	while(1) {									/* Keep trying until we get it or until we fail */

		mp = mapping_find(map->pmap, cva, &nextva, 0);	/* Find the mapping for this address */

		if(mp) break;							/* We found it */

		ml_set_interrupts_enabled(TRUE);		/* Enable interruptions */
		ret = vm_fault(map, trunc_page_32((vm_offset_t)cva), VM_PROT_READ | VM_PROT_WRITE, FALSE);	/* Didn't find it, try to fault it in read/write... */
		ml_set_interrupts_enabled(FALSE);		/* Disable interruptions */
		if (ret != KERN_SUCCESS) return KERN_FAILURE;	/* There isn't a page there, return... */
	}

	if(mp->mpFlags & (mpBlock | mpNest | mpSpecial)) {	/* If this is a block, a nest, or some other special thing, we can't map it */
		mapping_drop_busy(mp);					/* We have everything we need from the mapping */
		return KERN_FAILURE;					/* Leave in shame */
	}

	while(1) {									/* Keep trying the enter until it goes in */
		ova = mapping_make(pmap, ava, mp->mpPAddr, 0, 1, prot);	/* Enter the mapping into the pmap */
		if(!ova) break;							/* If there were no collisions, we are done... */
		mapping_remove(pmap, ova);				/* Remove the mapping that collided */
	}

	mapping_drop_busy(mp);						/* We have everything we need from the mapping */

	if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode)) {
		act->mact.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->mact.vmmControl->vmmGFlags = (act->mact.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;	/* Remember last address space */
	}

	return KERN_SUCCESS;
}


/*-----------------------------------------------------------------------
** vmm_map_execute
**
** This function maps a page from within the client's logical
** address space into the alternate address space of the
** Virtual Machine Monitor context and then directly starts executing.
**
** See description of vmm_map_page for details.
**
** Inputs:
**		Index is used for both the context and the address space ID.
**		index[24:31] is the context id and index[16:23] is the address space.
**		If the address space ID is 0, the context ID is used for it.
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_map_execute(
	thread_act_t 		act,
	vmm_thread_index_t 	index,
	addr64_t	 		cva,
	addr64_t	 		ava,
	vm_prot_t 			prot)
{
	kern_return_t		ret;
	vmmCntrlEntry 		*CEntry;
	unsigned int		adsp;
	vmm_thread_index_t	cndx;

	cndx = index & 0xFF;						/* Clean it up */

	CEntry = vmm_get_entry(act, cndx);			/* Get and validate the index */
	if (CEntry == NULL) return kVmmBogusContext;	/* Return bogus context */

	if (((per_proc_info[cpu_number()].spcFlags) & FamVMmode) && (CEntry != act->mact.vmmCEntry))
		return kVmmBogusContext;				/* Yes, invalid index in Fam */

	adsp = (index >> 8) & 0xFF;					/* Get any requested address space */
	if(!adsp) adsp = (index & 0xFF);			/* If 0, use context ID as address space ID */

	ret = vmm_map_page(act, adsp, cva, ava, prot);	/* Go try to map the page on in */

	if(ret == KERN_SUCCESS) {
		act->mact.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->mact.vmmControl->vmmGFlags = (act->mact.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;	/* Remember last address space */
		vmm_execute_vm(act, cndx);				/* Return was ok, launch the VM */
	}

	return ret;									/* We had trouble mapping in the page */
}
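
/*
 *	Illustrative sketch (hypothetical helper, not in the original source):
 *	packing the index word that vmm_map_execute() decodes above. The context
 *	ID occupies the low byte (index[24:31]) and the address space ID the next
 *	byte (index[16:23]); an address space ID of 0 means "use the context ID".
 */
static inline vmm_thread_index_t vmm_example_pack_index(unsigned int context, unsigned int adsp)
{
	return (vmm_thread_index_t)(((adsp & 0xFF) << 8) | (context & 0xFF));
}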

/*-----------------------------------------------------------------------
** vmm_map_list
**
** This function maps a list of pages into various address spaces.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of default address space (used if not specified in list entry)
**		count - number of pages to map
**		flavor - 0 if 32-bit version, 1 if 64-bit
**		vmcpComm in the comm page contains up to kVmmMaxMapPages to map
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxMapPages is exceeded
**		or the vmm_map_page call fails.
**		We return kVmmInvalidAddress if virtual address size is not supported
-----------------------------------------------------------------------*/

kern_return_t vmm_map_list(
	thread_act_t 		act,
	vmm_adsp_id_t 		index,
	unsigned int 		cnt,
	unsigned int 		flavor)
{
	vmmCntrlEntry 		*CEntry;
	kern_return_t		ret;
	unsigned int		i;
	vmmMList			*lst;
	vmmMList64			*lstx;
	addr64_t			cva;
	addr64_t			ava;
	vm_prot_t			prot;
	vmm_adsp_id_t		adsp;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	if(cnt > kVmmMaxMapPages) return KERN_FAILURE;	/* They tried to map too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	lst = (vmmMList *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */
	lstx = (vmmMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */

	for(i = 0; i < cnt; i++) {					/* Step through and map all pages in list */
		if(flavor) {							/* Check if 32- or 64-bit addresses */
			cva = lstx[i].vmlva;				/* Get the 64-bit actual address */
			ava = lstx[i].vmlava;				/* Get the 64-bit guest address */
		}
		else {
			cva = lst[i].vmlva;					/* Get the 32-bit actual address */
			ava = lst[i].vmlava;				/* Get the 32-bit guest address */
		}

		prot = ava & vmmlProt;					/* Extract the protection bits */
		adsp = (ava & vmmlAdID) >> 4;			/* Extract an explicit address space request */
		if(!adsp) adsp = index - 1;				/* If no explicit, use supplied default */
		ava &= 0xFFFFFFFFFFFFF000ULL;			/* Clean up the address */

		ret = vmm_map_page(act, adsp + 1, cva, ava, prot);	/* Map the page into the requested space; vmm_map_page expects a one-based index */
		if(ret != KERN_SUCCESS) return ret;		/* Bail if any error */
	}

	return KERN_SUCCESS;						/* Return... */
}
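
/*
 *	Illustrative sketch (hypothetical helper, not in the original source):
 *	building the guest-address word that vmm_map_list() unpacks above. The
 *	low 12 bits of each vmlava entry carry the protection (vmmlProt) and an
 *	optional explicit address space ID (vmmlAdID), both from vmachmon.h.
 */
static inline addr64_t vmm_example_pack_list_ava(addr64_t guest_va, vm_prot_t prot, unsigned int adsp)
{
	return (guest_va & 0xFFFFFFFFFFFFF000ULL)	/* Page-aligned guest address */
		| (((addr64_t)adsp << 4) & vmmlAdID)	/* Explicit address space request, 0 for the default */
		| ((addr64_t)prot & vmmlProt);			/* Protection bits */
}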

/*-----------------------------------------------------------------------
** vmm_get_page_mapping
**
** This function determines whether the specified VMM
** virtual address is mapped.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the alternate's address
**			space
**
** Outputs:
**		Non-alternate's virtual address (page aligned) or -1 if not mapped or any failure
**
** Note:
**		If there are aliases to the page in the non-alternate address space,
**		this call could return the wrong one. Moral of the story: no aliases.
-----------------------------------------------------------------------*/

addr64_t vmm_get_page_mapping(
	thread_act_t 		act,
	vmm_adsp_id_t 		index,
	addr64_t	 		va)
{
	vmmCntrlEntry 		*CEntry;
	register mapping 	*mp;
	pmap_t				pmap;
	addr64_t			nextva, sva;
	ppnum_t				pa;

	pmap = vmm_get_adsp(act, index);			/* Get and validate the index */
	if (!pmap) return -1;						/* No good, failure... */

	mp = mapping_find(pmap, va, &nextva, 0);	/* Find our page */

	if(!mp) return -1;							/* Not mapped, return -1 */

	pa = mp->mpPAddr;							/* Remember the page address */

	mapping_drop_busy(mp);						/* Go ahead and release the mapping now */

	pmap = current_act()->map->pmap;			/* Get the current pmap */
	sva = mapping_p2v(pmap, pa);				/* Now find the source virtual */

	if(sva != 0) return sva;					/* We found it... */

	panic("vmm_get_page_mapping: could not back-map alternate va (%016llX)\n", va);	/* We are bad wrong if we can't find it */

	return -1;
}

/*-----------------------------------------------------------------------
** vmm_unmap_page
**
** This function unmaps a page from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**
** Outputs:
**		kernel return code indicating success or failure
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_page(
	thread_act_t 		act,
	vmm_adsp_id_t 		index,
	addr64_t	 		va)
{
	vmmCntrlEntry 		*CEntry;
	addr64_t			nadd;
	pmap_t				pmap;
	kern_return_t		kern_result = KERN_SUCCESS;

	pmap = vmm_get_adsp(act, index);			/* Get and validate the index */
	if (!pmap) return -1;						/* No good, failure... */

	nadd = mapping_remove(pmap, va);			/* Toss the mapping */

	return ((nadd & 1) ? KERN_FAILURE : KERN_SUCCESS);	/* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_list
**
** This function unmaps a list of pages from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		count - number of pages to release
**		flavor - 0 if 32-bit, 1 if 64-bit
**		vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
**
** Outputs:
**		kernel return code indicating success or failure
**		KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
-----------------------------------------------------------------------*/

kern_return_t vmm_unmap_list(
	thread_act_t 		act,
	vmm_adsp_id_t 		index,
	unsigned int 		cnt,
	unsigned int 		flavor)
{
	vmmCntrlEntry 		*CEntry;
	kern_return_t		kern_result = KERN_SUCCESS;
	unsigned int		*pgaddr, i;
	addr64_t			gva;
	vmmUMList			*lst;
	vmmUMList64			*lstx;
	pmap_t				pmap;
	int					adsp;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't a vmm or the index is bogus */

	if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE;	/* They tried to unmap too many */
	if(!cnt) return KERN_SUCCESS;				/* If they said none, we're done... */

	lst = (vmmUMList *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */
	lstx = (vmmUMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];	/* Point to the first entry */

	for(i = 0; i < cnt; i++) {					/* Step and release all pages in list */
		if(flavor) {							/* Check if 32- or 64-bit addresses */
			gva = lstx[i].vmlava;				/* Get the 64-bit guest address */
		}
		else {
			gva = lst[i].vmlava;				/* Get the 32-bit guest address */
		}

		adsp = (gva & vmmlAdID) >> 4;			/* Extract an explicit address space request */
		if(!adsp) adsp = index - 1;				/* If no explicit, use supplied default */
		pmap = act->mact.vmmControl->vmmAdsp[adsp];	/* Get the pmap for this request */
		if(!pmap) continue;						/* Ain't nuthin' mapped here, no durn map... */

		gva &= 0xFFFFFFFFFFFFF000ULL;			/* Clean up the address */
		(void)mapping_remove(pmap, gva);		/* Toss the mapping */
	}

	return KERN_SUCCESS;						/* Return... */
}

/*-----------------------------------------------------------------------
** vmm_unmap_all_pages
**
** This function unmaps all pages from the alternate's logical
** address space.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of context state
**
** Outputs:
**		none
**
** Note:
**		All pages are unmapped, but the address space (i.e., pmap) is still alive
-----------------------------------------------------------------------*/

void vmm_unmap_all_pages(
	thread_act_t 		act,
	vmm_adsp_id_t 		index)
{
	vmmCntrlEntry 		*CEntry;
	pmap_t				pmap;

	pmap = vmm_get_adsp(act, index);			/* Convert index to entry */
	if (!pmap) return;							/* Either this isn't vmm thread or the index is bogus */

/*
 *	Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
 */
	mapping_remove(pmap, 0xFFFFFFFFFFFFF000LL);	/* Remove final page explicitly because we might have mapped it */
	pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000LL);	/* Remove all entries from this map */
	return;
}


/*-----------------------------------------------------------------------
** vmm_get_page_dirty_flag
**
** This function returns the changed flag of the page
** and optionally clears the flag.
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**		reset - Clears dirty if true, untouched if not
**
** Outputs:
**		the dirty bit
**		clears the dirty bit in the pte if requested
**
** Note:
**		The RC bits are merged into the global physical entry
-----------------------------------------------------------------------*/

boolean_t vmm_get_page_dirty_flag(
	thread_act_t 		act,
	vmm_adsp_id_t 		index,
	addr64_t	 		va,
	unsigned int		reset)
{
	vmmCntrlEntry 		*CEntry;
	register mapping 	*mpv, *mp;
	unsigned int		RC;
	pmap_t				pmap;

	pmap = vmm_get_adsp(act, index);			/* Convert index to entry */
	if (!pmap) return 1;						/* Either this isn't vmm thread or the index is bogus */

	RC = hw_test_rc(pmap, (addr64_t)va, reset);	/* Fetch the RC bits and clear if requested */

	switch (RC & mapRetCode) {					/* Decode return code */

		case mapRtOK:							/* Changed */
			return ((RC & (unsigned int)mpC) == (unsigned int)mpC);	/* Return if dirty or not */
			break;

		case mapRtNotFnd:						/* Didn't find it */
			return 1;							/* Return dirty */
			break;

		default:
			panic("vmm_get_page_dirty_flag: hw_test_rc failed - rc = %d, pmap = %08X, va = %016llX\n", RC, pmap, va);

	}

	return 1;									/* Return the change bit */
}


/*-----------------------------------------------------------------------
** vmm_protect_page
**
** This function sets the protection bits of a mapped page
**
** Inputs:
**		act - pointer to current thread activation
**		index - index of vmm state for this page
**		va - virtual address within the vmm's address
**			space
**		prot - Protection flags
**
** Outputs:
**		none
**		Protection bits of the mapping are modified
**
-----------------------------------------------------------------------*/

kern_return_t vmm_protect_page(
	thread_act_t 		act,
	vmm_adsp_id_t 		index,
	addr64_t	 		va,
	vm_prot_t			prot)
{
	vmmCntrlEntry 		*CEntry;
	addr64_t			nextva;
	int					ret;
	pmap_t				pmap;

	pmap = vmm_get_adsp(act, index);			/* Convert index to entry */
	if (!pmap) return KERN_FAILURE;				/* Either this isn't vmm thread or the index is bogus */

	ret = hw_protect(pmap, va, prot, &nextva);	/* Try to change the protect here */

	switch (ret) {								/* Decode return code */

		case mapRtOK:							/* All ok... */
			break;								/* Outta here */

		case mapRtNotFnd:						/* Didn't find it */
			return KERN_SUCCESS;				/* Ok, return... */
			break;

		default:
			panic("vmm_protect_page: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, (addr64_t)va);

	}

	if (!((per_proc_info[cpu_number()].spcFlags) & FamVMmode)) {
		act->mact.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->mact.vmmControl->vmmGFlags = (act->mact.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;	/* Remember last address space */
	}

	return KERN_SUCCESS;						/* Return */
}


/*-----------------------------------------------------------------------
** vmm_protect_execute
**
** This function sets the protection bits of a mapped page
** and then directly starts executing.
**
** See description of vmm_protect_page for details
**
** Inputs:
**		See vmm_protect_page and vmm_map_execute
**
** Outputs:
**		Normal exit is to run the VM. Abnormal exit is triggered via a
**		non-KERN_SUCCESS return from vmm_map_page or later during the
**		attempt to transition into the VM.
-----------------------------------------------------------------------*/

vmm_return_code_t vmm_protect_execute(
	thread_act_t 		act,
	vmm_thread_index_t 	index,
	addr64_t	 		va,
	vm_prot_t			prot)
{
	kern_return_t		ret;
	vmmCntrlEntry 		*CEntry;
	unsigned int		adsp;
	vmm_thread_index_t	cndx;

	cndx = index & 0xFF;						/* Clean it up */
	CEntry = vmm_get_entry(act, cndx);			/* Get and validate the index */
	if (CEntry == NULL) return kVmmBogusContext;	/* Return bogus context */

	adsp = (index >> 8) & 0xFF;					/* Get any requested address space */
	if(!adsp) adsp = (index & 0xFF);			/* If 0, use context ID as address space ID */

	if (((per_proc_info[cpu_number()].spcFlags) & FamVMmode) && (CEntry != act->mact.vmmCEntry))
		return kVmmBogusContext;				/* Yes, invalid index in Fam */

	ret = vmm_protect_page(act, adsp, va, prot);	/* Go try to change access */

	if(ret == KERN_SUCCESS) {
		act->mact.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;	/* Remember the last mapping we made */
		act->mact.vmmControl->vmmGFlags = (act->mact.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;	/* Remember last address space */
		vmm_execute_vm(act, cndx);				/* Return was ok, launch the VM */
	}

	return ret;									/* We had trouble of some kind (shouldn't happen) */
}


/*-----------------------------------------------------------------------
** vmm_get_float_state
**
** This function causes the current floating point state to
** be saved into the shared context area. It also clears the
** vmmFloatCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_float_state(
	thread_act_t 		act,
	vmm_thread_index_t 	index)
{
	vmmCntrlEntry 		*CEntry;
	vmmCntrlTable		*CTable;
	int					i;
	register struct savearea_fpu *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */

	act->mact.specFlags &= ~floatCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;	/* Clear the change indication */

	fpu_save(&CEntry->vmmFacCtx);				/* Save context if live */

	if(sv = CEntry->vmmFacCtx.FPUsave) {		/* Is there context yet? */
		bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8);	/* 32 registers */
		return KERN_SUCCESS;
	}


	for(i = 0; i < 32; i++) {					/* Initialize floating points */
		CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit;	/* Initial value */
	}

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_get_vector_state
**
** This function causes the current vector state to
** be saved into the shared context area. It also clears the
** vmmVectorCngd changed flag.
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**
** Outputs:
**		context saved
-----------------------------------------------------------------------*/

kern_return_t vmm_get_vector_state(
	thread_act_t 		act,
	vmm_thread_index_t 	index)
{
	vmmCntrlEntry 		*CEntry;
	vmmCntrlTable		*CTable;
	int					i, j;
	unsigned int 		vrvalidwrk;
	register struct savearea_vec *sv;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */

	vec_save(&CEntry->vmmFacCtx);				/* Save context if live */

	act->mact.specFlags &= ~vectorCng;			/* Clear the special flag */
	CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;	/* Clear the change indication */

	if(sv = CEntry->vmmFacCtx.VMXsave) {		/* Is there context yet? */

		vrvalidwrk = sv->save_vrvalid;			/* Get the valid flags */

		for(i = 0; i < 32; i++) {				/* Copy the saved registers and invalidate the others */
			if(vrvalidwrk & 0x80000000) {		/* Do we have a valid value here? */
				for(j = 0; j < 4; j++) {		/* If so, copy it over */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
				}
			}
			else {
				for(j = 0; j < 4; j++) {		/* Otherwise set to empty value */
					CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
				}
			}

			vrvalidwrk = vrvalidwrk << 1;		/* Shift over to the next */

		}

		return KERN_SUCCESS;
	}

	for(i = 0; i < 32; i++) {					/* Initialize vector registers */
		for(j = 0; j < 4; j++) {				/* Do words */
			CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];	/* Initial value */
		}
	}

	return KERN_SUCCESS;
}

/*-----------------------------------------------------------------------
** vmm_set_timer
**
** This function causes a timer (in AbsoluteTime) to be set for a
** specific time. It also clears the vmmTimerPop flag if the timer
** is actually set; it is cleared otherwise.
**
** A timer is cleared by setting the time to 0. This will clear
** the vmmTimerPop bit. Simply setting the timer to earlier than the
** current time clears the internal timer request, but leaves the
** vmmTimerPop flag set.
**
**
** Inputs:
**		act - pointer to current thread activation structure
**		index - index returned by vmm_init_context
**		timerhi - high order word of AbsoluteTime to pop
**		timerlo - low order word of AbsoluteTime to pop
**
** Outputs:
**		timer set, vmmTimerPop cleared
-----------------------------------------------------------------------*/

kern_return_t vmm_set_timer(
	thread_act_t 		act,
	vmm_thread_index_t 	index,
	unsigned int 		timerhi,
	unsigned int 		timerlo)
{
	vmmCntrlEntry 		*CEntry;

	CEntry = vmm_get_entry(act, index);			/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */

	CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo;

	vmm_timer_pop(act);							/* Go adjust all of the timer stuff */
	return KERN_SUCCESS;						/* Leave now... */
}
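
/*
 *	Illustrative sketch (hypothetical helper, not in the original source):
 *	splitting a 64-bit AbsoluteTime deadline into the timerhi/timerlo words
 *	that vmm_set_timer() reassembles above. Passing 0 for both words clears
 *	the timer.
 */
static inline void vmm_example_split_timer(uint64_t deadline, unsigned int *timerhi, unsigned int *timerlo)
{
	*timerhi = (unsigned int)(deadline >> 32);	/* High order word */
	*timerlo = (unsigned int)(deadline & 0xFFFFFFFFULL);	/* Low order word */
}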
1375
1376
1377/*-----------------------------------------------------------------------
1378** vmm_get_timer
1379**
1380** This function causes the timer for a specified VM to be
1381** returned in return_params[0] and return_params[1].
55e303ae
A
1382** Note that this is kind of funky for 64-bit VMs because we
1383** split the timer into two parts so that we still set parms 0 and 1.
1384** Obviously, we don't need to do this because the parms are 8 bytes
1385** wide.
1c79356b
A
1386**
1387**
1388** Inputs:
1389** act - pointer to current thread activation structure
1390** index - index returned by vmm_init_context
1391**
1392** Outputs:
1393** Timer value set in return_params[0] and return_params[1].
1394** Set to 0 if timer is not set.
1395-----------------------------------------------------------------------*/

kern_return_t vmm_get_timer(
	thread_act_t 		act,
	vmm_thread_index_t 	index)
{
	vmmCntrlEntry 		*CEntry;

	CEntry = vmm_get_entry(act, index);				/* Convert index to entry */
	if (CEntry == NULL) return KERN_FAILURE;		/* Either this isn't a vmm thread or the index is bogus */

	if(CEntry->vmmXAFlgs & vmm64Bit) {				/* A 64-bit virtual machine? */
		CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32);	/* Return the high word of the timer */
		CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[1] = (uint32_t)CEntry->vmmTimer;			/* Return the low word of the timer */
	}
	else {
		CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32);	/* Return the high word of the timer */
		CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[1] = (uint32_t)CEntry->vmmTimer;			/* Return the low word of the timer */
	}

	return KERN_SUCCESS;
}
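
/*
** Illustrative sketch (hypothetical, not compiled): how the emulator side
** might reassemble the two 32-bit return parms that vmm_get_timer stores
** into a single 64-bit AbsoluteTime. "ret32" stands in for the 32-bit
** return-parameter area the caller sees.
*/
#if 0
uint64_t timer;

timer = ((uint64_t)ret32->return_params[0] << 32) | ret32->return_params[1];	/* High word, then low word */
if (timer == 0) { /* Timer is not set */ }
#endif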


/*-----------------------------------------------------------------------
** vmm_timer_pop
**
** This function causes all timers in the array of VMs to be updated.
** All appropriate flags are set or reset. If a VM is currently
** running and its timer expired, it is intercepted.
**
** The qactTimer value is set to the lowest unexpired timer. It is
** zeroed if all timers are expired or have been reset.
**
** Inputs:
**		act - pointer to current thread activation structure
**
** Outputs:
**		timers set, vmmTimerPop cleared or set
-----------------------------------------------------------------------*/

void vmm_timer_pop(
	thread_act_t 		act)
{
	vmmCntrlTable		*CTable;
	int					cvi, any;
	uint64_t			now, soonest;
	savearea			*sv;

	if(!((unsigned int)act->mact.vmmControl & 0xFFFFFFFE)) {	/* Are there any virtual machines? */
		panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
	}

	soonest = 0xFFFFFFFFFFFFFFFFULL;				/* Max time */

	clock_get_uptime(&now);							/* What time is it? */

	CTable = act->mact.vmmControl;					/* Make this easier */
	any = 0;										/* Haven't found a running unexpired timer yet */

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Cycle through all and check time now */

		if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;	/* Do not check if the entry is empty */

		if(CTable->vmmc[cvi].vmmTimer == 0) {		/* Is the timer reset? */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;				/* Clear timer popped */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Clear timer popped */
			continue;								/* Check next */
		}

		if (CTable->vmmc[cvi].vmmTimer <= now) {
			CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;				/* Set timer popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;	/* Set timer popped here */
			if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->mact.vmmCEntry) {	/* Is this the running VM? */
				sv = find_user_regs(act);			/* Get the user state registers */
				if(!sv) {							/* Did we find something? */
					panic("vmm_timer_pop: no user context; act = %08X\n", act);
				}
				sv->save_exception = kVmmReturnNull*4;	/* Indicate that this is a null exception */
				vmm_force_exit(act, sv);			/* Intercept a running VM */
			}
			continue;								/* Check the rest */
		}
		else {										/* It hasn't popped yet */
			CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;				/* Set timer not popped here */
			CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Set timer not popped here */
		}

		any = 1;									/* Show we found an active unexpired timer */

		if (CTable->vmmc[cvi].vmmTimer < soonest)
			soonest = CTable->vmmc[cvi].vmmTimer;
	}

	if(any) {
		if (act->mact.qactTimer == 0 || soonest <= act->mact.qactTimer)
			act->mact.qactTimer = soonest;			/* Set lowest timer */
	}

	return;
}
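
/*
** Illustrative sketch (hypothetical, not compiled): the per-entry decision
** vmm_timer_pop makes above, reduced to its three cases for one timer.
*/
#if 0
uint64_t timer, now, soonest;

if (timer == 0) {						/* Reset: vmmTimerPop is cleared, entry skipped */
}
else if (timer <= now) {				/* Expired: vmmTimerPop is set; a running VM is intercepted */
}
else {									/* Unexpired: vmmTimerPop is cleared, soonest is tracked */
	if (timer < soonest) soonest = timer;
}
#endif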



/*-----------------------------------------------------------------------
** vmm_stop_vm
**
** This function prevents the specified VM(s) from running.
** If any is currently executing, the execution is intercepted
** with a code of kVmmStopped. Note that execution of the VM is
** blocked until a vmmExecuteVM is called with the start flag set to 1.
** This provides the ability for a thread to stop execution of a VM and
** ensure that it will not be run until the emulator has processed the
** "virtual" interruption.
**
** Inputs:
**		vmmask - 32 bit mask corresponding to the VMs to put in stop state
**		NOTE: if this mask is all 0s, the call is a no-op; otherwise any
**		executing VM whose mask bit is set is intercepted with a
**		kVmmStopped. Also note that there is a potential race here and
**		the VM may not stop.
**
** Outputs:
**		kernel return code indicating success
**		or if no VMs are enabled, an invalid syscall exception.
-----------------------------------------------------------------------*/

int vmm_stop_vm(struct savearea *save)
{

	thread_act_t 		act;
	vmmCntrlTable 		*CTable;
	int					cvi, i;
	task_t				task;
	thread_act_t		fact;
	unsigned int		vmmask;
	ReturnHandler		*stopapc;

	ml_set_interrupts_enabled(TRUE);				/* This can take a bit of time so pass interruptions */

	task = current_task();							/* Figure out who we are */

	task_lock(task);								/* Lock our task */

	fact = (thread_act_t)task->threads.next;		/* Get the first activation on task */
	act = 0;										/* Pretend we didn't find it yet */

	for(i = 0; i < task->thread_count; i++) {		/* All of the activations */
		if(fact->mact.vmmControl) {					/* Is this a virtual machine monitor? */
			act = fact;								/* Yeah... */
			break;									/* Bail the loop... */
		}
		fact = (thread_act_t)fact->task_threads.next;	/* Go to the next one */
	}

	if(!((unsigned int)act)) {						/* See if we have VMMs yet */
		task_unlock(task);							/* No, unlock the task */
		ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
		return 0;									/* Go generate a syscall exception */
	}

	act_lock_thread(act);							/* Make sure this stays 'round */
	task_unlock(task);								/* Safe to release now */

	CTable = act->mact.vmmControl;					/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {				/* Are there any all the way up yet? */
		act_unlock_thread(act);						/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
		return 0;									/* Go generate a syscall exception */
	}

	if(!(vmmask = save->save_r3)) {					/* Get the stop mask and check if all zeros */
		act_unlock_thread(act);						/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;				/* Set success */
		return 1;									/* Return... */
	}

	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search slots */
		if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) {	/* See if we need to stop and if it is in use */
			hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop);	/* Set this one to stop */
		}
		vmmask = vmmask << 1;						/* Slide mask over */
	}

	if(hw_compare_and_store(0, 1, &act->mact.emPendRupts)) {	/* See if there is already a stop pending and lock out others if not */
		act_unlock_thread(act);						/* Already one pending, unlock the activation */
		ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;				/* Say we did it... */
		return 1;									/* Leave */
	}

	if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) {	/* Get a return handler control block */
		act->mact.emPendRupts = 0;					/* No memory, say we have given up request */
		act_unlock_thread(act);						/* Unlock the activation */
		ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;		/* No storage... */
		return 1;									/* Return... */
	}

	ml_set_interrupts_enabled(FALSE);				/* Disable interruptions for now */

	stopapc->handler = vmm_interrupt;				/* Set interruption routine */

	stopapc->next = act->handlers;					/* Put our interrupt at the start of the list */
	act->handlers = stopapc;						/* Point to us */

	act_set_apc(act);								/* Set an APC AST */
	ml_set_interrupts_enabled(TRUE);				/* Enable interruptions now */

	act_unlock_thread(act);							/* Unlock the activation */

	ml_set_interrupts_enabled(FALSE);				/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;					/* Hip, hip, hooray... */
	return 1;
}
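
/*
** Illustrative sketch (hypothetical, not compiled): how an emulator might
** build the r3 stop mask for vmm_stop_vm. The loop above consumes the mask
** from the high-order bit down, so context slot cvi corresponds to bit
** (0x80000000 >> cvi), and a one-based context index maps to (index - 1).
*/
#if 0
unsigned int vmmask;

vmmask = 0x80000000 >> (index - 1);		/* Stop just the VM with this one-based index */
vmmask = 0xFFFFFFFF;					/* Or: stop every context slot */
#endif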

/*-----------------------------------------------------------------------
** vmm_interrupt
**
** This function is executed asynchronously from an APC AST.
** It is to be used for anything that needs to interrupt a running VM.
** This includes any kind of interruption generation (other than timer pop)
** or entering the stopped state.
**
** Inputs:
**		ReturnHandler *rh - the return handler control block as required by the APC.
**		thread_act_t act - the activation
**
** Outputs:
**		Whatever needed to be done is done.
-----------------------------------------------------------------------*/

void vmm_interrupt(ReturnHandler *rh, thread_act_t act) {

	vmmCntrlTable 		*CTable;
	savearea			*sv;
	boolean_t			inter;

	kfree((vm_offset_t)rh, sizeof(ReturnHandler));	/* Release the return handler block */

	inter = ml_set_interrupts_enabled(FALSE);		/* Disable interruptions for now */

	act->mact.emPendRupts = 0;						/* Say that there are no more interrupts pending */
	CTable = act->mact.vmmControl;					/* Get the pointer to the table */

	if(!((unsigned int)CTable & -2)) {				/* Leave if we aren't doing VMs any more... */
		ml_set_interrupts_enabled(inter);			/* But first put interrupts back to what they were */
		return;
	}

	if(act->mact.vmmCEntry && (act->mact.vmmCEntry->vmmFlags & vmmXStop)) {	/* Do we need to stop the running guy? */
		sv = find_user_regs(act);					/* Get the user state registers */
		if(!sv) {									/* Did we find something? */
			panic("vmm_interrupt: no user context; act = %08X\n", act);
		}
		sv->save_exception = kVmmStopped*4;			/* Set a "stopped" exception */
		vmm_force_exit(act, sv);					/* Intercept a running VM */
	}
	ml_set_interrupts_enabled(inter);				/* Put interrupts back to what they were */

	return;
}
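
/*
** Illustrative sketch (hypothetical, not compiled): how vmm_interrupt gets
** scheduled. This mirrors the ReturnHandler/APC sequence used by
** vmm_stop_vm above; the names come from that code.
*/
#if 0
ReturnHandler *rh;

rh = (ReturnHandler *)kalloc(sizeof(ReturnHandler));	/* Get a handler block */
rh->handler = vmm_interrupt;			/* Run vmm_interrupt from the APC AST */
rh->next = act->handlers;				/* Push onto the activation's handler list */
act->handlers = rh;
act_set_apc(act);						/* Request the APC AST for this activation */
#endif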