/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.h
**
** C routines that we are adding to the MacOS X kernel.
**
-----------------------------------------------------------------------*/

#include <ppc/exception.h>

#ifndef _VEMULATION_H_
#define _VEMULATION_H_

/*************************************************************************************
    External Emulation Types
**************************************************************************************/

typedef union vmm_vector_register_t {
    unsigned long       i[4];
    unsigned short      s[8];
    unsigned char       b[16];
} vmm_vector_register_t;

typedef union vmm_fp_register_t {
    double              d;
    unsigned long       i[2];
    unsigned short      s[4];
    unsigned char       b[8];
} vmm_fp_register_t;


typedef struct vmm_regs32_t {

    unsigned long       ppcPC;              /* 000 */
    unsigned long       ppcMSR;             /* 004 */

    unsigned long       ppcGPRs[32];        /* 008 */

    unsigned long       ppcCR;              /* 088 */
    unsigned long       ppcXER;             /* 08C */
    unsigned long       ppcLR;              /* 090 */
    unsigned long       ppcCTR;             /* 094 */
    unsigned long       ppcMQ;              /* 098 - Obsolete */
    unsigned long       ppcVRSave;          /* 09C */
    unsigned long       ppcRsrvd0A0[40];    /* 0A0 */
                                            /* 140 */
} vmm_regs32_t;

#pragma pack(4)     /* Make sure the structure stays as we defined it */
typedef struct vmm_regs64_t {

    unsigned long long  ppcPC;              /* 000 */
    unsigned long long  ppcMSR;             /* 008 */

    unsigned long long  ppcGPRs[32];        /* 010 */

    unsigned long long  ppcXER;             /* 110 */
    unsigned long long  ppcLR;              /* 118 */
    unsigned long long  ppcCTR;             /* 120 */
    unsigned long       ppcCR;              /* 128 */
    unsigned long       ppcVRSave;          /* 12C */
    unsigned long       ppcRsvd130[4];      /* 130 */
                                            /* 140 */
} vmm_regs64_t;
#pragma pack()
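
/*
 * Illustrative sketch (not part of the original header): the offset comments
 * above imply that each register image occupies 0x140 bytes, assuming the
 * 32-bit PowerPC ABI where unsigned long is 4 bytes and unsigned long long
 * is 8 bytes. A hypothetical compile-time consistency check:
 *
 *    extern char vmm_regs32_size_check[(sizeof(vmm_regs32_t) == 0x140) ? 1 : -1];
 *    extern char vmm_regs64_size_check[(sizeof(vmm_regs64_t) == 0x140) ? 1 : -1];
 */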


#pragma pack(4)     /* Make sure the structure stays as we defined it */
typedef union vmm_regs_t {
    vmm_regs32_t        ppcRegs32;
    vmm_regs64_t        ppcRegs64;
} vmm_regs_t;
#pragma pack()

#pragma pack(4)     /* Make sure the structure stays as we defined it */
typedef struct vmm_processor_state_t {
                                            /* 32-byte bndry */
    vmm_regs_t              ppcRegs;        /* Define registers areas */

/* We must be 16-byte aligned here */

    vmm_vector_register_t   ppcVRs[32];     /* These are only valid after a kVmmGetVectorState */
    vmm_vector_register_t   ppcVSCR;        /* This is always loaded/saved at host/guest transition */

/* We must be 8-byte aligned here */

    vmm_fp_register_t       ppcFPRs[32];    /* These are only valid after a kVmmGetFloatState */
    vmm_fp_register_t       ppcFPSCR;       /* This is always loaded/saved at host/guest transition */
    unsigned long           ppcReserved2[2];    /* Pad out to multiple of 16 bytes */
} vmm_processor_state_t;
#pragma pack()

typedef unsigned long vmm_return_code_t;

typedef unsigned long vmm_thread_index_t;
#define vmmTInum    0x000000FF
#define vmmTIadsp   0x0000FF00
typedef unsigned long vmm_adsp_id_t;
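
/*
 * Illustrative sketch (not part of the original header): a vmm_thread_index_t
 * packs the monitor context number in its low byte and the associated guest
 * address space ID in the next byte, per the masks above. Hypothetical code
 * to pull the two fields apart:
 *
 *    unsigned int ctx_number  = index & vmmTInum;           // which monitor context
 *    unsigned int ctx_adspace = (index & vmmTIadsp) >> 8;   // associated guest address space ID
 */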

enum {
    kVmmCurMajorVersion     = 0x0001,
    kVmmCurMinorVersion     = 0x0007,
    kVmmMinMajorVersion     = 0x0001,
};
#define kVmmCurrentVersion ((kVmmCurMajorVersion << 16) | kVmmCurMinorVersion)

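/*
 * Illustrative sketch (not part of the original header): as kVmmCurrentVersion
 * shows, the version word packs the major number in the upper 16 bits and the
 * minor number in the lower 16 bits. A hypothetical client-side compatibility
 * check against the value returned by kVmmGetVersion:
 *
 *    vmm_version_t version;                         // value returned by the kVmmGetVersion call
 *    unsigned int  major = (version >> 16) & 0xFFFF;
 *    unsigned int  minor = version & 0xFFFF;        // informational only in this sketch
 *    int compatible = (major >= kVmmMinMajorVersion);
 */
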
typedef unsigned long vmm_features_t;
enum {
    kVmmFeature_LittleEndian        = 0x00000001,
    kVmmFeature_Stop                = 0x00000002,
    kVmmFeature_ExtendedMapping     = 0x00000004,
    kVmmFeature_ListMapping         = 0x00000008,
    kVmmFeature_FastAssist          = 0x00000010,
    kVmmFeature_XA                  = 0x00000020,
    kVmmFeature_SixtyFourBit        = 0x00000040,
    kVmmFeature_MultAddrSpace       = 0x00000080,
    kVmmFeature_GuestShadowAssist   = 0x00000100,   /* Guest->physical shadow hash table */
    kVmmFeature_GlobalMappingAssist = 0x00000200,   /* Global shadow mapping support */
    kVmmFeature_HostShadowAssist    = 0x00000400,   /* Linear shadow mapping of an area of
                                                       host virtual as guest physical */
    kVmmFeature_MultAddrSpaceAssist = 0x00000800,   /* Expanded pool of guest virtual
                                                       address spaces */
};
#define kVmmCurrentFeatures (kVmmFeature_LittleEndian | kVmmFeature_Stop | kVmmFeature_ExtendedMapping \
    | kVmmFeature_ListMapping | kVmmFeature_FastAssist | kVmmFeature_XA \
    | kVmmFeature_GuestShadowAssist)

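/*
 * Illustrative sketch (not part of the original header): a client can test
 * individual bits of the feature word returned by kVmmvGetFeatures before
 * relying on an optional capability. Hypothetical usage:
 *
 *    vmm_features_t features;                       // value returned by the kVmmvGetFeatures call
 *    if (features & kVmmFeature_SixtyFourBit) {
 *        // 64-bit guests and the 64-bit mapping calls are available
 *    }
 */
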
enum {
    vmm64Bit    = 0x80000000,   /* Make guest 64-bit */
    vmmGSA      = 0x40000000,   /* Enable guest shadow assist (GSA) */
    vmmGMA      = 0x20000000,   /* Enable global shadow mapping assist (GMA) */
};

#define kVmmSupportedSetXA (vmm64Bit | vmmGSA | vmmGMA)
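
/*
 * Illustrative sketch (not part of the original header): these bits form the
 * flag word accepted by the kVmmActivateXA/kVmmDeactivateXA calls (see the
 * kernel-side vmm_activate_XA() declaration below). A hypothetical fragment
 * that requests a 64-bit guest with guest shadow assist:
 *
 *    unsigned int xaflags = vmm64Bit | vmmGSA;      // desired extended-architecture features
 *    if (xaflags & ~kVmmSupportedSetXA)
 *        xaflags &= kVmmSupportedSetXA;             // drop anything outside the supported set
 */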

typedef unsigned long vmm_version_t;

typedef struct vmm_ret_parms32_t {
    unsigned long       return_params[4];
} vmm_ret_parms32_t;

typedef struct vmm_ret_parms64_t {
    unsigned long long  return_params[4];
} vmm_ret_parms64_t;

#pragma pack(4)     /* Make sure the structure stays as we defined it */
typedef union vmm_ret_parms_t {
    vmm_ret_parms64_t   vmmrp64;        /* 64-bit flavor */
    vmm_ret_parms32_t   vmmrp32;        /* 32-bit flavor */
    unsigned int        retgas[11];     /* Force this to be 11 words long */
} vmm_ret_parms_t;
#pragma pack()

#pragma pack(4)     /* Make sure the structure stays as we defined it */
typedef struct vmm_fastassist_state32_t {
    unsigned long       fastassist_dispatch;
    unsigned long       fastassist_refcon;

    unsigned long       fastassist_dispatch_code;
    unsigned long       fastassist_parameter[5];

    unsigned long       guest_register[8];

    unsigned long       guest_pc;
    unsigned long       guest_msr;

    unsigned long       fastassist_intercepts;
    unsigned long       fastassist_reserved1;
} vmm_fastassist_state32_t;

typedef struct vmm_fastassist_state64_t {
    unsigned long long  fastassist_dispatch;
    unsigned long long  fastassist_refcon;

    unsigned long long  fastassist_dispatch_code;
    unsigned long long  fastassist_parameter[5];

    unsigned long long  guest_register[8];

    unsigned long long  guest_pc;
    unsigned long long  guest_msr;

    unsigned long       fastassist_intercepts;
    unsigned long       fastassist_reserved1;
} vmm_fastassist_state64_t;

typedef union vmm_fastassist_state_t {
    vmm_fastassist_state64_t    vmmfs64;    /* 64-bit flavor */
    vmm_fastassist_state32_t    vmmfs32;    /* 32-bit flavor */
} vmm_fastassist_state_t;
#pragma pack()

#pragma pack(4)     /* Make sure the structure stays as we defined it */
typedef struct vmm_state_page_t {
    /* This structure must remain below 4Kb (one page) in size */
    vmm_version_t       interface_version;
    vmm_thread_index_t  thread_index;
    unsigned int        vmmStat;        /* Note: this field is identical to vmmFlags in vmmCntrlEntry */
    unsigned int        vmmCntrl;
#define vmmFloatLoad    0x80000000
#define vmmFloatLoadb   0
#define vmmVectLoad     0x40000000
#define vmmVectLoadb    1
#define vmmVectVRall    0x20000000
#define vmmVectVRallb   2
#define vmmVectVAss     0x10000000
#define vmmVectVAssb    3
#define vmmXStart       0x08000000
#define vmmXStartb      4
#define vmmKey          0x04000000
#define vmmKeyb         5
#define vmmFamEna       0x02000000
#define vmmFamEnab      6
#define vmmFamSet       0x01000000
#define vmmFamSetb      7

    vmm_return_code_t   return_code;
    vmm_ret_parms_t     vmmRet;

    /* The next portion of the structure must remain 32-byte aligned */
    vmm_processor_state_t   vmm_proc_state;

    /* The next portion of the structure must remain 16-byte aligned */
    vmm_fastassist_state_t  vmm_fastassist_state;

} vmm_state_page_t;
#pragma pack()
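
/*
 * Illustrative sketch (not part of the original header): before launching a
 * guest with kVmmExecuteVM, a client sets bits in vmmCntrl to ask the monitor
 * to load additional context, and afterwards inspects return_code. A
 * hypothetical fragment, with `state` standing for the mapped state page:
 *
 *    vmm_state_page_t *state;                          // points at the guest's state page (setup not shown)
 *    state->vmmCntrl |= (vmmFloatLoad | vmmVectLoad);  // ask for FP and vector context on the next entry
 *    // ... invoke kVmmExecuteVM for state->thread_index, then examine state->return_code
 *    //     and any details reported through state->vmmRet ...
 */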

#pragma pack(4)     /* Make sure the structure stays as we defined it */
typedef struct vmm_comm_page_t {
    union {
        vmm_state_page_t    vmcpState;      /* Reserve area for state */
        unsigned int        vmcpPad[768];   /* Reserve space for 3/4 page state area */
    } vmcpfirst;
    unsigned int    vmcpComm[256];          /* Define last 1024 bytes as a communications area - function specific */
} vmm_comm_page_t;
#pragma pack()
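
/*
 * Illustrative sketch (not part of the original header): the union pads the
 * state area to 768 words (3072 bytes) and vmcpComm adds another 256 words
 * (1024 bytes), so the communications page is exactly 4096 bytes, matching
 * the one-page limit noted in vmm_state_page_t. A hypothetical check:
 *
 *    extern char vmm_comm_page_size_check[(sizeof(vmm_comm_page_t) == 4096) ? 1 : -1];
 */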

enum {
    /* Function Indices (passed in r3) */
    kVmmGetVersion = 0,         /* Get VMM system version */
    kVmmvGetFeatures,           /* Get VMM supported features */
    kVmmInitContext,            /* Initialize a context */
    kVmmTearDownContext,        /* Destroy a context */
    kVmmTearDownAll,            /* Destroy all contexts */
    kVmmMapPage,                /* Map a host to guest address space */
    kVmmGetPageMapping,         /* Get host address of a guest page */
    kVmmUnmapPage,              /* Unmap a guest page */
    kVmmUnmapAllPages,          /* Unmap all pages in a guest address space */
    kVmmGetPageDirtyFlag,       /* Check if guest page modified */
    kVmmGetFloatState,          /* Retrieve guest floating point context */
    kVmmGetVectorState,         /* Retrieve guest vector context */
    kVmmSetTimer,               /* Set a guest timer */
    kVmmGetTimer,               /* Get a guest timer */
    kVmmExecuteVM,              /* Launch a guest */
    kVmmProtectPage,            /* Set protection attributes for a guest page */
    kVmmMapExecute,             /* Map guest page and launch */
    kVmmProtectExecute,         /* Set prot attributes and launch */
    kVmmMapList,                /* Map a list of pages into guest address spaces */
    kVmmUnmapList,              /* Unmap a list of pages from guest address spaces */
    kvmmExitToHost,             /* Exit from FAM to host -- fast-path syscall */
    kvmmResumeGuest,            /* Resume guest from FAM -- fast-path syscall */
    kvmmGetGuestRegister,       /* Get guest register from FAM -- fast-path syscall */
    kvmmSetGuestRegister,       /* Set guest register from FAM -- fast-path syscall */

    kVmmActivateXA,             /* Activate extended architecture features for a VM */
    kVmmDeactivateXA,           /* Deactivate extended architecture features for a VM */
    kVmmGetXA,                  /* Get extended architecture features from a VM */

    kVmmMapPage64,              /* Map a host to guest address space - supports 64-bit */
    kVmmGetPageMapping64,       /* Get host address of a guest page - supports 64-bit */
    kVmmUnmapPage64,            /* Unmap a guest page - supports 64-bit */
    kVmmGetPageDirtyFlag64,     /* Check if guest page modified - supports 64-bit */
    kVmmProtectPage64,          /* Set protection attributes for a guest page - supports 64-bit */
    kVmmMapExecute64,           /* Map guest page and launch - supports 64-bit */
    kVmmProtectExecute64,       /* Set prot attributes and launch - supports 64-bit */
    kVmmMapList64,              /* Map a list of pages into guest address spaces - supports 64-bit */
    kVmmUnmapList64,            /* Unmap a list of pages from guest address spaces - supports 64-bit */
    kVmmMaxAddr,                /* Returns the maximum virtual address that is mappable */

    kVmmSetGuestMemory,         /* Sets base and extent of guest physical memory in host address space */
    kVmmPurgeLocal,             /* Purges all non-global mappings for a given guest address space */
};
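
/*
 * Illustrative sketch (not part of the original header): user programs reach
 * these functions through a system call whose selector is passed in r3, as
 * the comment above notes. Assuming a hypothetical user-level wrapper named
 * vmm_call() that takes the selector followed by that call's own arguments:
 *
 *    vmm_version_t  version  = vmm_call(kVmmGetVersion);
 *    vmm_features_t features = vmm_call(kVmmvGetFeatures);
 *    // ... set up a context with kVmmInitContext, then run it:
 *    vmm_call(kVmmExecuteVM, index);
 */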

#define kVmmReturnNull              0
#define kVmmBogusContext            1
#define kVmmStopped                 2
#define kVmmReturnDataPageFault     3
#define kVmmReturnInstrPageFault    4
#define kVmmReturnAlignmentFault    6
#define kVmmReturnProgramException  7
#define kVmmReturnSystemCall        12
#define kVmmReturnTraceException    13
#define kVmmAltivecAssist           22
#define kVmmInvalidAddress          0x1000
#define kVmmInvalidAdSpace          0x1001
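
/*
 * Illustrative sketch (not part of the original header): a monitor's run loop
 * typically switches on return_code after each kVmmExecuteVM to decide how to
 * service the exit. Hypothetical fragment:
 *
 *    switch (state->return_code) {
 *    case kVmmReturnNull:            break;  // nothing pending; just re-enter the guest
 *    case kVmmReturnDataPageFault:           // map or protect the faulting guest page, then re-enter
 *        break;
 *    case kVmmReturnSystemCall:              // emulate the guest's sc instruction
 *        break;
 *    case kVmmStopped:                       // another thread asked the monitor to stop this guest
 *        break;
 *    default:
 *        break;
 *    }
 */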

/*
 * Notes on guest address spaces.
 *
 * Address spaces are loosely coupled to virtual machines. The default is for
 * a guest with an index of 1 to use address space 1, 2 to use 2, etc. However,
 * any guest may be launched using any address space, and any address space may be the
 * target of a map or unmap function. Note that the (un)map list functions may pass in
 * an address space ID on a page-by-page basis.
 *
 * An address space is instantiated either explicitly, by mapping something into it, or
 * implicitly, by launching a guest with it.
 *
 * An address space is destroyed explicitly by kVmmTearDownAll or kVmmUnmapAllPages. It is
 * destroyed implicitly by kVmmTearDownContext. The latter is done in order to remain
 * backwards compatible with the previous implementation, which does not have decoupled
 * guests and address spaces.
 *
 * An address space supports the maximum virtual address supported by the processor.
 * The 64-bit variants of the mapping functions can be used on non-64-bit machines. If an
 * unmappable address (e.g., an address larger than 4GB-1 on a 32-bit machine) is requested,
 * the operation fails with a kVmmInvalidAddress return code.
 *
 * Note that for 64-bit calls, both host and guest addresses are specified as 64-bit values.
 *
 */

/*
 * Storage Extended Protection modes
 * Notes:
 *    To keep compatibility, vmmKey and the PPC key have reversed meanings,
 *    i.e., vmmKey 0 is PPC key 1 and vice versa.
 *
 *                        vmmKey                          Notes
 *    Mode                0                1
 *
 *    kVmmProtNARW        not accessible   read/write    VM_PROT_NONE (not settable via VM calls)
 *    kVmmProtRORW        read only        read/write
 *    kVmmProtRWRW        read/write       read/write    VM_PROT_WRITE or (VM_PROT_WRITE | VM_PROT_READ)
 *    kVmmProtRORO        read only        read only     VM_PROT_READ
 *
 */

#define kVmmProtXtnd 0x00000008
#define kVmmProtNARW (kVmmProtXtnd | 0x00000000)
#define kVmmProtRORW (kVmmProtXtnd | 0x00000001)
#define kVmmProtRWRW (kVmmProtXtnd | 0x00000002)
#define kVmmProtRORO (kVmmProtXtnd | 0x00000003)
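
/*
 * Illustrative sketch (not part of the original header): these extended modes
 * are passed where a vm_prot_t is expected (see vmm_protect_page() below);
 * kVmmProtXtnd marks the value as an extended-protection request. Hypothetical
 * usage that makes a guest page read-only under vmmKey 0:
 *
 *    vm_prot_t prot = kVmmProtRORW;    // read-only for vmmKey 0, read/write for vmmKey 1
 *    // pass `prot` along with the guest address to kVmmProtectPage or kVmmProtectPage64
 */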

/*
 * Map list formats
 * The last 12 bits in the guest virtual address are used as flags as follows:
 *    0x007 - for the map calls, this is the key to set
 *    0x3F0 - for both map and unmap, this is the address space ID upon which to operate.
 *            Note that if 0, the address space ID from the function call is used instead.
 */

typedef struct vmmMList {
    unsigned int        vmlva;      /* Virtual address in host address space */
    unsigned int        vmlava;     /* Virtual address in guest address space */
} vmmMList;

typedef struct vmmMList64 {
    unsigned long long  vmlva;      /* Virtual address in host address space */
    unsigned long long  vmlava;     /* Virtual address in guest address space */
} vmmMList64;

typedef struct vmmUMList {
    unsigned int        vmlava;     /* Virtual address in guest address space */
} vmmUMList;

typedef struct vmmUMList64 {
    unsigned long long  vmlava;     /* Virtual address in guest address space */
} vmmUMList64;

#define vmmlFlgs    0x00000FFF      /* Flags passed in vmlava low order 12 bits */
#define vmmlProt    0x00000007      /* Protection flags for the page */
#define vmmlAdID    0x000003F0      /* Guest address space ID - used only if non-zero */
#define vmmlGlob    0x00000400      /* Mapping is global */
#define vmmlRsvd    0x00000800      /* Reserved for future */
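
/*
 * Illustrative sketch (not part of the original header): a map-list entry
 * carries its per-page flags in the low 12 bits of the guest address, so the
 * address itself must be page aligned. Hypothetical construction of one
 * 64-bit entry (host_page_addr and guest_page_addr are made-up names):
 *
 *    vmmMList64 entry;
 *    entry.vmlva  = host_page_addr;                  // page-aligned host virtual address
 *    entry.vmlava = guest_page_addr                  // page-aligned guest virtual address ...
 *                 | (2 << 4)                         // ... vmmlAdID: target address space 2
 *                 | (kVmmProtRORW & vmmlProt);       // ... vmmlProt: protection key for the page
 *    // a kVmmMapList64 call then takes an array of such entries plus a count
 */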

/*************************************************************************************
    Internal Emulation Types
**************************************************************************************/

#define kVmmMaxContexts     32
#define kVmmMaxUnmapPages   64
#define kVmmMaxMapPages     64

#pragma pack(4)     /* Make sure the structure stays as we defined it */
typedef struct vmmCntrlEntry {          /* Virtual Machine Monitor control table entry */
    unsigned int        vmmFlags;       /* Assorted control flags */
#define vmmInUse        0x80000000
#define vmmInUseb       0
#define vmmFloatCngd    0x40000000
#define vmmFloatCngdb   1
#define vmmVectCngd     0x20000000
#define vmmVectCngdb    2
#define vmmTimerPop     0x10000000
#define vmmTimerPopb    3
#define vmmFAMmode      0x04000000
#define vmmFAMmodeb     5
#define vmmXStop        0x00800000
#define vmmXStopb       8
#define vmmSpfSave      0x000000FF
#define vmmSpfSaveb     24
    unsigned int        vmmXAFlgs;          /* Extended Architecture flags */
    vmm_state_page_t    *vmmContextKern;    /* Kernel address of context communications area */
    ppnum_t             vmmContextPhys;     /* Physical address of context communications area */
    vmm_state_page_t    *vmmContextUser;    /* User address of context communications area */
    facility_context    vmmFacCtx;          /* Header for vector and floating point contexts */
    pmap_t              vmmPmap;            /* Last dispatched pmap */
    uint64_t            vmmTimer;           /* Last set timer value. Zero means unset */
    unsigned int        vmmFAMintercept;    /* FAM intercepted exceptions */
} vmmCntrlEntry;
#pragma pack()

#pragma pack(4)     /* Make sure the structure stays as we defined it */
typedef struct vmmCntrlTable {          /* Virtual Machine Monitor Control table */
    unsigned int        vmmGFlags;      /* Global flags */
#define vmmLastAdSp 0xFF                /* Remember the address space that was mapped last */
    addr64_t            vmmLastMap;     /* Last vaddr mapping made */
    vmmCntrlEntry       vmmc[kVmmMaxContexts];      /* One entry for each possible Virtual Machine Monitor context */
    pmap_t              vmmAdsp[kVmmMaxContexts];   /* Guest address space pmaps */
} vmmCntrlTable;
#pragma pack()
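
/*
 * Illustrative sketch (not part of the original header): kernel-side code
 * indexes into vmmc[] with the context number carried in a
 * vmm_thread_index_t. A hypothetical lookup, with `table` standing for a
 * thread's vmmCntrlTable and contexts assumed to be numbered from 1:
 *
 *    unsigned int   cndx  = index & vmmTInum;          // context number from the thread index
 *    vmmCntrlEntry *entry = &table->vmmc[cndx - 1];    // slot 0 holds context number 1
 *    if (!(entry->vmmFlags & vmmInUse)) {
 *        // context is not active
 *    }
 */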

/* function decls for kernel level routines... */
extern void vmm_execute_vm(thread_t act, vmm_thread_index_t index);
extern kern_return_t vmm_tear_down_context(thread_t act, vmm_thread_index_t index);
extern kern_return_t vmm_get_float_state(thread_t act, vmm_thread_index_t index);
extern kern_return_t vmm_get_vector_state(thread_t act, vmm_thread_index_t index);
extern kern_return_t vmm_set_timer(thread_t act, vmm_thread_index_t index, unsigned int timerhi, unsigned int timerlo);
extern kern_return_t vmm_get_timer(thread_t act, vmm_thread_index_t index);
extern void vmm_tear_down_all(thread_t act);
extern kern_return_t vmm_map_page(thread_t act, vmm_thread_index_t hindex, addr64_t cva,
    addr64_t ava, vm_prot_t prot);
extern vmm_return_code_t vmm_map_execute(thread_t act, vmm_thread_index_t hindex, addr64_t cva,
    addr64_t ava, vm_prot_t prot);
extern kern_return_t vmm_protect_page(thread_t act, vmm_thread_index_t hindex, addr64_t va,
    vm_prot_t prot);
extern vmm_return_code_t vmm_protect_execute(thread_t act, vmm_thread_index_t hindex, addr64_t va,
    vm_prot_t prot);
extern addr64_t vmm_get_page_mapping(thread_t act, vmm_thread_index_t index,
    addr64_t va);
extern kern_return_t vmm_unmap_page(thread_t act, vmm_thread_index_t index, addr64_t va);
extern void vmm_unmap_all_pages(thread_t act, vmm_thread_index_t index);
extern boolean_t vmm_get_page_dirty_flag(thread_t act, vmm_thread_index_t index,
    addr64_t va, unsigned int reset);
extern kern_return_t vmm_activate_XA(thread_t act, vmm_thread_index_t index, unsigned int xaflags);
extern kern_return_t vmm_deactivate_XA(thread_t act, vmm_thread_index_t index, unsigned int xaflags);
extern unsigned int vmm_get_XA(thread_t act, vmm_thread_index_t index);
extern int vmm_get_features(struct savearea *);
extern int vmm_get_version(struct savearea *);
extern int vmm_init_context(struct savearea *);
extern int vmm_dispatch(struct savearea *);
extern int vmm_exit(thread_t act, struct savearea *);
extern void vmm_force_exit(thread_t act, struct savearea *);
extern int vmm_stop_vm(struct savearea *save);
extern void vmm_timer_pop(thread_t act);
extern void vmm_interrupt(ReturnHandler *rh, thread_t act);
extern kern_return_t vmm_map_list(thread_t act, vmm_thread_index_t index, unsigned int cnt, unsigned int flavor);
extern kern_return_t vmm_unmap_list(thread_t act, vmm_thread_index_t index, unsigned int cnt, unsigned int flavor);
extern vmm_return_code_t vmm_resume_guest(vmm_thread_index_t index, unsigned long pc,
    unsigned long vmmCntrl, unsigned long vmmCntrMaskl);
extern vmm_return_code_t vmm_exit_to_host(vmm_thread_index_t index);
extern unsigned long vmm_get_guest_register(vmm_thread_index_t index, unsigned long reg_index);
extern vmm_return_code_t vmm_set_guest_register(vmm_thread_index_t index, unsigned long reg_index, unsigned long reg_value);
extern addr64_t vmm_max_addr(thread_t act);
extern kern_return_t vmm_set_guest_memory(thread_t act, vmm_thread_index_t index, addr64_t base, addr64_t extent);
extern kern_return_t vmm_purge_local(thread_t act, vmm_thread_index_t index);

#endif