/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.h
**
** C routines that we are adding to the MacOS X kernel.
**
-----------------------------------------------------------------------*/

#include <ppc/exception.h>

#ifndef _VEMULATION_H_
#define _VEMULATION_H_

/*************************************************************************************
	External Emulation Types
**************************************************************************************/

typedef union vmm_vector_register_t {
	unsigned long		i[4];
	unsigned short		s[8];
	unsigned char		b[16];
} vmm_vector_register_t;

typedef union vmm_fp_register_t {
	double				d;
	unsigned long		i[2];
	unsigned short		s[4];
	unsigned char		b[8];
} vmm_fp_register_t;

typedef struct vmm_regs32_t {

	unsigned long		ppcPC;				/* 000 */
	unsigned long		ppcMSR;				/* 004 */

	unsigned long		ppcGPRs[32];		/* 008 */

	unsigned long		ppcCR;				/* 088 */
	unsigned long		ppcXER;				/* 08C */
	unsigned long		ppcLR;				/* 090 */
	unsigned long		ppcCTR;				/* 094 */
	unsigned long		ppcMQ;				/* 098 - Obsolete */
	unsigned long		ppcVRSave;			/* 09C */
	unsigned long		ppcRsrvd0A0[40];	/* 0A0 */
											/* 140 */
} vmm_regs32_t;

#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef struct vmm_regs64_t {

	unsigned long long	ppcPC;				/* 000 */
	unsigned long long	ppcMSR;				/* 008 */

	unsigned long long	ppcGPRs[32];		/* 010 */

	unsigned long long	ppcXER;				/* 110 */
	unsigned long long	ppcLR;				/* 118 */
	unsigned long long	ppcCTR;				/* 120 */
	unsigned long		ppcCR;				/* 128 */
	unsigned long		ppcVRSave;			/* 12C */
	unsigned long		ppcRsvd130[4];		/* 130 */
											/* 140 */
} vmm_regs64_t;
#pragma pack()

#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef union vmm_regs_t {
	vmm_regs32_t		ppcRegs32;
	vmm_regs64_t		ppcRegs64;
} vmm_regs_t;
#pragma pack()

#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef struct vmm_processor_state_t {
											/* 32-byte bndry */
	vmm_regs_t			ppcRegs;			/* Define registers areas */

/* We must be 16-byte aligned here */

	vmm_vector_register_t	ppcVRs[32];		/* These are only valid after a kVmmGetVectorState */
	vmm_vector_register_t	ppcVSCR;		/* This is always loaded/saved at host/guest transition */

/* We must be 8-byte aligned here */

	vmm_fp_register_t	ppcFPRs[32];		/* These are only valid after a kVmmGetFloatState */
	vmm_fp_register_t	ppcFPSCR;			/* This is always loaded/saved at host/guest transition */
	unsigned long		ppcReserved2[2];	/* Pad out to multiple of 16 bytes */
} vmm_processor_state_t;
#pragma pack()

typedef unsigned long vmm_return_code_t;

typedef unsigned long vmm_thread_index_t;
#define vmmTInum	0x000000FF
#define vmmTIadsp	0x0000FF00
typedef unsigned long vmm_adsp_id_t;

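/*
 * Illustrative sketch (not part of this interface): a vmm_thread_index_t
 * carries the context number in its low byte and, optionally, a guest
 * address space ID in the next byte.  A caller might split or build one
 * like this (the shift count of 8 is an assumption matching vmmTIadsp):
 *
 *	unsigned long ctx  = index & vmmTInum;			// context number
 *	vmm_adsp_id_t adsp = (index & vmmTIadsp) >> 8;	// 0 selects the default space
 *	vmm_thread_index_t idx = (adsp << 8) | ctx;		// target a specific address space
 */
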
enum {
	kVmmCurMajorVersion		= 0x0001,
	kVmmCurMinorVersion		= 0x0007,
	kVmmMinMajorVersion		= 0x0001,
};
#define kVmmCurrentVersion ((kVmmCurMajorVersion << 16) | kVmmCurMinorVersion)

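/*
 * Illustrative sketch (an assumption, not part of this interface): comparing
 * a version word returned by kVmmGetVersion against this header.
 *
 *	vmm_version_t v = ...;					// value returned by kVmmGetVersion
 *	unsigned int major = v >> 16;
 *	unsigned int minor = v & 0xFFFF;
 *	if (major < kVmmMinMajorVersion || major > kVmmCurMajorVersion)
 *		return KERN_FAILURE;				// interface too old or too new for this client
 */
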
typedef unsigned long vmm_features_t;
enum {
	kVmmFeature_LittleEndian		= 0x00000001,
	kVmmFeature_Stop				= 0x00000002,
	kVmmFeature_ExtendedMapping		= 0x00000004,
	kVmmFeature_ListMapping			= 0x00000008,
	kVmmFeature_FastAssist			= 0x00000010,
	kVmmFeature_XA					= 0x00000020,
	kVmmFeature_SixtyFourBit		= 0x00000040,
	kVmmFeature_MultAddrSpace		= 0x00000080,
	kVmmFeature_GuestShadowAssist	= 0x00000100,	/* Guest->physical shadow hash table */
	kVmmFeature_GlobalMappingAssist	= 0x00000200,	/* Global shadow mapping support */
	kVmmFeature_HostShadowAssist	= 0x00000400,	/* Linear shadow mapping of an area of
													   host virtual as guest physical */
	kVmmFeature_MultAddrSpaceAssist	= 0x00000800,	/* Expanded pool of guest virtual
													   address spaces */
};
#define kVmmCurrentFeatures (kVmmFeature_LittleEndian | kVmmFeature_Stop | kVmmFeature_ExtendedMapping \
							| kVmmFeature_ListMapping | kVmmFeature_FastAssist | kVmmFeature_XA \
							| kVmmFeature_GuestShadowAssist)

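/*
 * Illustrative sketch (an assumption, not part of this interface): gating a
 * 64-bit guest on the feature mask returned by kVmmvGetFeatures.
 *
 *	vmm_features_t feats = ...;				// value returned by kVmmvGetFeatures
 *	if (!(feats & kVmmFeature_SixtyFourBit))
 *		fall_back_to_32bit_guest();			// hypothetical client-side fallback
 */
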
enum {
	vmm64Bit	= 0x80000000,				/* Make guest 64-bit */
	vmmGSA		= 0x40000000,				/* Enable guest shadow assist (GSA) */
	vmmGMA		= 0x20000000,				/* Enable global shadow mapping assist (GMA) */
};

#define kVmmSupportedSetXA (vmm64Bit | vmmGSA | vmmGMA)

typedef unsigned long vmm_version_t;

typedef struct vmm_ret_parms32_t {
	unsigned long		return_params[4];
} vmm_ret_parms32_t;

typedef struct vmm_ret_parms64_t {
	unsigned long long	return_params[4];
} vmm_ret_parms64_t;

#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef union vmm_ret_parms_t {
	vmm_ret_parms64_t	vmmrp64;			/* 64-bit flavor */
	vmm_ret_parms32_t	vmmrp32;			/* 32-bit flavor */
	unsigned int		retgas[11];			/* Force this to be 11 words long */
} vmm_ret_parms_t;
#pragma pack()

#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef struct vmm_fastassist_state32_t {
	unsigned long		fastassist_dispatch;
	unsigned long		fastassist_refcon;

	unsigned long		fastassist_dispatch_code;
	unsigned long		fastassist_parameter[5];

	unsigned long		guest_register[8];

	unsigned long		guest_pc;
	unsigned long		guest_msr;

	unsigned long		fastassist_intercepts;
	unsigned long		fastassist_reserved1;
} vmm_fastassist_state32_t;

typedef struct vmm_fastassist_state64_t {
	unsigned long long	fastassist_dispatch;
	unsigned long long	fastassist_refcon;

	unsigned long long	fastassist_dispatch_code;
	unsigned long long	fastassist_parameter[5];

	unsigned long long	guest_register[8];

	unsigned long long	guest_pc;
	unsigned long long	guest_msr;

	unsigned long		fastassist_intercepts;
	unsigned long		fastassist_reserved1;
} vmm_fastassist_state64_t;

typedef union vmm_fastassist_state_t {
	vmm_fastassist_state64_t vmmfs64;		/* 64-bit flavor */
	vmm_fastassist_state32_t vmmfs32;		/* 32-bit flavor */
} vmm_fastassist_state_t;
#pragma pack()

#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef struct vmm_state_page_t {
	/* This structure must remain below 4Kb (one page) in size */
	vmm_version_t		interface_version;
	vmm_thread_index_t	thread_index;
	unsigned int		vmmStat;			/* Note: this field is identical to vmmFlags in vmmCntrlEntry */
	unsigned int		vmmCntrl;
#define vmmFloatLoad	0x80000000
#define vmmFloatLoadb	0
#define vmmVectLoad		0x40000000
#define vmmVectLoadb	1
#define vmmVectVRall	0x20000000
#define vmmVectVRallb	2
#define vmmVectVAss		0x10000000
#define vmmVectVAssb	3
#define vmmXStart		0x08000000
#define vmmXStartb		4
#define vmmKey			0x04000000
#define vmmKeyb			5
#define vmmFamEna		0x02000000
#define vmmFamEnab		6
#define vmmFamSet		0x01000000
#define vmmFamSetb		7

	vmm_return_code_t	return_code;
	vmm_ret_parms_t		vmmRet;

	/* The next portion of the structure must remain 32-byte aligned */
	vmm_processor_state_t	vmm_proc_state;

	/* The next portion of the structure must remain 16-byte aligned */
	vmm_fastassist_state_t	vmm_fastassist_state;

} vmm_state_page_t;
#pragma pack()

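/*
 * Illustrative sketch (an assumption, not part of this interface): before
 * launching with kVmmExecuteVM a client might request that the floating point
 * and vector facilities be loaded, and afterwards read the guest PC back from
 * the 32-bit register flavor.
 *
 *	vmm_state_page_t *sp = ...;				// user address of the context area
 *	sp->vmmCntrl |= (vmmFloatLoad | vmmVectLoad);
 *	// ... invoke kVmmExecuteVM here ...
 *	unsigned long guest_pc = sp->vmm_proc_state.ppcRegs.ppcRegs32.ppcPC;
 */
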
#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef struct vmm_comm_page_t {
	union {
		vmm_state_page_t	vmcpState;		/* Reserve area for state */
		unsigned int		vmcpPad[768];	/* Reserve space for 3/4 page state area */
	} vmcpfirst;
	unsigned int		vmcpComm[256];		/* Define last 1024 bytes as a communications area - function specific */
} vmm_comm_page_t;
#pragma pack()

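/*
 * Layout note with an illustrative compile-time check (an assumption, not part
 * of this interface): vmcpPad reserves 768 * 4 = 3072 bytes (3/4 of a 4096-byte
 * page) and vmcpComm the remaining 256 * 4 = 1024 bytes, so the structure
 * fills exactly one page.
 *
 *	typedef char vmm_comm_page_size_check[(sizeof(vmm_comm_page_t) == 4096) ? 1 : -1];
 */
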
enum {
	/* Function Indices (passed in r3) */
	kVmmGetVersion = 0,			/* Get VMM system version */
	kVmmvGetFeatures,			/* Get VMM supported features */
	kVmmInitContext,			/* Initialize a context */
	kVmmTearDownContext,		/* Destroy a context */
	kVmmTearDownAll,			/* Destroy all contexts */
	kVmmMapPage,				/* Map a host page to a guest address space */
	kVmmGetPageMapping,			/* Get host address of a guest page */
	kVmmUnmapPage,				/* Unmap a guest page */
	kVmmUnmapAllPages,			/* Unmap all pages in a guest address space */
	kVmmGetPageDirtyFlag,		/* Check if guest page modified */
	kVmmGetFloatState,			/* Retrieve guest floating point context */
	kVmmGetVectorState,			/* Retrieve guest vector context */
	kVmmSetTimer,				/* Set a guest timer */
	kVmmGetTimer,				/* Get a guest timer */
	kVmmExecuteVM,				/* Launch a guest */
	kVmmProtectPage,			/* Set protection attributes for a guest page */
	kVmmMapExecute,				/* Map guest page and launch */
	kVmmProtectExecute,			/* Set prot attributes and launch */
	kVmmMapList,				/* Map a list of pages into guest address spaces */
	kVmmUnmapList,				/* Unmap a list of pages from guest address spaces */
	kvmmExitToHost,				/* Exit from FAM to host -- fast-path syscall */
	kvmmResumeGuest,			/* Resume guest from FAM -- fast-path syscall */
	kvmmGetGuestRegister,		/* Get guest register from FAM -- fast-path syscall */
	kvmmSetGuestRegister,		/* Set guest register from FAM -- fast-path syscall */

	kVmmActivateXA,				/* Activate extended architecture features for a VM */
	kVmmDeactivateXA,			/* Deactivate extended architecture features for a VM */
	kVmmGetXA,					/* Get extended architecture features from a VM */

	kVmmMapPage64,				/* Map a host page to a guest address space - supports 64-bit */
	kVmmGetPageMapping64,		/* Get host address of a guest page - supports 64-bit */
	kVmmUnmapPage64,			/* Unmap a guest page - supports 64-bit */
	kVmmGetPageDirtyFlag64,		/* Check if guest page modified - supports 64-bit */
	kVmmProtectPage64,			/* Set protection attributes for a guest page - supports 64-bit */
	kVmmMapExecute64,			/* Map guest page and launch - supports 64-bit */
	kVmmProtectExecute64,		/* Set prot attributes and launch - supports 64-bit */
	kVmmMapList64,				/* Map a list of pages into guest address spaces - supports 64-bit */
	kVmmUnmapList64,			/* Unmap a list of pages from guest address spaces - supports 64-bit */
	kVmmMaxAddr,				/* Returns the maximum virtual address that is mappable */

	kVmmSetGuestMemory,			/* Sets base and extent of guest physical memory in host address space */
	kVmmPurgeLocal,				/* Purges all non-global mappings for a given guest address space */
};

#define kVmmReturnNull				0
#define kVmmBogusContext			1
#define kVmmStopped					2
#define kVmmReturnDataPageFault		3
#define kVmmReturnInstrPageFault	4
#define kVmmReturnAlignmentFault	6
#define kVmmReturnProgramException	7
#define kVmmReturnSystemCall		12
#define kVmmReturnTraceException	13
#define kVmmAltivecAssist			22
#define kVmmInvalidAddress			0x1000
#define kVmmInvalidAdSpace			0x1001

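/*
 * Illustrative sketch (an assumption, not part of this interface): inspecting
 * return_code in the state page after kVmmExecuteVM comes back and reflecting
 * the exception into the client's own guest handlers (handler names are
 * hypothetical).
 *
 *	switch (sp->return_code) {
 *	case kVmmReturnDataPageFault:  handle_guest_dsi(sp); break;
 *	case kVmmReturnInstrPageFault: handle_guest_isi(sp); break;
 *	case kVmmReturnSystemCall:     handle_guest_sc(sp);  break;
 *	case kVmmStopped:              break;	// guest was stopped
 *	default:                       break;	// unexpected
 *	}
 */
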
/*
 * Notes on guest address spaces.
 *
 * Address spaces are loosely coupled to virtual machines. The default is for
 * a guest with an index of 1 to use address space 1, 2 to use 2, etc. However,
 * any guest may be launched using any address space and any address space may be the
 * target for a map or unmap function. Note that the (un)map list functions may pass in
 * an address space ID on a page-by-page basis.
 *
 * An address space is instantiated either explicitly by mapping something into it, or
 * implicitly by launching a guest with it.
 *
 * An address space is destroyed explicitly by kVmmTearDownAll or kVmmUnmapAllPages. It is
 * destroyed implicitly by kVmmTearDownContext. The latter is done in order to remain
 * backwards compatible with the previous implementation, which does not have decoupled
 * guests and address spaces.
 *
 * An address space supports the maximum virtual address supported by the processor.
 * The 64-bit variant of the mapping functions can be used on non-64-bit machines. If an
 * unmappable address (e.g., an address larger than 4GB-1 on a 32-bit machine) is requested,
 * the operation fails with a kVmmInvalidAddress return code.
 *
 * Note that for 64-bit calls, both host and guest addresses are specified as 64-bit values.
 *
 */

/*
 *	Storage Extended Protection modes
 *	Notes:
 *		To keep compatibility, vmmKey and the PPC key have reversed meanings,
 *		i.e., vmmKey 0 is PPC key 1 and vice versa.
 *
 *	Mode				vmmKey 0		vmmKey 1		Notes
 *
 *	kVmmProtNARW		not accessible	read/write		VM_PROT_NONE (not settable via VM calls)
 *	kVmmProtRORW		read only		read/write
 *	kVmmProtRWRW		read/write		read/write		VM_PROT_WRITE or (VM_PROT_WRITE | VM_PROT_READ)
 *	kVmmProtRORO		read only		read only		VM_PROT_READ
 *
 */

#define kVmmProtXtnd 0x00000008
#define kVmmProtNARW (kVmmProtXtnd | 0x00000000)
#define kVmmProtRORW (kVmmProtXtnd | 0x00000001)
#define kVmmProtRWRW (kVmmProtXtnd | 0x00000002)
#define kVmmProtRORO (kVmmProtXtnd | 0x00000003)

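/*
 * Illustrative sketch (an assumption, not part of this interface): choosing an
 * extended protection for a page the guest may only read while the host keeps
 * read/write access.  Which column of the table above applies is selected by
 * the vmmKey bit in vmmCntrl (note its reversed sense versus the PPC key).
 *
 *	vm_prot_t prot = kVmmProtRORW;			// guest read only, host read/write
 *	vmm_map_page(act, index, host_va, guest_va, prot);	// hypothetical call site
 */
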
/*
 * Map list formats
 * The last 12 bits in the guest virtual address are used as flags as follows:
 *	0x007 - for the map calls, this is the key to set
 *	0x3F0 - for both map and unmap, this is the address space ID upon which to operate.
 *			Note that if 0, the address space ID from the function call is used instead.
 */

typedef struct vmmMList {
	unsigned int		vmlva;			/* Virtual address in host address space */
	unsigned int		vmlava;			/* Virtual address in guest address space */
} vmmMList;

typedef struct vmmMList64 {
	unsigned long long	vmlva;			/* Virtual address in host address space */
	unsigned long long	vmlava;			/* Virtual address in guest address space */
} vmmMList64;

typedef struct vmmUMList {
	unsigned int		vmlava;			/* Virtual address in guest address space */
} vmmUMList;

typedef struct vmmUMList64 {
	unsigned long long	vmlava;			/* Virtual address in guest address space */
} vmmUMList64;

#define vmmlFlgs 0x00000FFF				/* Flags passed in vmlava low order 12 bits */
#define vmmlProt 0x00000007				/* Protection flags for the page */
#define vmmlAdID 0x000003F0				/* Guest address space ID - used only if non-zero */
#define vmmlGlob 0x00000400				/* Mapping is global */
#define vmmlRsvd 0x00000800				/* Reserved for future */

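/*
 * Illustrative sketch (an assumption, not part of this interface): building one
 * kVmmMapList entry whose low-order vmlava bits carry the protection key and a
 * per-page guest address space ID (the shift of 4 matches the vmmlAdID mask;
 * prot_key and adsp_id are hypothetical).
 *
 *	vmmMList e;
 *	e.vmlva  = host_va;							// host page, page aligned
 *	e.vmlava = (guest_va & ~vmmlFlgs)			// guest page, page aligned
 *	         | (prot_key & vmmlProt)			// key to set for this page
 *	         | ((adsp_id << 4) & vmmlAdID);		// 0 keeps the ID from the call
 */
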
/*************************************************************************************
	Internal Emulation Types
**************************************************************************************/

#define kVmmMaxContexts		32
#define kVmmMaxUnmapPages	64
#define kVmmMaxMapPages		64

#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef struct vmmCntrlEntry {			/* Virtual Machine Monitor control table entry */
	unsigned int		vmmFlags;			/* Assorted control flags */
#define vmmInUse		0x80000000
#define vmmInUseb		0
#define vmmFloatCngd	0x40000000
#define vmmFloatCngdb	1
#define vmmVectCngd		0x20000000
#define vmmVectCngdb	2
#define vmmTimerPop		0x10000000
#define vmmTimerPopb	3
#define vmmFAMmode		0x04000000
#define vmmFAMmodeb		5
#define vmmXStop		0x00800000
#define vmmXStopb		8
#define vmmSpfSave		0x000000FF
#define vmmSpfSaveb		24
	unsigned int		vmmXAFlgs;			/* Extended Architecture flags */
	vmm_state_page_t	*vmmContextKern;	/* Kernel address of context communications area */
	ppnum_t				vmmContextPhys;		/* Physical address of context communications area */
	vmm_state_page_t	*vmmContextUser;	/* User address of context communications area */
	facility_context	vmmFacCtx;			/* Header for vector and floating point contexts */
	pmap_t				vmmPmap;			/* Last dispatched pmap */
	uint64_t			vmmTimer;			/* Last set timer value. Zero means unset */
	unsigned int		vmmFAMintercept;	/* FAM intercepted exceptions */
} vmmCntrlEntry;
#pragma pack()

#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef struct vmmCntrlTable {			/* Virtual Machine Monitor Control table */
	unsigned int		vmmGFlags;			/* Global flags */
#define vmmLastAdSp 0xFF					/* Remember the address space that was mapped last */
	addr64_t			vmmLastMap;			/* Last vaddr mapping made */
	vmmCntrlEntry		vmmc[kVmmMaxContexts];		/* One entry for each possible Virtual Machine Monitor context */
	pmap_t				vmmAdsp[kVmmMaxContexts];	/* Guest address space pmaps */
} vmmCntrlTable;
#pragma pack()

/* function decls for kernel level routines... */
extern void vmm_execute_vm(thread_t act, vmm_thread_index_t index);
extern kern_return_t vmm_tear_down_context(thread_t act, vmm_thread_index_t index);
extern kern_return_t vmm_get_float_state(thread_t act, vmm_thread_index_t index);
extern kern_return_t vmm_get_vector_state(thread_t act, vmm_thread_index_t index);
extern kern_return_t vmm_set_timer(thread_t act, vmm_thread_index_t index, unsigned int timerhi, unsigned int timerlo);
extern kern_return_t vmm_get_timer(thread_t act, vmm_thread_index_t index);
extern void vmm_tear_down_all(thread_t act);
extern kern_return_t vmm_map_page(thread_t act, vmm_thread_index_t hindex, addr64_t cva,
	addr64_t ava, vm_prot_t prot);
extern vmm_return_code_t vmm_map_execute(thread_t act, vmm_thread_index_t hindex, addr64_t cva,
	addr64_t ava, vm_prot_t prot);
extern kern_return_t vmm_protect_page(thread_t act, vmm_thread_index_t hindex, addr64_t va,
	vm_prot_t prot);
extern vmm_return_code_t vmm_protect_execute(thread_t act, vmm_thread_index_t hindex, addr64_t va,
	vm_prot_t prot);
extern addr64_t vmm_get_page_mapping(thread_t act, vmm_thread_index_t index,
	addr64_t va);
extern kern_return_t vmm_unmap_page(thread_t act, vmm_thread_index_t index, addr64_t va);
extern void vmm_unmap_all_pages(thread_t act, vmm_thread_index_t index);
extern boolean_t vmm_get_page_dirty_flag(thread_t act, vmm_thread_index_t index,
	addr64_t va, unsigned int reset);
extern kern_return_t vmm_activate_XA(thread_t act, vmm_thread_index_t index, unsigned int xaflags);
extern kern_return_t vmm_deactivate_XA(thread_t act, vmm_thread_index_t index, unsigned int xaflags);
extern unsigned int vmm_get_XA(thread_t act, vmm_thread_index_t index);
extern int vmm_get_features(struct savearea *);
extern int vmm_get_version(struct savearea *);
extern int vmm_init_context(struct savearea *);
extern int vmm_dispatch(struct savearea *);
extern int vmm_exit(thread_t act, struct savearea *);
extern void vmm_force_exit(thread_t act, struct savearea *);
extern int vmm_stop_vm(struct savearea *save);
extern void vmm_timer_pop(thread_t act);
extern void vmm_interrupt(ReturnHandler *rh, thread_t act);
extern kern_return_t vmm_map_list(thread_t act, vmm_thread_index_t index, unsigned int cnt, unsigned int flavor);
extern kern_return_t vmm_unmap_list(thread_t act, vmm_thread_index_t index, unsigned int cnt, unsigned int flavor);
extern vmm_return_code_t vmm_resume_guest(vmm_thread_index_t index, unsigned long pc,
	unsigned long vmmCntrl, unsigned long vmmCntrMaskl);
extern vmm_return_code_t vmm_exit_to_host(vmm_thread_index_t index);
extern unsigned long vmm_get_guest_register(vmm_thread_index_t index, unsigned long reg_index);
extern vmm_return_code_t vmm_set_guest_register(vmm_thread_index_t index, unsigned long reg_index, unsigned long reg_value);
extern addr64_t vmm_max_addr(thread_t act);
extern kern_return_t vmm_set_guest_memory(thread_t act, vmm_thread_index_t index, addr64_t base, addr64_t extent);
extern kern_return_t vmm_purge_local(thread_t act, vmm_thread_index_t index);

#endif