/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.h
**
** C routines that we are adding to the MacOS X kernel.
**
-----------------------------------------------------------------------*/
31 | ||
#include <ppc/exception.h>

#ifndef _VEMULATION_H_
#define _VEMULATION_H_

/*************************************************************************************
	External Emulation Types
**************************************************************************************/
/*
 * One 128-bit AltiVec vector register, viewable as words, halfwords, or bytes.
 * NOTE(review): i[4] assumes a 32-bit unsigned long (PPC32 era); on an LP64
 * compiler the i view would be 32 bytes, not 16 — confirm before reuse.
 */
typedef union vmm_vector_register_t {
	unsigned long	i[4];	/* Four 32-bit words */
	unsigned short	s[8];	/* Eight 16-bit halfwords */
	unsigned char	b[16];	/* Sixteen bytes */
} vmm_vector_register_t;
46 | ||
/*
 * One 64-bit floating point register, viewable as a double or as raw
 * words, halfwords, or bytes.
 */
typedef union vmm_fp_register_t {
	double			d;	/* 64-bit IEEE double */
	unsigned long	i[2];	/* Two 32-bit words (assumes 32-bit unsigned long — PPC32) */
	unsigned short	s[4];	/* Four 16-bit halfwords */
	unsigned char	b[8];	/* Eight bytes */
} vmm_fp_register_t;
53 | ||
54 | ||
/*
 * 32-bit guest general register state. Hex comments are the intended byte
 * offsets, assuming a 32-bit unsigned long (PPC32).
 */
typedef struct vmm_regs32_t {

	unsigned long	ppcPC;			/* 000 - program counter */
	unsigned long	ppcMSR;			/* 004 - machine state register */

	unsigned long	ppcGPRs[32];		/* 008 - general purpose registers */

	unsigned long	ppcCR;			/* 088 - condition register */
	unsigned long	ppcXER;			/* 08C */
	unsigned long	ppcLR;			/* 090 - link register */
	unsigned long	ppcCTR;			/* 094 - count register */
	unsigned long	ppcMQ;			/* 098 - Obsolete (601-only MQ register) */
	unsigned long	ppcVRSave;		/* 09C */
	unsigned long	ppcRsrvd0A0[40];	/* 0A0 - reserved pad to 0x140 */
						/* 140 */
} vmm_regs32_t;
71 | ||
#pragma pack(4)			/* Make sure the structure stays as we defined it */
/*
 * 64-bit guest general register state. Hex comments are byte offsets;
 * packed(4) so the layout matches the interface definition exactly.
 * Note there is no MQ slot here — it exists only in the 32-bit flavor.
 */
typedef struct vmm_regs64_t {

	unsigned long long	ppcPC;		/* 000 - program counter */
	unsigned long long	ppcMSR;		/* 008 - machine state register */

	unsigned long long	ppcGPRs[32];	/* 010 - general purpose registers */

	unsigned long long	ppcXER;		/* 110 */
	unsigned long long	ppcLR;		/* 118 - link register */
	unsigned long long	ppcCTR;		/* 120 - count register */
	unsigned long		ppcCR;		/* 128 - condition register (32-bit) */
	unsigned long		ppcVRSave;	/* 12C */
	unsigned long		ppcRsvd130[4];	/* 130 - reserved pad to 0x140 */
						/* 140 */
} vmm_regs64_t;
#pragma pack()
89 | ||
90 | ||
91 | #pragma pack(4) /* Make sure the structure stays as we defined it */ | |
92 | typedef union vmm_regs_t { | |
93 | vmm_regs32_t ppcRegs32; | |
94 | vmm_regs64_t ppcRegs64; | |
95 | } vmm_regs_t; | |
96 | #pragma pack() | |
97 | ||
98 | #pragma pack(4) /* Make sure the structure stays as we defined it */ | |
99 | typedef struct vmm_processor_state_t { | |
100 | /* 32-byte bndry */ | |
101 | vmm_regs_t ppcRegs; /* Define registers areas */ | |
102 | ||
103 | /* We must be 16-byte aligned here */ | |
104 | ||
105 | vmm_vector_register_t ppcVRs[32]; /* These are only valid after a kVmmGetVectorState */ | |
106 | vmm_vector_register_t ppcVSCR; /* This is always loaded/saved at host/guest transition */ | |
107 | ||
108 | /* We must be 8-byte aligned here */ | |
109 | ||
110 | vmm_fp_register_t ppcFPRs[32]; /* These are only valid after a kVmmGetFloatState */ | |
111 | vmm_fp_register_t ppcFPSCR; /* This is always loaded/saved at host/guest transition */ | |
112 | unsigned long ppcReserved2[2]; /* Pad out to multiple of 16 bytes */ | |
113 | } vmm_processor_state_t; | |
114 | #pragma pack() | |
115 | ||
/* Return code delivered to the host after a guest exit. */
typedef unsigned long vmm_return_code_t;

/*
 * Thread index: low byte selects the VM context, the next byte selects the
 * guest address space (see the address-space notes later in this header).
 */
typedef unsigned long vmm_thread_index_t;
#define vmmTInum	0x000000FF	/* Context (virtual machine) number */
#define vmmTIadsp	0x0000FF00	/* Guest address space ID */
typedef unsigned long vmm_adsp_id_t;
122 | ||
/* Interface version numbers; current version is packed major<<16 | minor. */
enum {
	kVmmCurMajorVersion	= 0x0001,
	kVmmCurMinorVersion	= 0x0006,
	kVmmMinMajorVersion	= 0x0001,	/* Oldest major version still accepted */
};
#define kVmmCurrentVersion ((kVmmCurMajorVersion << 16) | kVmmCurMinorVersion)
129 | ||
/* Feature bits reported by the kVmmvGetFeatures call. */
typedef unsigned long vmm_features_t;
enum {
	kVmmFeature_LittleEndian	= 0x00000001,
	kVmmFeature_Stop		= 0x00000002,
	kVmmFeature_ExtendedMapping	= 0x00000004,
	kVmmFeature_ListMapping		= 0x00000008,
	kVmmFeature_FastAssist		= 0x00000010,
	kVmmFeature_XA			= 0x00000020,
	kVmmFeature_SixtyFourBit	= 0x00000040,
	kVmmFeature_MultAddrSpace	= 0x00000080,
};
/*
 * NOTE(review): kVmmFeature_SixtyFourBit is deliberately absent from this
 * mask — presumably it is ORed in at runtime only on 64-bit hosts; confirm
 * against the vmm_get_features implementation.
 */
#define kVmmCurrentFeatures (kVmmFeature_LittleEndian | kVmmFeature_Stop | kVmmFeature_ExtendedMapping \
	| kVmmFeature_ListMapping | kVmmFeature_FastAssist | kVmmFeature_XA | kVmmFeature_MultAddrSpace)
143 | ||
/* Flag bits carried in the thread-index argument, above the ID fields. */
enum {
	vmm64Bit	= 0x80000000,	/* NOTE(review): appears to request a 64-bit guest context — confirm with kVmmInitContext callers */
};


/* Interface version value exchanged at context initialization. */
typedef unsigned long vmm_version_t;
150 | ||
/* Parameters returned with a guest-exit return code (32-bit flavor). */
typedef struct vmm_ret_parms32_t {
	unsigned long		return_params[4];
} vmm_ret_parms32_t;

/* Parameters returned with a guest-exit return code (64-bit flavor). */
typedef struct vmm_ret_parms64_t {
	unsigned long long	return_params[4];
} vmm_ret_parms64_t;

#pragma pack(4)			/* Make sure the structure stays as we defined it */
typedef union vmm_ret_parms_t {
	vmm_ret_parms64_t	vmmrp64;	/* 64-bit flavor */
	vmm_ret_parms32_t	vmmrp32;	/* 32-bit flavor */
	unsigned int		retgas[11];	/* Force this to be 11 words long */
} vmm_ret_parms_t;
#pragma pack()
166 | ||
#pragma pack(4)			/* Make sure the structure stays as we defined it */
/* Fast-assist (FAM) state shared with the user-level monitor, 32-bit flavor. */
typedef struct vmm_fastassist_state32_t {
	unsigned long	fastassist_dispatch;
	unsigned long	fastassist_refcon;

	unsigned long	fastassist_dispatch_code;
	unsigned long	fastassist_parameter[5];

	unsigned long	guest_register[8];

	unsigned long	guest_pc;
	unsigned long	guest_msr;

	unsigned long	fastassist_intercepts;
	unsigned long	fastassist_reserved1;
} vmm_fastassist_state32_t;

/* Fast-assist (FAM) state shared with the user-level monitor, 64-bit flavor. */
typedef struct vmm_fastassist_state64_t {
	unsigned long long	fastassist_dispatch;
	unsigned long long	fastassist_refcon;

	unsigned long long	fastassist_dispatch_code;
	unsigned long long	fastassist_parameter[5];

	unsigned long long	guest_register[8];

	unsigned long long	guest_pc;
	unsigned long long	guest_msr;

	unsigned long	fastassist_intercepts;
	unsigned long	fastassist_reserved1;
} vmm_fastassist_state64_t;

/* Fast-assist state, in either 32-bit or 64-bit flavor. */
typedef union vmm_fastassist_state_t {
	vmm_fastassist_state64_t vmmfs64;	/* 64-bit flavor */
	vmm_fastassist_state32_t vmmfs32;	/* 32-bit flavor */
} vmm_fastassist_state_t;
#pragma pack()
205 | ||
206 | #pragma pack(4) /* Make sure the structure stays as we defined it */ | |
207 | typedef struct vmm_state_page_t { | |
208 | /* This structure must remain below 4Kb (one page) in size */ | |
209 | vmm_version_t interface_version; | |
210 | vmm_thread_index_t thread_index; | |
211 | unsigned int vmmStat; /* Note: this field is identical to vmmFlags in vmmCntrlEntry */ | |
212 | unsigned int vmmCntrl; | |
213 | #define vmmFloatLoad 0x80000000 | |
214 | #define vmmFloatLoadb 0 | |
215 | #define vmmVectLoad 0x40000000 | |
216 | #define vmmVectLoadb 1 | |
217 | #define vmmVectVRall 0x20000000 | |
218 | #define vmmVectVRallb 2 | |
219 | #define vmmVectVAss 0x10000000 | |
220 | #define vmmVectVAssb 3 | |
221 | #define vmmXStart 0x08000000 | |
222 | #define vmmXStartb 4 | |
223 | #define vmmKey 0x04000000 | |
224 | #define vmmKeyb 5 | |
225 | #define vmmFamEna 0x02000000 | |
226 | #define vmmFamEnab 6 | |
227 | #define vmmFamSet 0x01000000 | |
228 | #define vmmFamSetb 7 | |
229 | ||
230 | vmm_return_code_t return_code; | |
231 | vmm_ret_parms_t vmmRet; | |
232 | ||
233 | /* The next portion of the structure must remain 32-byte aligned */ | |
234 | vmm_processor_state_t vmm_proc_state; | |
235 | ||
236 | /* The next portion of the structure must remain 16-byte aligned */ | |
237 | vmm_fastassist_state_t vmm_fastassist_state; | |
238 | ||
239 | } vmm_state_page_t; | |
240 | #pragma pack() | |
241 | ||
242 | #pragma pack(4) /* Make sure the structure stays as we defined it */ | |
243 | typedef struct vmm_comm_page_t { | |
244 | union { | |
245 | vmm_state_page_t vmcpState; /* Reserve area for state */ | |
246 | unsigned int vmcpPad[768]; /* Reserve space for 3/4 page state area */ | |
247 | } vmcpfirst; | |
248 | unsigned int vmcpComm[256]; /* Define last 1024 bytes as a communications area - function specific */ | |
249 | } vmm_comm_page_t; | |
250 | #pragma pack() | |
251 | ||
enum {
	/* Function Indices (passed in r3) */
	kVmmGetVersion = 0,		/* Get VMM system version */
	kVmmvGetFeatures,		/* Get VMM supported features */
	kVmmInitContext,		/* Initialize a context */
	kVmmTearDownContext,		/* Destroy a context */
	kVmmTearDownAll,		/* Destroy all contexts */
	kVmmMapPage,			/* Map a host to guest address space */
	kVmmGetPageMapping,		/* Get host address of a guest page */
	kVmmUnmapPage,			/* Unmap a guest page */
	kVmmUnmapAllPages,		/* Unmap all pages in a guest address space */
	kVmmGetPageDirtyFlag,		/* Check if guest page modified */
	kVmmGetFloatState,		/* Retrieve guest floating point context */
	kVmmGetVectorState,		/* Retrieve guest vector context */
	kVmmSetTimer,			/* Set a guest timer */
	kVmmGetTimer,			/* Get a guest timer */
	kVmmExecuteVM,			/* Launch a guest */
	kVmmProtectPage,		/* Set protection attributes for a guest page */
	kVmmMapExecute,			/* Map guest page and launch */
	kVmmProtectExecute,		/* Set prot attributes and launch */
	kVmmMapList,			/* Map a list of pages into guest address spaces */
	kVmmUnmapList,			/* Unmap a list of pages from guest address spaces */
	kvmmExitToHost,			/* Fast-assist: return to host */
	kvmmResumeGuest,		/* Fast-assist: resume guest execution */
	kvmmGetGuestRegister,		/* Fast-assist: read a guest register */
	kvmmSetGuestRegister,		/* Fast-assist: write a guest register */

	kVmmSetXA,			/* Set extended architecture features for a VM */
	kVmmGetXA,			/* Get extended architecture features from a VM */

	kVmmMapPage64,			/* Map a host to guest address space - supports 64-bit */
	kVmmGetPageMapping64,		/* Get host address of a guest page - supports 64-bit */
	kVmmUnmapPage64,		/* Unmap a guest page - supports 64-bit */
	kVmmGetPageDirtyFlag64,		/* Check if guest page modified - supports 64-bit */
	kVmmProtectPage64,		/* Set protection attributes for a guest page - supports 64-bit */
	kVmmMapExecute64,		/* Map guest page and launch - supports 64-bit */
	kVmmProtectExecute64,		/* Set prot attributes and launch - supports 64-bit */
	kVmmMapList64,			/* Map a list of pages into guest address spaces - supports 64-bit */
	kVmmUnmapList64,		/* Unmap a list of pages from guest address spaces - supports 64-bit */
	kVmmMaxAddr,			/* Returns the maximum virtual address that is mappable */
};
293 | ||
/*
 * Return codes delivered in vmm_state_page_t.return_code after a guest exit.
 * Values 3-22 mirror PPC exception vector ordering; 0x1000+ are API errors.
 */
#define kVmmReturnNull			0	/* Nothing to report */
#define kVmmBogusContext		1	/* Invalid context index */
#define kVmmStopped			2	/* Guest was stopped (kVmmFeature_Stop) */
#define kVmmReturnDataPageFault		3	/* Guest DSI */
#define kVmmReturnInstrPageFault	4	/* Guest ISI */
#define kVmmReturnAlignmentFault	6	/* Guest alignment exception */
#define kVmmReturnProgramException	7	/* Guest program exception */
#define kVmmReturnSystemCall		12	/* Guest executed sc */
#define kVmmReturnTraceException	13	/* Guest trace exception */
#define kVmmAltivecAssist		22	/* Guest AltiVec assist */
#define kVmmInvalidAddress		0x1000	/* Address not mappable */
#define kVmmInvalidAdSpace		0x1001	/* Bad guest address space ID */
306 | ||
/*
 * Notes on guest address spaces.
 *
 * Address spaces are loosely coupled to virtual machines. The default is for
 * a guest with an index of 1 to use address space 1, 2 to use 2, etc. However,
 * any guest may be launched using any address space and any address space may be the
 * target for a map or unmap function. Note that the (un)map list functions may pass in
 * an address space ID on a page-by-page basis.
 *
 * An address space is instantiated either explicitly by mapping something into it, or
 * implicitly by launching a guest with it.
 *
 * An address space is destroyed explicitly by kVmmTearDownAll or kVmmUnmapAllPages. It is
 * destroyed implicitly by kVmmTearDownContext. The latter is done in order to remain
 * backwards compatible with the previous implementation, which does not have decoupled
 * guests and address spaces.
 *
 * An address space supports the maximum virtual address supported by the processor.
 * The 64-bit variant of the mapping functions can be used on non-64-bit machines. If an
 * unmappable address (e.g., an address larger than 4GB-1 on a 32-bit machine) is requested,
 * the operation fails with a kVmmInvalidAddress return code.
 *
 * Note that for 64-bit calls, both host and guest addresses are specified as 64-bit values.
 *
 */
332 | ||
333 | ||
334 | ||
335 | ||
/*
 * Storage Extended Protection modes
 * Notes:
 *   To keep compatibility, vmmKey and the PPC key have reversed meanings,
 *   i.e., vmmKey 0 is PPC key 1 and vice versa.
 *
 *                            vmmKey                    Notes
 *   Mode                0               1
 *
 *   kVmmProtNARW   not accessible  read/write      VM_PROT_NONE (not settable via VM calls)
 *   kVmmProtRORW   read only       read/write
 *   kVmmProtRWRW   read/write      read/write      VM_PROT_WRITE or (VM_PROT_WRITE | VM_PROT_READ)
 *   kVmmProtRORO   read only       read only       VM_PROT_READ
 *
 */
351 | ||
/*
 * Extended protection modes. kVmmProtXtnd flags the value as an extended
 * mode; the low two bits select the mode per the table above.
 */
#define kVmmProtXtnd 0x00000008
#define kVmmProtNARW (kVmmProtXtnd | 0x00000000)	/* Not accessible / read-write */
#define kVmmProtRORW (kVmmProtXtnd | 0x00000001)	/* Read-only / read-write */
#define kVmmProtRWRW (kVmmProtXtnd | 0x00000002)	/* Read-write / read-write */
#define kVmmProtRORO (kVmmProtXtnd | 0x00000003)	/* Read-only / read-only */
357 | ||
/*
 * Map list formats
 * The last 12 bits in the guest virtual address are used as flags as follows:
 *   0x007 - for the map calls, this is the key to set
 *   0x3F0 - for both map and unmap, this is the address space ID upon which to operate.
 *           Note that if 0, the address space ID from the function call is used instead.
 */
365 | ||
/* One entry of a map list (32-bit addresses). */
typedef struct vmmMList {
	unsigned int	vmlva;		/* Virtual address in host address space */
	unsigned int	vmlava;		/* Virtual address in guest address space */
} vmmMList;

/* One entry of a map list (64-bit addresses). */
typedef struct vmmMList64 {
	unsigned long long	vmlva;	/* Virtual address in host address space */
	unsigned long long	vmlava;	/* Virtual address in guest address space */
} vmmMList64;

/* One entry of an unmap list (32-bit address). */
typedef struct vmmUMList {
	unsigned int	vmlava;		/* Virtual address in guest address space */
} vmmUMList;

/* One entry of an unmap list (64-bit address). */
typedef struct vmmUMList64 {
	unsigned long long	vmlava;	/* Virtual address in guest address space */
} vmmUMList64;

/* Flag fields carried in the low 12 bits of vmlava (pages are 4K-aligned). */
#define vmmlFlgs	0x00000FFF	/* Flags passed in vmlava low order 12 bits */
#define vmmlProt	0x00000007	/* Protection flags for the page */
#define vmmlAdID	0x000003F0	/* Guest address space ID - used only if non-zero */
#define vmmlRsvd	0x00000C08	/* Reserved for future */
388 | ||
/*************************************************************************************
	Internal Emulation Types
**************************************************************************************/
392 | ||
/* Internal limits: contexts per task and pages per (un)map-list call. */
#define kVmmMaxContexts		32
#define kVmmMaxUnmapPages	64
#define kVmmMaxMapPages		64
396 | ||
397 | #pragma pack(4) /* Make sure the structure stays as we defined it */ | |
398 | typedef struct vmmCntrlEntry { /* Virtual Machine Monitor control table entry */ | |
399 | unsigned int vmmFlags; /* Assorted control flags */ | |
400 | #define vmmInUse 0x80000000 | |
401 | #define vmmInUseb 0 | |
402 | #define vmmFloatCngd 0x40000000 | |
403 | #define vmmFloatCngdb 1 | |
404 | #define vmmVectCngd 0x20000000 | |
405 | #define vmmVectCngdb 2 | |
406 | #define vmmTimerPop 0x10000000 | |
407 | #define vmmTimerPopb 3 | |
408 | #define vmmFAMmode 0x04000000 | |
409 | #define vmmFAMmodeb 5 | |
410 | #define vmmXStop 0x00800000 | |
411 | #define vmmXStopb 8 | |
412 | #define vmmSpfSave 0x000000FF | |
413 | #define vmmSpfSaveb 24 | |
414 | unsigned int vmmXAFlgs; /* Extended Architecture flags */ | |
415 | vmm_state_page_t *vmmContextKern; /* Kernel address of context communications area */ | |
416 | ppnum_t vmmContextPhys; /* Physical address of context communications area */ | |
417 | vmm_state_page_t *vmmContextUser; /* User address of context communications area */ | |
418 | facility_context vmmFacCtx; /* Header for vector and floating point contexts */ | |
419 | pmap_t vmmPmap; /* Last dispatched pmap */ | |
420 | uint64_t vmmTimer; /* Last set timer value. Zero means unset */ | |
421 | unsigned int vmmFAMintercept; /* FAM intercepted exceptions */ | |
422 | } vmmCntrlEntry; | |
423 | #pragma pack() | |
424 | ||
425 | #pragma pack(4) /* Make sure the structure stays as we defined it */ | |
426 | typedef struct vmmCntrlTable { /* Virtual Machine Monitor Control table */ | |
427 | unsigned int vmmGFlags; /* Global flags */ | |
428 | #define vmmLastAdSp 0xFF /* Remember the address space that was mapped last */ | |
429 | addr64_t vmmLastMap; /* Last vaddr mapping made */ | |
430 | vmmCntrlEntry vmmc[kVmmMaxContexts]; /* One entry for each possible Virtual Machine Monitor context */ | |
431 | pmap_t vmmAdsp[kVmmMaxContexts]; /* Guest address space pmaps */ | |
432 | } vmmCntrlTable; | |
433 | #pragma pack() | |
434 | ||
435 | /* function decls for kernel level routines... */ | |
436 | extern void vmm_execute_vm(thread_act_t act, vmm_thread_index_t index); | |
437 | extern vmmCntrlEntry *vmm_get_entry(thread_act_t act, vmm_thread_index_t index); | |
438 | extern kern_return_t vmm_tear_down_context(thread_act_t act, vmm_thread_index_t index); | |
439 | extern kern_return_t vmm_get_float_state(thread_act_t act, vmm_thread_index_t index); | |
440 | extern kern_return_t vmm_get_vector_state(thread_act_t act, vmm_thread_index_t index); | |
441 | extern kern_return_t vmm_set_timer(thread_act_t act, vmm_thread_index_t index, unsigned int timerhi, unsigned int timerlo); | |
442 | extern kern_return_t vmm_get_timer(thread_act_t act, vmm_thread_index_t index); | |
443 | extern void vmm_tear_down_all(thread_act_t act); | |
444 | extern kern_return_t vmm_map_page(thread_act_t act, vmm_thread_index_t hindex, addr64_t cva, | |
445 | addr64_t ava, vm_prot_t prot); | |
446 | extern vmm_return_code_t vmm_map_execute(thread_act_t act, vmm_thread_index_t hindex, addr64_t cva, | |
447 | addr64_t ava, vm_prot_t prot); | |
448 | extern kern_return_t vmm_protect_page(thread_act_t act, vmm_thread_index_t hindex, addr64_t va, | |
449 | vm_prot_t prot); | |
450 | extern vmm_return_code_t vmm_protect_execute(thread_act_t act, vmm_thread_index_t hindex, addr64_t va, | |
451 | vm_prot_t prot); | |
452 | extern addr64_t vmm_get_page_mapping(thread_act_t act, vmm_thread_index_t index, | |
453 | addr64_t va); | |
454 | extern kern_return_t vmm_unmap_page(thread_act_t act, vmm_thread_index_t index, addr64_t va); | |
455 | extern void vmm_unmap_all_pages(thread_act_t act, vmm_thread_index_t index); | |
456 | extern boolean_t vmm_get_page_dirty_flag(thread_act_t act, vmm_thread_index_t index, | |
457 | addr64_t va, unsigned int reset); | |
458 | extern kern_return_t vmm_set_XA(thread_act_t act, vmm_thread_index_t index, unsigned int xaflags); | |
459 | extern unsigned int vmm_get_XA(thread_act_t act, vmm_thread_index_t index); | |
460 | extern int vmm_get_features(struct savearea *); | |
461 | extern int vmm_get_version(struct savearea *); | |
462 | extern int vmm_init_context(struct savearea *); | |
463 | extern int vmm_dispatch(struct savearea *); | |
464 | extern int vmm_exit(thread_act_t act, struct savearea *); | |
465 | extern void vmm_force_exit(thread_act_t act, struct savearea *); | |
466 | extern int vmm_stop_vm(struct savearea *save); | |
467 | extern void vmm_timer_pop(thread_act_t act); | |
468 | extern void vmm_interrupt(ReturnHandler *rh, thread_act_t act); | |
469 | extern kern_return_t vmm_map_list(thread_act_t act, vmm_thread_index_t index, unsigned int cnt, unsigned int flavor); | |
470 | extern kern_return_t vmm_unmap_list(thread_act_t act, vmm_thread_index_t index, unsigned int cnt, unsigned int flavor); | |
471 | extern vmm_return_code_t vmm_resume_guest(vmm_thread_index_t index, unsigned long pc, | |
472 | unsigned long vmmCntrl, unsigned long vmmCntrMaskl); | |
473 | extern vmm_return_code_t vmm_exit_to_host(vmm_thread_index_t index); | |
474 | extern unsigned long vmm_get_guest_register(vmm_thread_index_t index, unsigned long reg_index); | |
475 | extern vmm_return_code_t vmm_set_guest_register(vmm_thread_index_t index, unsigned long reg_index, unsigned long reg_value); | |
476 | extern addr64_t vmm_max_addr(thread_act_t act); | |
477 | ||
#endif /* _VEMULATION_H_ */
479 |