xnu-792.13.8.tar.gz
[apple/xnu.git] / osfmk / ppc / vmachmon.h
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*-----------------------------------------------------------------------
31 ** vmachmon.h
32 **
33 ** C routines that we are adding to the MacOS X kernel.
34 **
35 -----------------------------------------------------------------------*/
36
37 #include <ppc/exception.h>
38
39 #ifndef _VEMULATION_H_
40 #define _VEMULATION_H_
41
/*************************************************************************************
	External Emulation Types
**************************************************************************************/

/*
 * One 128-bit AltiVec vector register.  The union lets callers view the
 * register as 4 words, 8 halfwords, or 16 bytes.  NOTE(review): this header
 * targets 32-bit PPC where "unsigned long" is 4 bytes; on an LP64 host the
 * i[] view would exceed the register width.
 */
typedef union vmm_vector_register_t {
	unsigned long		i[4];
	unsigned short		s[8];
	unsigned char		b[16];
} vmm_vector_register_t;

/*
 * One 64-bit floating point register, viewable as a double, 2 words,
 * 4 halfwords, or 8 bytes (same 32-bit "unsigned long" caveat as the
 * vector register above).
 */
typedef union vmm_fp_register_t {
	double				d;
	unsigned long		i[2];
	unsigned short		s[4];
	unsigned char		b[8];
} vmm_fp_register_t;
58
59
/*
 * 32-bit guest register state.  The hex offset comments assume the
 * original 32-bit PPC ABI where "unsigned long" is 4 bytes.
 */
typedef struct vmm_regs32_t {

	unsigned long		ppcPC;			/* 000 */
	unsigned long		ppcMSR;			/* 004 */

	unsigned long		ppcGPRs[32];	/* 008 */

	unsigned long		ppcCR;			/* 088 */
	unsigned long		ppcXER;			/* 08C */
	unsigned long		ppcLR;			/* 090 */
	unsigned long		ppcCTR;			/* 094 */
	unsigned long		ppcMQ;			/* 098 - Obsolete */
	unsigned long		ppcVRSave;		/* 09C */
	unsigned long		ppcRsrvd0A0[40];	/* 0A0 - reserved pad out to 0x140 */
	/* 140 */
} vmm_regs32_t;
76
#pragma pack(4)							/* Make sure the structure stays as we defined it */
/*
 * 64-bit guest register state: all 64-bit fields first, then the 32-bit
 * CR and VRSave, padded so the structure is 0x140 bytes on the original
 * target.  pack(4) pins the layout independent of host alignment rules.
 */
typedef struct vmm_regs64_t {

	unsigned long long	ppcPC;			/* 000 */
	unsigned long long	ppcMSR;			/* 008 */

	unsigned long long	ppcGPRs[32];	/* 010 */

	unsigned long long	ppcXER;			/* 110 */
	unsigned long long	ppcLR;			/* 118 */
	unsigned long long	ppcCTR;			/* 120 */
	unsigned long		ppcCR;			/* 128 */
	unsigned long		ppcVRSave;		/* 12C */
	unsigned long		ppcRsvd130[4];	/* 130 */
	/* 140 */
} vmm_regs64_t;
#pragma pack()
94
95
96 #pragma pack(4) /* Make sure the structure stays as we defined it */
97 typedef union vmm_regs_t {
98 vmm_regs32_t ppcRegs32;
99 vmm_regs64_t ppcRegs64;
100 } vmm_regs_t;
101 #pragma pack()
102
103 #pragma pack(4) /* Make sure the structure stays as we defined it */
104 typedef struct vmm_processor_state_t {
105 /* 32-byte bndry */
106 vmm_regs_t ppcRegs; /* Define registers areas */
107
108 /* We must be 16-byte aligned here */
109
110 vmm_vector_register_t ppcVRs[32]; /* These are only valid after a kVmmGetVectorState */
111 vmm_vector_register_t ppcVSCR; /* This is always loaded/saved at host/guest transition */
112
113 /* We must be 8-byte aligned here */
114
115 vmm_fp_register_t ppcFPRs[32]; /* These are only valid after a kVmmGetFloatState */
116 vmm_fp_register_t ppcFPSCR; /* This is always loaded/saved at host/guest transition */
117 unsigned long ppcReserved2[2]; /* Pad out to multiple of 16 bytes */
118 } vmm_processor_state_t;
119 #pragma pack()
120
/* Return code handed back in the state page after a VM exit. */
typedef unsigned long vmm_return_code_t;

/* Packed context handle: low byte is the context index, next byte the
   address space ID (see masks below). */
typedef unsigned long vmm_thread_index_t;
#define vmmTInum 0x000000FF				/* Context (thread) index field */
#define vmmTIadsp 0x0000FF00			/* Address space ID field */
typedef unsigned long vmm_adsp_id_t;	/* Guest address space identifier */
127
/* Interface version: packed as (major << 16) | minor. */
enum {
	kVmmCurMajorVersion = 0x0001,
	kVmmCurMinorVersion = 0x0007,
	kVmmMinMajorVersion = 0x0001,
};
#define kVmmCurrentVersion ((kVmmCurMajorVersion << 16) | kVmmCurMinorVersion)
134
/* Bit mask of optional capabilities reported by kVmmvGetFeatures. */
typedef unsigned long vmm_features_t;
enum {
	kVmmFeature_LittleEndian		= 0x00000001,
	kVmmFeature_Stop				= 0x00000002,
	kVmmFeature_ExtendedMapping		= 0x00000004,
	kVmmFeature_ListMapping			= 0x00000008,
	kVmmFeature_FastAssist			= 0x00000010,
	kVmmFeature_XA					= 0x00000020,
	kVmmFeature_SixtyFourBit		= 0x00000040,
	kVmmFeature_MultAddrSpace		= 0x00000080,
	kVmmFeature_GuestShadowAssist	= 0x00000100,	/* Guest->physical shadow hash table */
	kVmmFeature_GlobalMappingAssist	= 0x00000200,	/* Global shadow mapping support */
	kVmmFeature_HostShadowAssist	= 0x00000400,	/* Linear shadow mapping of an area of
													   host virtual as guest physical */
	kVmmFeature_MultAddrSpaceAssist	= 0x00000800,	/* Expanded pool of guest virtual
													   address spaces */
};
/* Feature set this implementation actually advertises. */
#define kVmmCurrentFeatures (kVmmFeature_LittleEndian | kVmmFeature_Stop | kVmmFeature_ExtendedMapping \
	| kVmmFeature_ListMapping | kVmmFeature_FastAssist | kVmmFeature_XA \
	| kVmmFeature_GuestShadowAssist)
155
/* Extended-architecture flags accepted by kVmmActivateXA / kVmmDeactivateXA. */
enum {
	vmm64Bit	= 0x80000000,			/* Make guest 64-bit */
	vmmGSA		= 0x40000000,			/* Enable guest shadow assist (GSA) */
	vmmGMA		= 0x20000000,			/* Enable global shadow mapping assist (GMA) */
};

/* All XA flags that may legally be set. */
#define kVmmSupportedSetXA	(vmm64Bit | vmmGSA | vmmGMA)
163
typedef unsigned long vmm_version_t;

/* Parameters returned with a VM exit, 32-bit flavor. */
typedef struct vmm_ret_parms32_t {
	unsigned long 		return_params[4];
} vmm_ret_parms32_t;

/* Parameters returned with a VM exit, 64-bit flavor. */
typedef struct vmm_ret_parms64_t {
	unsigned long long	return_params[4];
} vmm_ret_parms64_t;

#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef union vmm_ret_parms_t {
	vmm_ret_parms64_t	vmmrp64;		/* 64-bit flavor */
	vmm_ret_parms32_t	vmmrp32;		/* 32-bit flavor */
	unsigned int		retgas[11];		/* Force this to be 11 words long */
} vmm_ret_parms_t;
#pragma pack()
181
#pragma pack(4)							/* Make sure the structure stays as we defined it */
/* State shared with the fast-assist (FAM) dispatch path, 32-bit flavor. */
typedef struct vmm_fastassist_state32_t {
	unsigned long fastassist_dispatch;
	unsigned long fastassist_refcon;

	unsigned long fastassist_dispatch_code;
	unsigned long fastassist_parameter[5];

	unsigned long guest_register[8];

	unsigned long guest_pc;
	unsigned long guest_msr;

	unsigned long fastassist_intercepts;
	unsigned long fastassist_reserved1;
} vmm_fastassist_state32_t;

/* State shared with the fast-assist (FAM) dispatch path, 64-bit flavor.
   Note the last two fields stay 32-bit in both flavors. */
typedef struct vmm_fastassist_state64_t {
	unsigned long long fastassist_dispatch;
	unsigned long long fastassist_refcon;

	unsigned long long fastassist_dispatch_code;
	unsigned long long fastassist_parameter[5];

	unsigned long long guest_register[8];

	unsigned long long guest_pc;
	unsigned long long guest_msr;

	unsigned long fastassist_intercepts;
	unsigned long fastassist_reserved1;
} vmm_fastassist_state64_t;

typedef union vmm_fastassist_state_t {
	vmm_fastassist_state64_t vmmfs64;	/* 64-bit flavor */
	vmm_fastassist_state32_t vmmfs32;	/* 32-bit flavor */
} vmm_fastassist_state_t;
#pragma pack()
220
221 #pragma pack(4) /* Make sure the structure stays as we defined it */
222 typedef struct vmm_state_page_t {
223 /* This structure must remain below 4Kb (one page) in size */
224 vmm_version_t interface_version;
225 vmm_thread_index_t thread_index;
226 unsigned int vmmStat; /* Note: this field is identical to vmmFlags in vmmCntrlEntry */
227 unsigned int vmmCntrl;
228 #define vmmFloatLoad 0x80000000
229 #define vmmFloatLoadb 0
230 #define vmmVectLoad 0x40000000
231 #define vmmVectLoadb 1
232 #define vmmVectVRall 0x20000000
233 #define vmmVectVRallb 2
234 #define vmmVectVAss 0x10000000
235 #define vmmVectVAssb 3
236 #define vmmXStart 0x08000000
237 #define vmmXStartb 4
238 #define vmmKey 0x04000000
239 #define vmmKeyb 5
240 #define vmmFamEna 0x02000000
241 #define vmmFamEnab 6
242 #define vmmFamSet 0x01000000
243 #define vmmFamSetb 7
244
245 vmm_return_code_t return_code;
246 vmm_ret_parms_t vmmRet;
247
248 /* The next portion of the structure must remain 32-byte aligned */
249 vmm_processor_state_t vmm_proc_state;
250
251 /* The next portion of the structure must remain 16-byte aligned */
252 vmm_fastassist_state_t vmm_fastassist_state;
253
254 } vmm_state_page_t;
255 #pragma pack()
256
257 #pragma pack(4) /* Make sure the structure stays as we defined it */
258 typedef struct vmm_comm_page_t {
259 union {
260 vmm_state_page_t vmcpState; /* Reserve area for state */
261 unsigned int vmcpPad[768]; /* Reserve space for 3/4 page state area */
262 } vmcpfirst;
263 unsigned int vmcpComm[256]; /* Define last 1024 bytes as a communications area - function specific */
264 } vmm_comm_page_t;
265 #pragma pack()
266
enum {
	/* Function Indices (passed in r3) */
	kVmmGetVersion = 0,					/* Get VMM system version */
	kVmmvGetFeatures,					/* Get VMM supported features */
	kVmmInitContext,					/* Initialize a context */
	kVmmTearDownContext,				/* Destroy a context */
	kVmmTearDownAll,					/* Destroy all contexts */
	kVmmMapPage,						/* Map a host to guest address space */
	kVmmGetPageMapping,					/* Get host address of a guest page */
	kVmmUnmapPage,						/* Unmap a guest page */
	kVmmUnmapAllPages,					/* Unmap all pages in a guest address space */
	kVmmGetPageDirtyFlag,				/* Check if guest page modified */
	kVmmGetFloatState,					/* Retrieve guest floating point context */
	kVmmGetVectorState,					/* Retrieve guest vector context */
	kVmmSetTimer,						/* Set a guest timer */
	kVmmGetTimer,						/* Get a guest timer */
	kVmmExecuteVM,						/* Launch a guest */
	kVmmProtectPage,					/* Set protection attributes for a guest page */
	kVmmMapExecute,						/* Map guest page and launch */
	kVmmProtectExecute,					/* Set prot attributes and launch */
	kVmmMapList,						/* Map a list of pages into guest address spaces */
	kVmmUnmapList,						/* Unmap a list of pages from guest address spaces */
	kvmmExitToHost,						/* Exit from FAM to host -- fast-path syscall */
	kvmmResumeGuest,					/* Resume guest from FAM -- fast-path syscall */
	kvmmGetGuestRegister,				/* Get guest register from FAM -- fast-path syscall */
	kvmmSetGuestRegister,				/* Set guest register from FAM -- fast-path syscall */

	kVmmActivateXA,						/* Activate extended architecture features for a VM */
	kVmmDeactivateXA,					/* Deactivate extended architecture features for a VM */
	kVmmGetXA,							/* Get extended architecture features from a VM */

	kVmmMapPage64,						/* Map a host to guest address space - supports 64-bit */
	kVmmGetPageMapping64,				/* Get host address of a guest page - supports 64-bit */
	kVmmUnmapPage64,					/* Unmap a guest page - supports 64-bit */
	kVmmGetPageDirtyFlag64,				/* Check if guest page modified - supports 64-bit */
	kVmmProtectPage64,					/* Set protection attributes for a guest page - supports 64-bit */
	kVmmMapExecute64,					/* Map guest page and launch - supports 64-bit */
	kVmmProtectExecute64,				/* Set prot attributes and launch - supports 64-bit */
	kVmmMapList64,						/* Map a list of pages into guest address spaces - supports 64-bit */
	kVmmUnmapList64,					/* Unmap a list of pages from guest address spaces - supports 64-bit */
	kVmmMaxAddr,						/* Returns the maximum virtual address that is mappable */

	kVmmSetGuestMemory,					/* Sets base and extent of guest physical memory in host address space */
	kVmmPurgeLocal,						/* Purges all non-global mappings for a given guest address space */
};
312
/* Return codes delivered in the state page after a VM exit.  Values below
   0x1000 mirror PPC exception vectors; larger values are call errors. */
#define kVmmReturnNull					0
#define kVmmBogusContext				1
#define kVmmStopped						2
#define kVmmReturnDataPageFault			3
#define kVmmReturnInstrPageFault		4
#define kVmmReturnAlignmentFault		6
#define kVmmReturnProgramException		7
#define kVmmReturnSystemCall			12
#define kVmmReturnTraceException		13
#define kVmmAltivecAssist				22
#define kVmmInvalidAddress				0x1000
#define kVmmInvalidAdSpace				0x1001
325
/*
 *	Notes on guest address spaces.
 *
 *	Address spaces are loosely coupled to virtual machines.  The default is for
 *	a guest with an index of 1 to use address space 1, 2 to use 2, etc.  However,
 *	any guest may be launched using any address space and any address space may be the
 *	target for a map or unmap function.  Note that the (un)map list functions may pass in
 *	an address space ID on a page-by-page basis.
 *
 *	An address space is instantiated either explicitly by mapping something into it, or
 *	implicitly by launching a guest with it.
 *
 *	An address space is destroyed explicitly by kVmmTearDownAll or kVmmUnmapAllPages.  It is
 *	destroyed implicitly by kVmmTearDownContext.  The latter is done in order to remain
 *	backwards compatible with the previous implementation, which does not have decoupled
 *	guests and address spaces.
 *
 *	An address space supports the maximum virtual address supported by the processor.
 *	The 64-bit variant of the mapping functions can be used on non-64-bit machines.  If an
 *	unmappable address (e.g., an address larger than 4GB-1 on a 32-bit machine) is requested,
 *	the operation fails with a kVmmInvalidAddress return code.
 *
 *	Note that for 64-bit calls, both host and guest are specified as 64-bit values.
 *
 */
351
352
353
354
/*
 *	Storage Extended Protection modes
 *	Notes:
 *		To keep compatibility, vmmKey and the PPC key have reversed meanings,
 *		i.e., vmmKey 0 is PPC key 1 and vice versa.
 *
 *	vmmKey										Notes
 *	Mode			0				1
 *
 *	kVmmProtNARW	not accessible	read/write	VM_PROT_NONE (not settable via VM calls)
 *	kVmmProtRORW	read only		read/write
 *	kVmmProtRWRW	read/write		read/write	VM_PROT_WRITE or (VM_PROT_WRITE | VM_PROT_READ)
 *	kVmmProtRORO	read only		read only	VM_PROT_READ
 *
 */
370
/* Extended protection modes: kVmmProtXtnd marks the value as extended;
   the low two bits select the mode (see the table in the notes above). */
#define kVmmProtXtnd 0x00000008
#define kVmmProtNARW (kVmmProtXtnd | 0x00000000)	/* vmmKey 0: no access;  vmmKey 1: read/write */
#define kVmmProtRORW (kVmmProtXtnd | 0x00000001)	/* vmmKey 0: read only;  vmmKey 1: read/write */
#define kVmmProtRWRW (kVmmProtXtnd | 0x00000002)	/* read/write in both modes */
#define kVmmProtRORO (kVmmProtXtnd | 0x00000003)	/* read only in both modes */
376
/*
 *	Map list formats
 *	The last 12 bits of the guest virtual address are used as flags as follows:
 *		0x007 - for the map calls, this is the key to set
 *		0x3F0 - for both map and unmap, this is the address space ID upon which to operate.
 *				Note that if 0, the address space ID from the function call is used instead.
 */
384
385 typedef struct vmmMList {
386 unsigned int vmlva; /* Virtual address in host address space */
387 unsigned int vmlava; /* Virtual address in guest address space */
388 } vmmMList;
389
390 typedef struct vmmMList64 {
391 unsigned long long vmlva; /* Virtual address in host address space */
392 unsigned long long vmlava; /* Virtual address in guest address space */
393 } vmmMList64;
394
395 typedef struct vmmUMList {
396 unsigned int vmlava; /* Virtual address in guest address space */
397 } vmmUMList;
398
399 typedef struct vmmUMList64 {
400 unsigned long long vmlava; /* Virtual address in guest address space */
401 } vmmUMList64;
402
403 #define vmmlFlgs 0x00000FFF /* Flags passed in in vmlava low order 12 bits */
404 #define vmmlProt 0x00000007 /* Protection flags for the page */
405 #define vmmlAdID 0x000003F0 /* Guest address space ID - used only if non-zero */
406 #define vmmlGlob 0x00000400 /* Mapping is global */
407 #define vmmlRsvd 0x00000800 /* Reserved for future */
408
/*************************************************************************************
	Internal Emulation Types
**************************************************************************************/

#define kVmmMaxContexts		32			/* Max concurrent monitor contexts per task */
#define kVmmMaxUnmapPages	64			/* Max entries per unmap-list call (inferred from name; verify in vmachmon.c) */
#define kVmmMaxMapPages		64			/* Max entries per map-list call (inferred from name; verify in vmachmon.c) */
416
417 #pragma pack(4) /* Make sure the structure stays as we defined it */
418 typedef struct vmmCntrlEntry { /* Virtual Machine Monitor control table entry */
419 unsigned int vmmFlags; /* Assorted control flags */
420 #define vmmInUse 0x80000000
421 #define vmmInUseb 0
422 #define vmmFloatCngd 0x40000000
423 #define vmmFloatCngdb 1
424 #define vmmVectCngd 0x20000000
425 #define vmmVectCngdb 2
426 #define vmmTimerPop 0x10000000
427 #define vmmTimerPopb 3
428 #define vmmFAMmode 0x04000000
429 #define vmmFAMmodeb 5
430 #define vmmXStop 0x00800000
431 #define vmmXStopb 8
432 #define vmmSpfSave 0x000000FF
433 #define vmmSpfSaveb 24
434 unsigned int vmmXAFlgs; /* Extended Architecture flags */
435 vmm_state_page_t *vmmContextKern; /* Kernel address of context communications area */
436 ppnum_t vmmContextPhys; /* Physical address of context communications area */
437 vmm_state_page_t *vmmContextUser; /* User address of context communications area */
438 facility_context vmmFacCtx; /* Header for vector and floating point contexts */
439 pmap_t vmmPmap; /* Last dispatched pmap */
440 uint64_t vmmTimer; /* Last set timer value. Zero means unset */
441 unsigned int vmmFAMintercept; /* FAM intercepted exceptions */
442 } vmmCntrlEntry;
443 #pragma pack()
444
445 #pragma pack(4) /* Make sure the structure stays as we defined it */
446 typedef struct vmmCntrlTable { /* Virtual Machine Monitor Control table */
447 unsigned int vmmGFlags; /* Global flags */
448 #define vmmLastAdSp 0xFF /* Remember the address space that was mapped last */
449 addr64_t vmmLastMap; /* Last vaddr mapping made */
450 vmmCntrlEntry vmmc[kVmmMaxContexts]; /* One entry for each possible Virtual Machine Monitor context */
451 pmap_t vmmAdsp[kVmmMaxContexts]; /* Guest address space pmaps */
452 } vmmCntrlTable;
453 #pragma pack()
454
455 /* function decls for kernel level routines... */
456 extern void vmm_execute_vm(thread_t act, vmm_thread_index_t index);
457 extern kern_return_t vmm_tear_down_context(thread_t act, vmm_thread_index_t index);
458 extern kern_return_t vmm_get_float_state(thread_t act, vmm_thread_index_t index);
459 extern kern_return_t vmm_get_vector_state(thread_t act, vmm_thread_index_t index);
460 extern kern_return_t vmm_set_timer(thread_t act, vmm_thread_index_t index, unsigned int timerhi, unsigned int timerlo);
461 extern kern_return_t vmm_get_timer(thread_t act, vmm_thread_index_t index);
462 extern void vmm_tear_down_all(thread_t act);
463 extern kern_return_t vmm_map_page(thread_t act, vmm_thread_index_t hindex, addr64_t cva,
464 addr64_t ava, vm_prot_t prot);
465 extern vmm_return_code_t vmm_map_execute(thread_t act, vmm_thread_index_t hindex, addr64_t cva,
466 addr64_t ava, vm_prot_t prot);
467 extern kern_return_t vmm_protect_page(thread_t act, vmm_thread_index_t hindex, addr64_t va,
468 vm_prot_t prot);
469 extern vmm_return_code_t vmm_protect_execute(thread_t act, vmm_thread_index_t hindex, addr64_t va,
470 vm_prot_t prot);
471 extern addr64_t vmm_get_page_mapping(thread_t act, vmm_thread_index_t index,
472 addr64_t va);
473 extern kern_return_t vmm_unmap_page(thread_t act, vmm_thread_index_t index, addr64_t va);
474 extern void vmm_unmap_all_pages(thread_t act, vmm_thread_index_t index);
475 extern boolean_t vmm_get_page_dirty_flag(thread_t act, vmm_thread_index_t index,
476 addr64_t va, unsigned int reset);
477 extern kern_return_t vmm_activate_XA(thread_t act, vmm_thread_index_t index, unsigned int xaflags);
478 extern kern_return_t vmm_deactivate_XA(thread_t act, vmm_thread_index_t index, unsigned int xaflags);
479 extern unsigned int vmm_get_XA(thread_t act, vmm_thread_index_t index);
480 extern int vmm_get_features(struct savearea *);
481 extern int vmm_get_version(struct savearea *);
482 extern int vmm_init_context(struct savearea *);
483 extern int vmm_dispatch(struct savearea *);
484 extern int vmm_exit(thread_t act, struct savearea *);
485 extern void vmm_force_exit(thread_t act, struct savearea *);
486 extern int vmm_stop_vm(struct savearea *save);
487 extern void vmm_timer_pop(thread_t act);
488 extern void vmm_interrupt(ReturnHandler *rh, thread_t act);
489 extern kern_return_t vmm_map_list(thread_t act, vmm_thread_index_t index, unsigned int cnt, unsigned int flavor);
490 extern kern_return_t vmm_unmap_list(thread_t act, vmm_thread_index_t index, unsigned int cnt, unsigned int flavor);
491 extern vmm_return_code_t vmm_resume_guest(vmm_thread_index_t index, unsigned long pc,
492 unsigned long vmmCntrl, unsigned long vmmCntrMaskl);
493 extern vmm_return_code_t vmm_exit_to_host(vmm_thread_index_t index);
494 extern unsigned long vmm_get_guest_register(vmm_thread_index_t index, unsigned long reg_index);
495 extern vmm_return_code_t vmm_set_guest_register(vmm_thread_index_t index, unsigned long reg_index, unsigned long reg_value);
496 extern addr64_t vmm_max_addr(thread_t act);
497 extern kern_return_t vmm_set_guest_memory(thread_t act, vmm_thread_index_t index, addr64_t base, addr64_t extent);
498 extern kern_return_t vmm_purge_local(thread_t act, vmm_thread_index_t index);
499
500 #endif