/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** vmachmon.h
**
** C routines that we are adding to the MacOS X kernel.
**
-----------------------------------------------------------------------*/

#include <ppc/exception.h>

#ifndef _VEMULATION_H_
#define _VEMULATION_H_

/*************************************************************************************
    External Emulation Types
**************************************************************************************/

typedef union vmm_vector_register_t {
    unsigned long       i[4];
    unsigned short      s[8];
    unsigned char       b[16];
} vmm_vector_register_t;

typedef union vmm_fp_register_t {
    double              d;
    unsigned long       i[2];
    unsigned short      s[4];
    unsigned char       b[8];
} vmm_fp_register_t;


typedef struct vmm_regs32_t {

    unsigned long       ppcPC;              /* 000 */
    unsigned long       ppcMSR;             /* 004 */

    unsigned long       ppcGPRs[32];        /* 008 */

    unsigned long       ppcCR;              /* 088 */
    unsigned long       ppcXER;             /* 08C */
    unsigned long       ppcLR;              /* 090 */
    unsigned long       ppcCTR;             /* 094 */
    unsigned long       ppcMQ;              /* 098 - Obsolete */
    unsigned long       ppcVRSave;          /* 09C */
    unsigned long       ppcRsrvd0A0[40];    /* 0A0 */
                                            /* 140 */
} vmm_regs32_t;

#pragma pack(4)                             /* Make sure the structure stays as we defined it */
typedef struct vmm_regs64_t {

    unsigned long long  ppcPC;              /* 000 */
    unsigned long long  ppcMSR;             /* 008 */

    unsigned long long  ppcGPRs[32];        /* 010 */

    unsigned long long  ppcXER;             /* 110 */
    unsigned long long  ppcLR;              /* 118 */
    unsigned long long  ppcCTR;             /* 120 */
    unsigned long       ppcCR;              /* 128 */
    unsigned long       ppcVRSave;          /* 12C */
    unsigned long       ppcRsvd130[4];      /* 130 */
                                            /* 140 */
} vmm_regs64_t;
#pragma pack()


#pragma pack(4)                             /* Make sure the structure stays as we defined it */
typedef union vmm_regs_t {
    vmm_regs32_t        ppcRegs32;
    vmm_regs64_t        ppcRegs64;
} vmm_regs_t;
#pragma pack()

#pragma pack(4)                             /* Make sure the structure stays as we defined it */
typedef struct vmm_processor_state_t {
                                            /* 32-byte bndry */
    vmm_regs_t          ppcRegs;            /* Define registers areas */

/* We must be 16-byte aligned here */

    vmm_vector_register_t ppcVRs[32];       /* These are only valid after a kVmmGetVectorState */
    vmm_vector_register_t ppcVSCR;          /* This is always loaded/saved at host/guest transition */

/* We must be 8-byte aligned here */

    vmm_fp_register_t   ppcFPRs[32];        /* These are only valid after a kVmmGetFloatState */
    vmm_fp_register_t   ppcFPSCR;           /* This is always loaded/saved at host/guest transition */
    unsigned long       ppcReserved2[2];    /* Pad out to multiple of 16 bytes */
} vmm_processor_state_t;
#pragma pack()

typedef unsigned long vmm_return_code_t;

typedef unsigned long vmm_thread_index_t;
#define vmmTInum    0x000000FF
#define vmmTIadsp   0x0000FF00
typedef unsigned long vmm_adsp_id_t;
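
/*
 * Illustrative only: a vmm_thread_index_t carries the context number in its low
 * byte (vmmTInum) and an address space ID in the next byte (vmmTIadsp).  The
 * helper macros below are a sketch of how a caller might split the two fields;
 * they are not part of the original interface.
 */
#if 0
#define vmmTIGetIndex(ti)   ((ti) & vmmTInum)           /* context number */
#define vmmTIGetAdsp(ti)    (((ti) & vmmTIadsp) >> 8)   /* address space ID */
#endif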

enum {
    kVmmCurMajorVersion = 0x0001,
    kVmmCurMinorVersion = 0x0006,
    kVmmMinMajorVersion = 0x0001,
};
#define kVmmCurrentVersion ((kVmmCurMajorVersion << 16) | kVmmCurMinorVersion)
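
/*
 * Illustrative only: kVmmCurrentVersion packs the major version into the upper
 * halfword and the minor version into the lower halfword, so a client could
 * check the version reported by kVmmGetVersion roughly as sketched below.  The
 * helper function is hypothetical, not part of this header.
 */
#if 0
static int vmm_version_ok(unsigned long version) {
    unsigned long major = (version >> 16) & 0xFFFF;     /* upper halfword: major version */
    return (major >= kVmmMinMajorVersion) && (major <= kVmmCurMajorVersion);
}
#endif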

typedef unsigned long vmm_features_t;
enum {
    kVmmFeature_LittleEndian    = 0x00000001,
    kVmmFeature_Stop            = 0x00000002,
    kVmmFeature_ExtendedMapping = 0x00000004,
    kVmmFeature_ListMapping     = 0x00000008,
    kVmmFeature_FastAssist      = 0x00000010,
    kVmmFeature_XA              = 0x00000020,
    kVmmFeature_SixtyFourBit    = 0x00000040,
    kVmmFeature_MultAddrSpace   = 0x00000080,
};
#define kVmmCurrentFeatures (kVmmFeature_LittleEndian | kVmmFeature_Stop | kVmmFeature_ExtendedMapping \
    | kVmmFeature_ListMapping | kVmmFeature_FastAssist | kVmmFeature_XA | kVmmFeature_MultAddrSpace)

enum {
    vmm64Bit = 0x80000000,
};
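
/*
 * Illustrative only: the value reported by kVmmvGetFeatures is a mask of the
 * kVmmFeature_* bits above, so a monitor would test individual bits before
 * depending on optional facilities such as the 64-bit calls or multiple
 * address spaces.  These helpers are a sketch, not part of the interface.
 */
#if 0
static int vmm_supports_64bit_calls(vmm_features_t feats) {
    return (feats & kVmmFeature_SixtyFourBit) != 0;     /* 64-bit mapping calls usable */
}
static int vmm_supports_mult_adsp(vmm_features_t feats) {
    return (feats & kVmmFeature_MultAddrSpace) != 0;    /* decoupled address spaces usable */
}
#endif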


typedef unsigned long vmm_version_t;

typedef struct vmm_ret_parms32_t {
    unsigned long       return_params[4];
} vmm_ret_parms32_t;

typedef struct vmm_ret_parms64_t {
    unsigned long long  return_params[4];
} vmm_ret_parms64_t;

#pragma pack(4)                             /* Make sure the structure stays as we defined it */
typedef union vmm_ret_parms_t {
    vmm_ret_parms64_t   vmmrp64;            /* 64-bit flavor */
    vmm_ret_parms32_t   vmmrp32;            /* 32-bit flavor */
    unsigned int        retgas[11];         /* Force this to be 11 words long */
} vmm_ret_parms_t;
#pragma pack()

#pragma pack(4)                             /* Make sure the structure stays as we defined it */
typedef struct vmm_fastassist_state32_t {
    unsigned long fastassist_dispatch;
    unsigned long fastassist_refcon;

    unsigned long fastassist_dispatch_code;
    unsigned long fastassist_parameter[5];

    unsigned long guest_register[8];

    unsigned long guest_pc;
    unsigned long guest_msr;

    unsigned long fastassist_intercepts;
    unsigned long fastassist_reserved1;
} vmm_fastassist_state32_t;

typedef struct vmm_fastassist_state64_t {
    unsigned long long fastassist_dispatch;
    unsigned long long fastassist_refcon;

    unsigned long long fastassist_dispatch_code;
    unsigned long long fastassist_parameter[5];

    unsigned long long guest_register[8];

    unsigned long long guest_pc;
    unsigned long long guest_msr;

    unsigned long fastassist_intercepts;
    unsigned long fastassist_reserved1;
} vmm_fastassist_state64_t;

typedef union vmm_fastassist_state_t {
    vmm_fastassist_state64_t vmmfs64;       /* 64-bit flavor */
    vmm_fastassist_state32_t vmmfs32;       /* 32-bit flavor */
} vmm_fastassist_state_t;
#pragma pack()

#pragma pack(4)                             /* Make sure the structure stays as we defined it */
typedef struct vmm_state_page_t {
    /* This structure must remain below 4KB (one page) in size */
    vmm_version_t       interface_version;
    vmm_thread_index_t  thread_index;
    unsigned int        vmmStat;            /* Note: this field is identical to vmmFlags in vmmCntrlEntry */
    unsigned int        vmmCntrl;
#define vmmFloatLoad    0x80000000
#define vmmFloatLoadb   0
#define vmmVectLoad     0x40000000
#define vmmVectLoadb    1
#define vmmVectVRall    0x20000000
#define vmmVectVRallb   2
#define vmmVectVAss     0x10000000
#define vmmVectVAssb    3
#define vmmXStart       0x08000000
#define vmmXStartb      4
#define vmmKey          0x04000000
#define vmmKeyb         5
#define vmmFamEna       0x02000000
#define vmmFamEnab      6
#define vmmFamSet       0x01000000
#define vmmFamSetb      7

    vmm_return_code_t   return_code;
    vmm_ret_parms_t     vmmRet;

    /* The next portion of the structure must remain 32-byte aligned */
    vmm_processor_state_t vmm_proc_state;

    /* The next portion of the structure must remain 16-byte aligned */
    vmm_fastassist_state_t vmm_fastassist_state;

} vmm_state_page_t;
#pragma pack()
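
/*
 * Illustrative only: before launching a guest with kVmmExecuteVM, a monitor
 * typically sets the relevant request bits in vmmCntrl, e.g. asking the kernel
 * to load the guest floating point and vector context on the next launch.  The
 * state page pointer is assumed to come from kVmmInitContext; this is a sketch,
 * not part of the interface.
 */
#if 0
static void vmm_request_fp_vec_load(vmm_state_page_t *sp) {
    sp->vmmCntrl |= (vmmFloatLoad | vmmVectLoad);       /* load FP and vector state at next launch */
}
#endif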

#pragma pack(4)                             /* Make sure the structure stays as we defined it */
typedef struct vmm_comm_page_t {
    union {
        vmm_state_page_t    vmcpState;      /* Reserve area for state */
        unsigned int        vmcpPad[768];   /* Reserve space for 3/4 page state area */
    } vmcpfirst;
    unsigned int        vmcpComm[256];      /* Define last 1024 bytes as a communications area - function specific */
} vmm_comm_page_t;
#pragma pack()
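
/*
 * Illustrative only: per the comments above, the state area is expected to fit
 * in the first 3/4 of the page (768 words = 3072 bytes) and the whole
 * communications page to be exactly one 4096-byte page.  A compile-time check
 * along these lines could guard against accidental growth; it is not part of
 * the original header.
 */
#if 0
typedef char vmm_state_page_size_check[(sizeof(vmm_state_page_t) <= 3072) ? 1 : -1];
typedef char vmm_comm_page_size_check[(sizeof(vmm_comm_page_t) == 4096) ? 1 : -1];
#endif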

enum {
    /* Function Indices (passed in r3) */
    kVmmGetVersion = 0,         /* Get VMM system version */
    kVmmvGetFeatures,           /* Get VMM supported features */
    kVmmInitContext,            /* Initialize a context */
    kVmmTearDownContext,        /* Destroy a context */
    kVmmTearDownAll,            /* Destroy all contexts */
    kVmmMapPage,                /* Map a host page into a guest address space */
    kVmmGetPageMapping,         /* Get host address of a guest page */
    kVmmUnmapPage,              /* Unmap a guest page */
    kVmmUnmapAllPages,          /* Unmap all pages in a guest address space */
    kVmmGetPageDirtyFlag,       /* Check if guest page modified */
    kVmmGetFloatState,          /* Retrieve guest floating point context */
    kVmmGetVectorState,         /* Retrieve guest vector context */
    kVmmSetTimer,               /* Set a guest timer */
    kVmmGetTimer,               /* Get a guest timer */
    kVmmExecuteVM,              /* Launch a guest */
    kVmmProtectPage,            /* Set protection attributes for a guest page */
    kVmmMapExecute,             /* Map guest page and launch */
    kVmmProtectExecute,         /* Set prot attributes and launch */
    kVmmMapList,                /* Map a list of pages into guest address spaces */
    kVmmUnmapList,              /* Unmap a list of pages from guest address spaces */
    kvmmExitToHost,
    kvmmResumeGuest,
    kvmmGetGuestRegister,
    kvmmSetGuestRegister,

    kVmmSetXA,                  /* Set extended architecture features for a VM */
    kVmmGetXA,                  /* Get extended architecture features from a VM */

    kVmmMapPage64,              /* Map a host page into a guest address space - supports 64-bit */
    kVmmGetPageMapping64,       /* Get host address of a guest page - supports 64-bit */
    kVmmUnmapPage64,            /* Unmap a guest page - supports 64-bit */
    kVmmGetPageDirtyFlag64,     /* Check if guest page modified - supports 64-bit */
    kVmmProtectPage64,          /* Set protection attributes for a guest page - supports 64-bit */
    kVmmMapExecute64,           /* Map guest page and launch - supports 64-bit */
    kVmmProtectExecute64,       /* Set prot attributes and launch - supports 64-bit */
    kVmmMapList64,              /* Map a list of pages into guest address spaces - supports 64-bit */
    kVmmUnmapList64,            /* Unmap a list of pages from guest address spaces - supports 64-bit */
    kVmmMaxAddr,                /* Returns the maximum virtual address that is mappable */
};

#define kVmmReturnNull              0
#define kVmmBogusContext            1
#define kVmmStopped                 2
#define kVmmReturnDataPageFault     3
#define kVmmReturnInstrPageFault    4
#define kVmmReturnAlignmentFault    6
#define kVmmReturnProgramException  7
#define kVmmReturnSystemCall        12
#define kVmmReturnTraceException    13
#define kVmmAltivecAssist           22
#define kVmmInvalidAddress          0x1000
#define kVmmInvalidAdSpace          0x1001
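
/*
 * Illustrative only: after kVmmExecuteVM returns, the monitor inspects
 * return_code in its state page and reacts accordingly.  The skeleton below is
 * a hypothetical sketch of that dispatch, not the actual monitor logic.
 */
#if 0
static void handle_guest_exit(vmm_state_page_t *sp) {
    switch (sp->return_code) {
        case kVmmReturnDataPageFault:
        case kVmmReturnInstrPageFault:
            /* map or protect the faulting page, then re-launch the guest */
            break;
        case kVmmReturnSystemCall:
            /* emulate the guest system call */
            break;
        case kVmmStopped:
            /* another thread asked this guest to stop */
            break;
        default:
            /* alignment, program, trace, etc.: reflect into the guest */
            break;
    }
}
#endif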

/*
 *  Notes on guest address spaces.
 *
 *  Address spaces are loosely coupled to virtual machines.  The default is for
 *  a guest with an index of 1 to use address space 1, 2 to use 2, etc.  However,
 *  any guest may be launched using any address space and any address space may be the
 *  target for a map or unmap function.  Note that the (un)map list functions may pass in
 *  an address space ID on a page-by-page basis.
 *
 *  An address space is instantiated either explicitly by mapping something into it, or
 *  implicitly by launching a guest with it.
 *
 *  An address space is destroyed explicitly by kVmmTearDownAll or kVmmUnmapAllPages.  It is
 *  destroyed implicitly by kVmmTearDownContext.  The latter is done in order to remain
 *  backwards compatible with the previous implementation, which does not have decoupled
 *  guests and address spaces.
 *
 *  An address space supports the maximum virtual address supported by the processor.
 *  The 64-bit variant of the mapping functions can be used on non-64-bit machines.  If an
 *  unmappable address (e.g., an address larger than 4GB-1 on a 32-bit machine) is requested,
 *  the operation fails with a kVmmInvalidAddress return code.
 *
 *  Note that for 64-bit calls, both host and guest addresses are specified as 64-bit values.
 *
 */



/*
 *  Storage Extended Protection modes
 *  Notes:
 *      To keep compatibility, vmmKey and the PPC key have reversed meanings,
 *      i.e., vmmKey 0 is PPC key 1 and vice versa.
 *
 *                      vmmKey                          Notes
 *      Mode            0               1
 *
 *      kVmmProtNARW    not accessible  read/write      VM_PROT_NONE (not settable via VM calls)
 *      kVmmProtRORW    read only       read/write
 *      kVmmProtRWRW    read/write      read/write      VM_PROT_WRITE or (VM_PROT_WRITE | VM_PROT_READ)
 *      kVmmProtRORO    read only       read only       VM_PROT_READ
 *
 */

#define kVmmProtXtnd 0x00000008
#define kVmmProtNARW (kVmmProtXtnd | 0x00000000)
#define kVmmProtRORW (kVmmProtXtnd | 0x00000001)
#define kVmmProtRWRW (kVmmProtXtnd | 0x00000002)
#define kVmmProtRORO (kVmmProtXtnd | 0x00000003)

/*
 *  Map list formats
 *  The last 12 bits in the guest virtual address are used as flags as follows:
 *      0x007 - for the map calls, this is the key to set
 *      0x3F0 - for both map and unmap, this is the address space ID upon which to operate.
 *              Note that if 0, the address space ID from the function call is used instead.
 */

typedef struct vmmMList {
    unsigned int        vmlva;              /* Virtual address in host address space */
    unsigned int        vmlava;             /* Virtual address in guest address space */
} vmmMList;

typedef struct vmmMList64 {
    unsigned long long  vmlva;              /* Virtual address in host address space */
    unsigned long long  vmlava;             /* Virtual address in guest address space */
} vmmMList64;

typedef struct vmmUMList {
    unsigned int        vmlava;             /* Virtual address in guest address space */
} vmmUMList;

typedef struct vmmUMList64 {
    unsigned long long  vmlava;             /* Virtual address in guest address space */
} vmmUMList64;

#define vmmlFlgs 0x00000FFF                 /* Flags passed in the low-order 12 bits of vmlava */
#define vmmlProt 0x00000007                 /* Protection flags for the page */
#define vmmlAdID 0x000003F0                 /* Guest address space ID - used only if non-zero */
#define vmmlRsvd 0x00000C08                 /* Reserved for future */
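
/*
 * Illustrative only: when building an entry for kVmmMapList, the low 12 bits of
 * the guest virtual address (vmmlFlgs) carry the protection key (vmmlProt) and,
 * optionally, a per-page address space ID (vmmlAdID, 0 meaning "use the ID from
 * the call").  The helper below is a sketch, not part of the interface.
 */
#if 0
static void vmm_mlist_set(vmmMList *e, unsigned int host_va, unsigned int guest_va,
                          unsigned int prot, unsigned int adsp) {
    e->vmlva  = host_va;                        /* host virtual address of the page */
    e->vmlava = (guest_va & ~vmmlFlgs)          /* guest virtual address, page aligned */
              | (prot & vmmlProt)               /* protection key for this page */
              | ((adsp << 4) & vmmlAdID);       /* address space ID; 0 = use call's ID */
}
#endif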

/*************************************************************************************
    Internal Emulation Types
**************************************************************************************/

#define kVmmMaxContexts     32
#define kVmmMaxUnmapPages   64
#define kVmmMaxMapPages     64

#pragma pack(4)                             /* Make sure the structure stays as we defined it */
typedef struct vmmCntrlEntry {              /* Virtual Machine Monitor control table entry */
    unsigned int        vmmFlags;           /* Assorted control flags */
#define vmmInUse        0x80000000
#define vmmInUseb       0
#define vmmFloatCngd    0x40000000
#define vmmFloatCngdb   1
#define vmmVectCngd     0x20000000
#define vmmVectCngdb    2
#define vmmTimerPop     0x10000000
#define vmmTimerPopb    3
#define vmmFAMmode      0x04000000
#define vmmFAMmodeb     5
#define vmmXStop        0x00800000
#define vmmXStopb       8
#define vmmSpfSave      0x000000FF
#define vmmSpfSaveb     24
    unsigned int        vmmXAFlgs;          /* Extended Architecture flags */
    vmm_state_page_t    *vmmContextKern;    /* Kernel address of context communications area */
    ppnum_t             vmmContextPhys;     /* Physical address of context communications area */
    vmm_state_page_t    *vmmContextUser;    /* User address of context communications area */
    facility_context    vmmFacCtx;          /* Header for vector and floating point contexts */
    pmap_t              vmmPmap;            /* Last dispatched pmap */
    uint64_t            vmmTimer;           /* Last set timer value. Zero means unset */
    unsigned int        vmmFAMintercept;    /* FAM intercepted exceptions */
} vmmCntrlEntry;
#pragma pack()

#pragma pack(4)                             /* Make sure the structure stays as we defined it */
typedef struct vmmCntrlTable {              /* Virtual Machine Monitor Control table */
    unsigned int        vmmGFlags;          /* Global flags */
#define vmmLastAdSp 0xFF                    /* Remember the address space that was mapped last */
    addr64_t            vmmLastMap;         /* Last vaddr mapping made */
    vmmCntrlEntry       vmmc[kVmmMaxContexts];  /* One entry for each possible Virtual Machine Monitor context */
    pmap_t              vmmAdsp[kVmmMaxContexts];   /* Guest address space pmaps */
} vmmCntrlTable;
#pragma pack()

/* function decls for kernel level routines... */
extern void vmm_execute_vm(thread_act_t act, vmm_thread_index_t index);
extern vmmCntrlEntry *vmm_get_entry(thread_act_t act, vmm_thread_index_t index);
extern kern_return_t vmm_tear_down_context(thread_act_t act, vmm_thread_index_t index);
extern kern_return_t vmm_get_float_state(thread_act_t act, vmm_thread_index_t index);
extern kern_return_t vmm_get_vector_state(thread_act_t act, vmm_thread_index_t index);
extern kern_return_t vmm_set_timer(thread_act_t act, vmm_thread_index_t index, unsigned int timerhi, unsigned int timerlo);
extern kern_return_t vmm_get_timer(thread_act_t act, vmm_thread_index_t index);
extern void vmm_tear_down_all(thread_act_t act);
extern kern_return_t vmm_map_page(thread_act_t act, vmm_thread_index_t hindex, addr64_t cva,
    addr64_t ava, vm_prot_t prot);
extern vmm_return_code_t vmm_map_execute(thread_act_t act, vmm_thread_index_t hindex, addr64_t cva,
    addr64_t ava, vm_prot_t prot);
extern kern_return_t vmm_protect_page(thread_act_t act, vmm_thread_index_t hindex, addr64_t va,
    vm_prot_t prot);
extern vmm_return_code_t vmm_protect_execute(thread_act_t act, vmm_thread_index_t hindex, addr64_t va,
    vm_prot_t prot);
extern addr64_t vmm_get_page_mapping(thread_act_t act, vmm_thread_index_t index,
    addr64_t va);
extern kern_return_t vmm_unmap_page(thread_act_t act, vmm_thread_index_t index, addr64_t va);
extern void vmm_unmap_all_pages(thread_act_t act, vmm_thread_index_t index);
extern boolean_t vmm_get_page_dirty_flag(thread_act_t act, vmm_thread_index_t index,
    addr64_t va, unsigned int reset);
extern kern_return_t vmm_set_XA(thread_act_t act, vmm_thread_index_t index, unsigned int xaflags);
extern unsigned int vmm_get_XA(thread_act_t act, vmm_thread_index_t index);
extern int vmm_get_features(struct savearea *);
extern int vmm_get_version(struct savearea *);
extern int vmm_init_context(struct savearea *);
extern int vmm_dispatch(struct savearea *);
extern int vmm_exit(thread_act_t act, struct savearea *);
extern void vmm_force_exit(thread_act_t act, struct savearea *);
extern int vmm_stop_vm(struct savearea *save);
extern void vmm_timer_pop(thread_act_t act);
extern void vmm_interrupt(ReturnHandler *rh, thread_act_t act);
extern kern_return_t vmm_map_list(thread_act_t act, vmm_thread_index_t index, unsigned int cnt, unsigned int flavor);
extern kern_return_t vmm_unmap_list(thread_act_t act, vmm_thread_index_t index, unsigned int cnt, unsigned int flavor);
extern vmm_return_code_t vmm_resume_guest(vmm_thread_index_t index, unsigned long pc,
    unsigned long vmmCntrl, unsigned long vmmCntrMaskl);
extern vmm_return_code_t vmm_exit_to_host(vmm_thread_index_t index);
extern unsigned long vmm_get_guest_register(vmm_thread_index_t index, unsigned long reg_index);
extern vmm_return_code_t vmm_set_guest_register(vmm_thread_index_t index, unsigned long reg_index, unsigned long reg_value);
extern addr64_t vmm_max_addr(thread_act_t act);

#endif /* _VEMULATION_H_ */