/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-----------------------------------------------------------------------
** C routines that we are adding to the MacOS X kernel.
**
** Weird Apple PSL stuff goes here...
**
** Until then, Copyright 2000, Connectix
-----------------------------------------------------------------------*/
#include <ppc/exception.h>

#ifndef _VEMULATION_H_
#define _VEMULATION_H_

/*************************************************************************************
	External Emulation Types
**************************************************************************************/
typedef union vmm_vector_register_t {
	unsigned long	v[4];				/* 128-bit vector register; overlay member name is assumed */
} vmm_vector_register_t;
typedef union vmm_fp_register_t {
	double			d;					/* 64-bit floating-point register; overlay member name is assumed */
} vmm_fp_register_t;
typedef struct vmm_processor_state_t {

	unsigned long			ppcGPRs[32];

	unsigned long			ppcMQ;					/* Obsolete */
	unsigned long			ppcVRSave;

	vmm_vector_register_t	ppcVSCR;
	vmm_fp_register_t		ppcFPSCR;

	unsigned long			ppcReserved1[34];		/* Future processor state can go here */

	/* We must be 16-byte aligned here */

	vmm_vector_register_t	ppcVRs[32];
	vmm_vector_register_t	ppcVSCRshadow;

	/* We must be 8-byte aligned here */

	vmm_fp_register_t		ppcFPRs[32];
	vmm_fp_register_t		ppcFPSCRshadow;
	unsigned long			ppcReserved2[2];		/* Pad out to multiple of 16 bytes */
} vmm_processor_state_t;
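/*
 * Illustrative sketch (not part of this header): the register file above is laid
 * out so a monitor can read or write individual guest registers directly in the
 * context area.  "proc" is a hypothetical pointer to a vmm_processor_state_t.
 *
 *	unsigned long arg = proc->ppcGPRs[3];	// read guest GPR3
 *	proc->ppcVRSave = 0;					// mark no vector registers live
 */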
typedef unsigned long vmm_return_code_t;

typedef unsigned long vmm_thread_index_t;
enum {
	kVmmCurMajorVersion		= 0x0001,
	kVmmCurMinorVersion		= 0x0005,
	kVmmMinMajorVersion		= 0x0001,
};

#define kVmmCurrentVersion ((kVmmCurMajorVersion << 16) | kVmmCurMinorVersion)
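/*
 * Illustrative sketch (not part of this header): splitting a packed version word
 * back into its halves when validating what the kernel reports.  The locals are
 * hypothetical.
 *
 *	unsigned long version = kVmmCurrentVersion;
 *	unsigned long major   = (version >> 16) & 0xFFFF;
 *	unsigned long minor   = version & 0xFFFF;
 *	if (major < kVmmMinMajorVersion) {
 *		// interface too old for this monitor
 *	}
 */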
typedef unsigned long vmm_features_t;

enum {
	kVmmFeature_LittleEndian		= 0x00000001,
	kVmmFeature_Stop				= 0x00000002,
	kVmmFeature_ExtendedMapping		= 0x00000004,
	kVmmFeature_ListMapping			= 0x00000008,
	kVmmFeature_FastAssist			= 0x00000010,
};

#define kVmmCurrentFeatures (kVmmFeature_LittleEndian | \
	kVmmFeature_Stop | \
	kVmmFeature_ExtendedMapping | \
	kVmmFeature_ListMapping | \
	kVmmFeature_FastAssist)
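/*
 * Illustrative sketch (not part of this header): testing an advertised feature
 * bit before relying on it.  "features" is a hypothetical local holding the word
 * the kernel returns from its get-features call.
 *
 *	vmm_features_t features = kVmmCurrentFeatures;
 *	if (features & kVmmFeature_ListMapping) {
 *		// batch map/unmap requests instead of issuing them one page at a time
 *	}
 */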
typedef unsigned long vmm_version_t;
typedef struct vmm_fastassist_state_t {
	unsigned long	fastassist_dispatch;
	unsigned long	fastassist_refcon;

	unsigned long	fastassist_dispatch_code;
	unsigned long	fastassist_parameter[5];

	unsigned long	guest_register[8];

	unsigned long	guest_pc;
	unsigned long	guest_msr;

	unsigned long	fastassist_intercepts;
	unsigned long	fastassist_reserved1;
} vmm_fastassist_state_t;
typedef struct vmm_state_page_t {
	/* This structure must remain below 4Kb (one page) in size */
	vmm_version_t		interface_version;
	vmm_thread_index_t	thread_index;
	unsigned int		vmmStat;				/* Note: this field is identical to vmmFlags in vmmCntrlEntry */
	unsigned int		vmmCntrl;
#define vmmFloatLoad	0x80000000
#define vmmFloatLoadb	0
#define vmmVectLoad		0x40000000
#define vmmVectLoadb	1
#define vmmVectVRall	0x20000000
#define vmmVectVRallb	2
#define vmmVectVAss		0x10000000
#define vmmVectVAssb	3
#define vmmXStart		0x08000000
#define vmmKey			0x04000000
#define vmmFamEna		0x02000000
#define vmmFamSet		0x01000000

	vmm_return_code_t	return_code;
	unsigned long		return_params[4];
	unsigned long		gas[7];					/* For alignment */

	/* The next portion of the structure must remain 32-byte aligned */
	vmm_processor_state_t	vmm_proc_state;

	/* The next portion of the structure must remain 16-byte aligned */
	vmm_fastassist_state_t	vmm_fastassist_state;

} vmm_state_page_t;
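/*
 * Illustrative sketch (not part of this header): before resuming a guest, a
 * monitor can ask the kernel to reload floating-point and vector state by
 * setting control bits in the shared state page.  The helper is hypothetical.
 *
 *	static void request_fp_and_vector_reload(vmm_state_page_t *state)
 *	{
 *		state->vmmCntrl |= (vmmFloatLoad | vmmVectLoad);
 *	}
 */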
typedef struct vmm_comm_page_t {
	union {
		vmm_state_page_t	vmcpState;		/* Reserve area for state */
		unsigned int		vmcpPad[768];	/* Reserve space for 3/4 page state area */
	};
	unsigned int		vmcpComm[256];		/* Define last 1024 bytes as a communications area - function specific */
} vmm_comm_page_t;
/* Function Indices (passed in r3) */

	kVmmGetPageDirtyFlag,
	kvmmGetGuestRegister,
	kvmmSetGuestRegister,
#define kVmmReturnNull					0
#define kVmmBogusContext				1
#define kVmmStopped						2
#define kVmmReturnDataPageFault			3
#define kVmmReturnInstrPageFault		4
#define kVmmReturnAlignmentFault		6
#define kVmmReturnProgramException		7
#define kVmmReturnSystemCall			12
#define kVmmReturnTraceException		13
#define kVmmAltivecAssist				22
#define kVmmInvalidAddress				4096
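/*
 * Illustrative sketch (not part of this header): a monitor's run loop usually
 * switches on the return code left in the state page when the kernel hands
 * control back.  handle_guest_fault() is a hypothetical helper, and the use of
 * return_params here is an assumption about the fault information layout.
 *
 *	switch (state->return_code) {
 *		case kVmmReturnDataPageFault:
 *		case kVmmReturnInstrPageFault:
 *			handle_guest_fault(state->return_params[0]);
 *			break;
 *		case kVmmStopped:
 *			break;			// another thread asked this guest to stop
 *		default:
 *			break;
 *	}
 */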
/*
 *	Storage Extended Protection modes
 *	To keep compatibility, vmmKey and the PPC key have reversed meanings,
 *	i.e., vmmKey 0 is PPC key 1 and vice versa.
 *
 *	kVmmProtNARW	not accessible	read/write	VM_PROT_NONE (not settable via VM calls)
 *	kVmmProtRORW	read only		read/write
 *	kVmmProtRWRW	read/write		read/write	VM_PROT_WRITE or (VM_PROT_WRITE | VM_PROT_READ)
 *	kVmmProtRORO	read only		read only	VM_PROT_READ
 */

#define kVmmProtXtnd 0x00000008
#define kVmmProtNARW (kVmmProtXtnd | 0x00000000)
#define kVmmProtRORW (kVmmProtXtnd | 0x00000001)
#define kVmmProtRWRW (kVmmProtXtnd | 0x00000002)
#define kVmmProtRORO (kVmmProtXtnd | 0x00000003)
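/*
 * Illustrative sketch (not part of this header): callers pass one of the
 * composite kVmmProt* constants rather than a raw VM_PROT_* value, e.g. when
 * mapping a page the guest may only read.  The locals act, index, cva and ava
 * are hypothetical; vmm_map_page is declared below.
 *
 *	vmm_map_page(act, index, cva, ava, kVmmProtRORO);
 */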
typedef struct vmmMapList {
	unsigned int	vmlva;					/* Virtual address in emulator address space */
	unsigned int	vmlava;					/* Virtual address in alternate address space */
#define vmlFlgs 0x00000FFF					/* Flags passed in vmlava low order 12 bits */
#define vmlProt 0x00000003					/* Protection flags for the page */
} vmmMapList;
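/*
 * Illustrative sketch (not part of this header): each list entry packs the page
 * protection into the low-order bits of the alternate-space address, which are
 * otherwise unused for page-aligned addresses.  The locals and the exact flag
 * encoding shown here are assumptions.
 *
 *	vmmMapList entry;
 *	entry.vmlva  = emulator_va;
 *	entry.vmlava = (guest_va & ~(unsigned int)vmlFlgs) | (kVmmProtRORO & vmlProt);
 */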
/*************************************************************************************
	Internal Emulation Types
**************************************************************************************/

#define kVmmMaxContextsPerThread	32
#define kVmmMaxUnmapPages			64
#define kVmmMaxMapPages				64
typedef struct vmmCntrlEntry {					/* Virtual Machine Monitor control table entry */
	unsigned int		vmmFlags;				/* Assorted control flags */
#define vmmInUse		0x80000000
#define vmmFloatCngd	0x40000000
#define vmmFloatCngdb	1
#define vmmVectCngd		0x20000000
#define vmmVectCngdb	2
#define vmmTimerPop		0x10000000
#define vmmTimerPopb	3
#define vmmMapDone		0x08000000
#define vmmMapDoneb		4
#define vmmFAMmode		0x04000000
#define vmmFAMmodeb		5
#define vmmXStop		0x00800000
#define vmmSpfSave		0x000000FF
#define vmmSpfSaveb		24
	pmap_t				vmmPmap;				/* pmap for alternate context's view of task memory */
	vmm_state_page_t	*vmmContextKern;		/* Kernel address of context communications area */
	vmm_state_page_t	*vmmContextPhys;		/* Physical address of context communications area */
	vmm_state_page_t	*vmmContextUser;		/* User address of context communications area */
	facility_context	vmmFacCtx;				/* Header for vector and floating point contexts */
	uint64_t			vmmTimer;				/* Last set timer value. Zero means unset */
	vm_offset_t			vmmLastMap;				/* Last vaddr mapping into virtual machine */
	unsigned int		vmmFAMintercept;		/* FAM intercepted exceptions */
} vmmCntrlEntry;
typedef struct vmmCntrlTable {					/* Virtual Machine Monitor Control table */
	vmmCntrlEntry	vmmc[kVmmMaxContextsPerThread];	/* One entry for each possible Virtual Machine Monitor context */
} vmmCntrlTable;
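/*
 * Illustrative sketch (not part of this header): scanning a control table for a
 * free slot, roughly as a context-allocation path might.  "table" is a
 * hypothetical vmmCntrlTable pointer.
 *
 *	int i;
 *	for (i = 0; i < kVmmMaxContextsPerThread; i++) {
 *		if (!(table->vmmc[i].vmmFlags & vmmInUse)) break;	// unused entry
 *	}
 */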
/* function decls for kernel level routines... */

extern void				vmm_execute_vm(thread_act_t act, vmm_thread_index_t index);
extern vmmCntrlEntry	*vmm_get_entry(thread_act_t act, vmm_thread_index_t index);
extern kern_return_t	vmm_tear_down_context(thread_act_t act, vmm_thread_index_t index);
extern kern_return_t	vmm_get_float_state(thread_act_t act, vmm_thread_index_t index);
extern kern_return_t	vmm_get_vector_state(thread_act_t act, vmm_thread_index_t index);
extern kern_return_t	vmm_set_timer(thread_act_t act, vmm_thread_index_t index, unsigned int timerhi, unsigned int timerlo);
extern kern_return_t	vmm_get_timer(thread_act_t act, vmm_thread_index_t index);
extern void				vmm_tear_down_all(thread_act_t act);
extern kern_return_t	vmm_map_page(thread_act_t act, vmm_thread_index_t hindex, vm_offset_t cva,
							vm_offset_t ava, vm_prot_t prot);
extern vmm_return_code_t	vmm_map_execute(thread_act_t act, vmm_thread_index_t hindex, vm_offset_t cva,
							vm_offset_t ava, vm_prot_t prot);
extern kern_return_t	vmm_protect_page(thread_act_t act, vmm_thread_index_t hindex, vm_offset_t va,
							vm_prot_t prot);
extern vmm_return_code_t	vmm_protect_execute(thread_act_t act, vmm_thread_index_t hindex, vm_offset_t va,
							vm_prot_t prot);
extern vm_offset_t		vmm_get_page_mapping(thread_act_t act, vmm_thread_index_t index,
							vm_offset_t va);
extern kern_return_t	vmm_unmap_page(thread_act_t act, vmm_thread_index_t index, vm_offset_t va);
extern void				vmm_unmap_all_pages(thread_act_t act, vmm_thread_index_t index);
extern boolean_t		vmm_get_page_dirty_flag(thread_act_t act, vmm_thread_index_t index,
							vm_offset_t va, unsigned int reset);
extern int				vmm_get_features(struct savearea *);
extern int				vmm_get_version(struct savearea *);
extern int				vmm_init_context(struct savearea *);
extern int				vmm_dispatch(struct savearea *);
extern int				vmm_exit(thread_act_t act, struct savearea *);
extern void				vmm_force_exit(thread_act_t act, struct savearea *);
extern int				vmm_stop_vm(struct savearea *save);
extern void				vmm_timer_pop(thread_act_t act);
extern void				vmm_interrupt(ReturnHandler *rh, thread_act_t act);
extern kern_return_t	vmm_map_list(thread_act_t act, vmm_thread_index_t index, unsigned int cnt);
extern kern_return_t	vmm_unmap_list(thread_act_t act, vmm_thread_index_t index, unsigned int cnt);
extern vmm_return_code_t	vmm_resume_guest(vmm_thread_index_t index, unsigned long pc,
							unsigned long vmmCntrl, unsigned long vmmCntrMaskl);
extern vmm_return_code_t	vmm_exit_to_host(vmm_thread_index_t index);
extern unsigned long	vmm_get_guest_register(vmm_thread_index_t index, unsigned long reg_index);
extern vmm_return_code_t	vmm_set_guest_register(vmm_thread_index_t index, unsigned long reg_index, unsigned long reg_value);

#endif	/* _VEMULATION_H_ */