Commit | Line | Data |
---|---|---|
1c79356b A |
1 | /* |
2 | * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
43866e37 | 6 | * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. |
1c79356b | 7 | * |
43866e37 A |
8 | * This file contains Original Code and/or Modifications of Original Code |
9 | * as defined in and that are subject to the Apple Public Source License | |
10 | * Version 2.0 (the 'License'). You may not use this file except in | |
11 | * compliance with the License. Please obtain a copy of the License at | |
12 | * http://www.opensource.apple.com/apsl/ and read it before using this | |
13 | * file. | |
14 | * | |
15 | * The Original Code and all software distributed under the License are | |
16 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
1c79356b A |
17 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
18 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
43866e37 A |
19 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
20 | * Please see the License for the specific language governing rights and | |
21 | * limitations under the License. | |
1c79356b A |
22 | * |
23 | * @APPLE_LICENSE_HEADER_END@ | |
24 | */ | |
25 | #include <assym.s> | |
26 | #include <debug.h> | |
27 | #include <ppc/asm.h> | |
28 | #include <ppc/proc_reg.h> | |
29 | #include <ppc/exception.h> | |
30 | ||
31 | /* | |
32 | * This file contains implementations for the Virtual Machine Monitor | |
33 | * facility. | |
34 | */ | |
35 | ||
36 | ||
37 | /* | |
38 | * int vmm_dispatch(savearea, act); | |
39 | ||
40 | * vmm_dispatch is a PPC only system call. It is used with a selector (first | |
41 | * parameter) to determine what function to enter. This is treated as an extension | |
42 | * of hw_exceptions. | |
43 | * | |
44 | * Inputs: | |
45 | * R4 = current activation | |
46 | * R16 = current thread | |
47 | * R30 = current savearea | |
48 | */ | |
49 | ||
de355530 | 50 | .align 5 /* Line up on cache line */ |
1c79356b A |
51 | .globl EXT(vmm_dispatch_table) |
52 | ||
53 | LEXT(vmm_dispatch_table) | |
54 | ||
55 | /* Don't change the order of these routines in the table. It's */ | |
56 | /* OK to add new routines, but they must be added at the bottom. */ | |
57 | ||
58 | .long EXT(vmm_get_version_sel) ; Get the version of the VMM interface | |
d7e50217 | 59 | .long 0 ; Not valid in Fam |
1c79356b | 60 | .long EXT(vmm_get_features_sel) ; Get the features of the VMM interface |
d7e50217 | 61 | .long 0 ; Not valid in Fam |
1c79356b | 62 | .long EXT(vmm_init_context_sel) ; Initializes a new VMM context |
d7e50217 | 63 | .long 0 ; Not valid in Fam |
1c79356b | 64 | .long EXT(vmm_tear_down_context) ; Tears down a previously-allocated VMM context |
d7e50217 | 65 | .long 0 ; Not valid in Fam |
1c79356b | 66 | .long EXT(vmm_tear_down_all) ; Tears down all VMMs |
d7e50217 | 67 | .long 0 ; Not valid in Fam |
de355530 | 68 | .long EXT(vmm_map_page) ; Maps a page from the main address space into the VM space |
d7e50217 | 69 | .long 1 ; Valid in Fam |
de355530 | 70 | .long EXT(vmm_get_page_mapping) ; Returns client va associated with VM va |
d7e50217 | 71 | .long 1 ; Valid in Fam |
de355530 | 72 | .long EXT(vmm_unmap_page) ; Unmaps a page from the VM space |
d7e50217 | 73 | .long 1 ; Valid in Fam |
de355530 | 74 | .long EXT(vmm_unmap_all_pages) ; Unmaps all pages from the VM space |
d7e50217 | 75 | .long 1 ; Valid in Fam |
de355530 | 76 | .long EXT(vmm_get_page_dirty_flag) ; Gets the change bit for a page and optionally clears it |
d7e50217 | 77 | .long 1 ; Valid in Fam |
1c79356b | 78 | .long EXT(vmm_get_float_state) ; Gets current floating point state |
d7e50217 | 79 | .long 0 ; Not valid in Fam
1c79356b | 80 | .long EXT(vmm_get_vector_state) ; Gets current vector state |
d7e50217 | 81 | .long 0 ; Not valid in Fam |
1c79356b | 82 | .long EXT(vmm_set_timer) ; Sets a timer value |
d7e50217 | 83 | .long 1 ; Valid in Fam |
1c79356b | 84 | .long EXT(vmm_get_timer) ; Gets a timer value |
d7e50217 | 85 | .long 1 ; Valid in Fam |
1c79356b | 86 | .long EXT(switchIntoVM) ; Switches to the VM context |
d7e50217 | 87 | .long 1 ; Valid in Fam |
de355530 | 88 | .long EXT(vmm_protect_page) ; Sets protection values for a page |
d7e50217 | 89 | .long 1 ; Valid in Fam |
de355530 | 90 | .long EXT(vmm_map_execute) ; Maps a page and launches VM
d7e50217 | 91 | .long 1 ; Valid in Fam
de355530 | 92 | .long EXT(vmm_protect_execute) ; Sets protection values for a page and launches VM |
d7e50217 | 93 | .long 1 ; Valid in Fam |
de355530 | 94 | .long EXT(vmm_map_list) ; Maps a list of pages |
d7e50217 | 95 | .long 1 ; Valid in Fam |
de355530 | 96 | .long EXT(vmm_unmap_list) ; Unmaps a list of pages |
d7e50217 A |
97 | .long 1 ; Valid in Fam |
98 | .long EXT(vmm_fam_reserved) ; exit from Fam to host | |
99 | .long 1 ; Valid in Fam | |
100 | .long EXT(vmm_fam_reserved) ; resume guest from Fam | |
101 | .long 1 ; Valid in Fam | |
102 | .long EXT(vmm_fam_reserved) ; get guest register from Fam | |
103 | .long 1 ; Valid in Fam | |
104 | .long EXT(vmm_fam_reserved) ; Set guest register from Fam | |
105 | .long 1 ; Valid in Fam | |
d7e50217 A |
106 | |
107 | .set vmm_count,(.-EXT(vmm_dispatch_table))/8 ; Get the top number | |
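;
;			Each entry in the table above is a pair of words: the handler address followed by
;			a flag word that is 1 if the call may be issued while in FAM (fast assist mode)
;			and 0 if it must be intercepted to the host.  That 8-byte pitch is why vmm_count
;			divides the table length by 8, and why the dispatch code below scales the selector
;			by 8 (rlwinm r11,r11,3,0,28) before indexing.  A purely illustrative C picture of
;			one entry (not part of the interface definition):
;
;				struct vmm_dispatch_entry { long (*routine)(); long valid_in_fam; };
;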
1c79356b A |
108 | |
109 | ||
110 | .align 5 | |
111 | .globl EXT(vmm_dispatch) | |
112 | ||
113 | LEXT(vmm_dispatch) | |
114 | ||
de355530 | 115 | lwz r11,saver3(r30) ; Get the selector |
1c79356b A |
116 | mr r3,r4 ; All of our functions want the activation as the first parm |
117 | lis r10,hi16(EXT(vmm_dispatch_table)) ; Get top half of table | |
118 | cmplwi r11,kVmmExecuteVM ; Should we switch to the VM now? | |
119 | cmplwi cr1,r11,vmm_count ; See if we have a valid selector | |
120 | ori r10,r10,lo16(EXT(vmm_dispatch_table)) ; Get low half of table | |
de355530 | 121 | lwz r4,saver4(r30) ; Get 1st parameter after selector |
1c79356b | 122 | beq+ EXT(switchIntoVM) ; Yes, go switch to it.... |
d7e50217 | 123 | rlwinm r11,r11,3,0,28 ; Index into table |
de355530 | 124 | bgt- cr1,vmmBogus ; It is a bogus entry |
d7e50217 A |
125 | add r12,r10,r11 ; Get the vmm dispatch syscall entry |
126 | mfsprg r10,0 ; Get the per_proc | |
127 | lwz r13,0(r12) ; Get address of routine | |
128 | lwz r12,4(r12) ; Get validity flag | |
129 | lwz r5,spcFlags(r10) ; Get per_proc special flags | |
130 | cmpwi cr1,r12,0 ; Check Fam valid | |
131 | rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit | |
132 | crand cr0_eq,cr1_eq,cr0_gt ; In Fam and Invalid syscall | |
133 | beq vmmBogus ; Intercept to host | |
de355530 A |
134 | lwz r5,saver5(r30) ; Get 2nd parameter after selector |
135 | lwz r6,saver6(r30) ; Get 3rd parameter after selector | |
d7e50217 | 136 | mtlr r13 ; Set the routine address |
de355530 | 137 | lwz r7,saver7(r30) ; Get 4th parameter after selector |
d7e50217 | 138 | ; |
de355530 A |
139 | ; NOTE: currently the most parameters for any call is 4. We will support at most 8 because we
140 | ; do not want to get into any stack based parms. However, here is where we need to add | |
141 | ; code for the 5th - 8th parms if we need them. | |
1c79356b A |
142 | ; |
143 | ||
144 | blrl ; Call function | |
de355530 A |
145 | |
146 | stw r3,saver3(r30) ; Pass back the return code | |
1c79356b A |
147 | li r3,1 ; Set normal return with check for AST |
148 | b EXT(ppcscret) ; Go back to handler... | |
149 | ||
d7e50217 A |
150 | vmmBogus: |
151 | mfsprg r10,0 ; Get the per_proc | |
152 | mfsprg r3,1 ; Load current activation | |
153 | lwz r5,spcFlags(r10) ; Get per_proc special flags | |
154 | rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit | |
155 | bne vmmexitcall ; Do it to it | |
156 | li r3,0 ; Bogus selector, treat like a bogus system call | |
1c79356b A |
157 | b EXT(ppcscret) ; Go back to handler... |
158 | ||
159 | ||
160 | .align 5 | |
161 | .globl EXT(vmm_get_version_sel) | |
162 | ||
163 | LEXT(vmm_get_version_sel) ; Selector based version of get version | |
164 | ||
165 | lis r3,hi16(EXT(vmm_get_version)) | |
166 | ori r3,r3,lo16(EXT(vmm_get_version)) | |
167 | b selcomm | |
168 | ||
169 | ||
170 | .align 5 | |
171 | .globl EXT(vmm_get_features_sel) | |
172 | ||
173 | LEXT(vmm_get_features_sel) ; Selector based version of get features | |
174 | ||
0b4e3aa0 A |
175 | lis r3,hi16(EXT(vmm_get_features)) |
176 | ori r3,r3,lo16(EXT(vmm_get_features)) | |
1c79356b A |
177 | b selcomm |
178 | ||
179 | ||
180 | .align 5 | |
181 | .globl EXT(vmm_init_context_sel) | |
182 | ||
183 | LEXT(vmm_init_context_sel) ; Selector based version of init context | |
184 | ||
de355530 A |
185 | lwz r4,saver4(r30) ; Get the passed in version |
186 | lwz r5,saver5(r30) ; Get the passed in comm area | |
0b4e3aa0 | 187 | lis r3,hi16(EXT(vmm_init_context)) |
de355530 | 188 | stw r4,saver3(r30) ; Cheat and move this parameter over |
0b4e3aa0 | 189 | ori r3,r3,lo16(EXT(vmm_init_context)) |
de355530 | 190 | stw r5,saver4(r30) ; Cheat and move this parameter over |
1c79356b A |
191 | |
192 | selcomm: mtlr r3 ; Set the real routine address | |
193 | mr r3,r30 ; Pass in the savearea | |
194 | blrl ; Call the function | |
195 | b EXT(ppcscret) ; Go back to handler... | |
196 | ||
197 | /* | |
198 | * Here is where we transition to the virtual machine. | |
199 | * | |
200 | * We will swap the register context in the savearea with that which is saved in our shared | |
201 | * context area. We will validity check a bit and clear any nasty bits in the MSR and force | |
202 | * the mandatory ones on. | |
203 | * | |
204 | * Then we will setup the new address space to run with, and anything else that is normally part | |
205 | * of a context switch. | |
206 | * | |
0b4e3aa0 A |
207 | * The vmm_execute_vm entry point is for the fused vmm_map_execute and vmm_protect_execute |
208 | * calls. This is called, but never returned from. We always go directly back to the | |
209 | * user from here. | |
210 | * | |
de355530 A |
211 | * Still need to figure out final floats and vectors. For now, we will go brute |
212 | * force and when we go into the VM, we will force save any normal floats and | |
213 | * vectors. Then we will hide them and swap the VM copy (if any) into the normal | |
214 | * chain. When we exit VM we will do the opposite. This is not as fast as I would | |
215 | * like it to be. | |
216 | * | |
1c79356b A |
217 | * |
218 | */ | |
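;
;			The "clear any nasty bits and force the mandatory ones on" step mentioned above is
;			done in swapCtxt: the guest MSR is ANDed with MSR_IMPORT_BITS (the bits user code is
;			allowed to supply) and then ORed with MSR_EXPORT_MASK_SET (the bits that are always
;			required), i.e. roughly
;
;				new_msr = (guest_msr & MSR_IMPORT_BITS) | MSR_EXPORT_MASK_SET
;
;			so the VM context cannot hand itself MSR bits it is not permitted to control.
;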
219 | ||
0b4e3aa0 A |
220 | |
221 | .align 5 | |
222 | .globl EXT(vmm_execute_vm) | |
223 | ||
224 | LEXT(vmm_execute_vm) | |
0b4e3aa0 A |
225 | lwz r30,ACT_MACT_PCB(r3) ; Restore the savearea pointer because it could be trash here |
226 | b EXT(switchIntoVM) ; Join common... | |
227 | ||
228 | ||
1c79356b A |
229 | .align 5 |
230 | .globl EXT(switchIntoVM) | |
231 | ||
232 | LEXT(switchIntoVM) | |
d7e50217 | 233 | mfsprg r10,0 ; Get the per_proc |
de355530 A |
234 | lwz r15,spcFlags(r10) ; Get per_proc special flags |
235 | rlwinm. r0,r15,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit | |
236 | bne vmmFamGuestResume | |
237 | lwz r5,vmmControl(r3) ; Pick up the control table address | |
1c79356b | 238 | subi r4,r4,1 ; Switch to zero offset |
de355530 | 239 | rlwinm. r2,r5,0,0,30 ; Is there a context there? (Note: we will ignore bit 31 so that we |
1c79356b | 240 | ; do not try this while we are transitioning off to on |
de355530 | 241 | cmplwi cr1,r4,kVmmMaxContextsPerThread ; Is the index valid? |
1c79356b A |
242 | beq- vmmBogus ; Not started, treat like a bogus system call |
243 | mulli r2,r4,vmmCEntrySize ; Get displacement from index | |
de355530 A |
244 | bgt- cr1,swvmmBogus ; Index is bogus... |
245 | add r2,r2,r5 ; Point to the entry | |
246 | ||
1c79356b A |
247 | lwz r4,vmmFlags(r2) ; Get the flags for the selected entry |
248 | lwz r5,vmmContextKern(r2) ; Get the context area address | |
249 | rlwinm. r26,r4,0,vmmInUseb,vmmInUseb ; See if the slot is in use | |
de355530 A |
250 | bne+ swvmChkIntcpt ; We are so cool. Go do check for immediate intercepts... |
251 | ||
252 | swvmmBogus: li r2,kVmmBogusContext ; Set bogus index return | |
253 | li r3,1 ; Set normal return with check for AST | |
254 | stw r2,saver3(r30) ; Pass back the return code | |
255 | b EXT(ppcscret) ; Go back to handler... | |
256 | ||
1c79356b A |
257 | ; |
258 | ; Here we check for any immediate intercepts. So far, the only | |
0b4e3aa0 A |
259 | ; two of these are a timer pop and an external stop. We will not dispatch if |
260 | ; either is true. They need to either reset the timer (i.e. set timer | |
261 | ; to 0) or set a future time, or, if it is an external stop, set the vmmXStopRst flag. |
1c79356b A |
262 | ; |
263 | ||
de355530 | 264 | swvmChkIntcpt: |
0b4e3aa0 A |
265 | lwz r6,vmmCntrl(r5) ; Get the control field |
266 | rlwinm. r7,r6,0,vmmXStartb,vmmXStartb ; Clear all but start bit | |
267 | beq+ swvmChkStop ; Do not reset stop | |
268 | andc r6,r6,r7 ; Clear it | |
269 | li r8,vmmFlags ; Point to the flags | |
270 | stw r6,vmmCntrl(r5) ; Set the control field | |
271 | ||
272 | swvmtryx: lwarx r4,r8,r2 ; Pick up the flags | |
273 | rlwinm r4,r4,0,vmmXStopb+1,vmmXStopb-1 ; Clear the stop bit | |
274 | stwcx. r4,r8,r2 ; Save the updated field | |
275 | bne- swvmtryx ; Try again... | |
276 | ||
277 | swvmChkStop: | |
278 | rlwinm. r26,r4,0,vmmXStopb,vmmXStopb ; Is this VM stopped? | |
de355530 A |
279 | beq+ swvmNoStop ; Nope... |
280 | ||
281 | li r2,kVmmStopped ; Set stopped return | |
282 | li r3,1 ; Set normal return with check for AST | |
283 | stw r2,saver3(r30) ; Pass back the return code | |
284 | stw r2,return_code(r5) ; Save the exit code | |
285 | b EXT(ppcscret) ; Go back to handler... | |
0b4e3aa0 | 286 | |
de355530 | 287 | swvmNoStop: |
9bccf70c | 288 | rlwinm. r26,r4,0,vmmTimerPopb,vmmTimerPopb ; Did the timer go pop? |
de355530 A |
289 | beq+ swvmDoSwitch ; No... |
290 | ||
291 | li r2,kVmmReturnNull ; Set null return | |
292 | li r3,1 ; Set normal return with check for AST | |
293 | stw r2,saver3(r30) ; Pass back the return code | |
294 | stw r2,return_code(r5) ; Save the exit code | |
295 | b EXT(ppcscret) ; Go back to handler... | |
1c79356b A |
296 | |
297 | ; | |
298 | ; Here is where we actually swap into the VM (alternate) context. | |
299 | ; We will bulk do a wholesale swap of the registers in the context area (the VMs) | |
300 | ; with the ones in the savearea (our main code). During the copy, we will fix up the | |
301 | ; MSR, forcing on a few bits and turning off a few others. Then we will deal with the | |
302 | ; PMAP and other per_proc stuff. Finally, we will exit back through the main exception | |
303 | ; handler to deal with unstacking saveareas and ASTs, etc. | |
304 | ; | |
305 | ||
306 | swvmDoSwitch: | |
307 | ||
308 | ; | |
309 | ; First, we save the volatile registers we care about. Remember, all register | |
310 | ; handling here is pretty funky anyway, so we just pick the ones that are ok. | |
311 | ; | |
312 | mr r26,r3 ; Save the activation pointer | |
1c79356b | 313 | |
9bccf70c A |
314 | la r11,vmmFacCtx(r2) ; Point to the virtual machine facility context |
315 | mr r27,r2 ; Save the context entry | |
316 | stw r11,deferctx(r3) ; Start using the virtual machine facility context when we exit | |
1c79356b A |
317 | |
318 | lwz r11,ACT_MACT_SPF(r26) ; Get the special flags | |
de355530 | 319 | lwz r3,vmmPmap(r27) ; Get the pointer to the PMAP |
1c79356b A |
320 | oris r15,r11,hi16(runningVM) ; Show that we are swapped to the VM right now |
321 | bl EXT(hw_set_user_space_dis) ; Swap the address spaces | |
322 | lwz r17,vmmFlags(r27) ; Get the status flags | |
d7e50217 A |
323 | lwz r20,vmmContextKern(r27) ; Get the state page kernel addr |
324 | lwz r21,vmmCntrl(r20) ; Get vmmCntrl | |
325 | rlwinm. r22,r21,0,vmmFamEnab,vmmFamEnab ; Is vmmFamEnab set? | |
d7e50217 | 326 | beq swvmNoFam ; No Fam intercept |
d7e50217 A |
327 | rlwimi r15,r21,32+vmmFamSetb-FamVMmodebit,FamVMmodebit,FamVMmodebit ; Set FamVMmode bit |
328 | rlwinm r21,r21,0,vmmFamSetb+1,vmmFamSetb-1 ; Clear FamSet bit | |
d7e50217 | 329 | lwz r22,famintercepts(r20) ; Load intercept bit field |
d7e50217 A |
330 | stw r21,vmmCntrl(r20) ; Update vmmCntrl |
331 | lwz r19,vmmContextPhys(r27) ; Get vmmFAMarea address | |
332 | stw r22,vmmFAMintercept(r27) ; Get vmmFAMintercept | |
333 | stw r22,FAMintercept(r10) ; Store vmmFAMintercept in per_proc FAMintercept | |
de355530 | 334 | stw r19,vmmContextPhys(r27) ; Store vmmContextPhys |
d7e50217 A |
335 | stw r19,VMMareaPhys(r10) ; Store VMMareaPhys |
336 | oris r15,r15,hi16(FamVMena) ; Set FamVMenabit | |
337 | swvmNoFam: | |
de355530 | 338 | rlwinm. r0,r17,0,vmmMapDoneb,vmmMapDoneb ; Did we just do a map function? |
1c79356b | 339 | stw r27,vmmCEntry(r26) ; Remember what context we are running |
de355530 A |
340 | andc r17,r17,r0 ; Turn off map flag |
341 | beq+ swvmNoMap ; No mapping done... | |
1c79356b A |
342 | |
343 | ; | |
344 | ; This little bit of hoopala here (triggered by vmmMapDone) is | |
345 | ; a performance enhancement. This will change the returning savearea | |
346 | ; to look like we had a DSI rather than a system call. Then, setting | |
347 | ; the redrive bit, the exception handler will redrive the exception as | |
348 | ; a DSI, entering the last mapped address into the hash table. This keeps | |
349 | ; double faults from happening. Note that there is only a gain if the VM | |
350 | ; takes a fault, then the emulator merely resolves it, and then begins |
351 | ; the VM execution again. It seems like this should be the normal case. | |
352 | ; | |
353 | ||
354 | lwz r3,SAVflags(r30) ; Pick up the savearea flags | |
de355530 | 355 | lwz r2,vmmLastMap(r27) ; Get the last mapped address |
1c79356b A |
356 | li r20,T_DATA_ACCESS ; Change to DSI fault |
357 | oris r3,r3,hi16(SAVredrive) ; Set exception redrive | |
358 | stw r2,savedar(r30) ; Set the DAR to the last thing we mapped | |
359 | stw r3,SAVflags(r30) ; Turn on the redrive request | |
360 | lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss | |
361 | stw r20,saveexception(r30) ; Say we need to emulate a DSI | |
362 | stw r2,savedsisr(r30) ; Pretend we have a PTE miss | |
363 | ||
0b4e3aa0 A |
364 | swvmNoMap: lwz r20,vmmContextKern(r27) ; Get the comm area |
365 | rlwimi r15,r17,32-(floatCngbit-vmmFloatCngdb),floatCngbit,vectorCngbit ; Shift and insert changed bits | |
366 | lwz r20,vmmCntrl(r20) ; Get the control flags | |
1c79356b | 367 | rlwimi r17,r11,8,24,31 ; Save the old spf flags |
0b4e3aa0 | 368 | rlwimi r15,r20,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key |
1c79356b A |
369 | stw r15,spcFlags(r10) ; Set per_proc copy of the special flags |
370 | stw r15,ACT_MACT_SPF(r26) ; Set the special flags |
371 | ||
372 | stw r17,vmmFlags(r27) ; Set the status flags | |
373 | ||
374 | bl swapCtxt ; First, swap the general register state | |
375 | ||
0b4e3aa0 | 376 | lwz r17,vmmContextKern(r27) ; Get the comm area back |
9bccf70c | 377 | la r25,vmmFacCtx(r27) ; Point to the facility context |
0b4e3aa0 | 378 | lwz r15,vmmCntrl(r17) ; Get the control flags again |
9bccf70c | 379 | mfsprg r29,0 ; Get the per_proc |
1c79356b | 380 | |
9bccf70c A |
381 | ; |
382 | ; Check if there is new floating point context to load | |
383 | ; | |
384 | ||
1c79356b | 385 | rlwinm. r0,r15,0,vmmFloatLoadb,vmmFloatLoadb ; Are there new floating point values? |
9bccf70c | 386 | lhz r29,PP_CPU_NUMBER(r29) ; Get our cpu number |
1c79356b A |
387 | li r14,vmmppcFPRs ; Get displacement to the new values |
388 | andc r15,r15,r0 ; Clear the bit | |
389 | beq+ swvmNoNewFloats ; Nope, good... | |
390 | ||
9bccf70c A |
391 | lwz r19,FPUcpu(r25) ; Get the last CPU we ran on |
392 | ||
393 | stw r29,FPUcpu(r25) ; Claim the context for ourselves | |
394 | ||
395 | eieio ; Make sure this stays in order | |
396 | ||
397 | lis r18,hi16(EXT(per_proc_info)) ; Set base per_proc | |
398 | mulli r19,r19,ppSize ; Find offset to the owner per_proc | |
399 | ori r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc | |
400 | li r16,FPUowner ; Displacement to float owner | |
401 | add r19,r18,r19 ; Point to the owner per_proc | |
de355530 | 402 | li r0,0 ; Clear this out |
9bccf70c A |
403 | |
404 | swvminvfpu: lwarx r18,r16,r19 ; Get the owner | |
de355530 A |
405 | cmplw r18,r25 ; Does he still have this context? |
406 | bne swvminvfpv ; Nope... | |
407 | stwcx. r0,r16,r19 ; Try to invalidate it | |
408 | bne- swvminvfpu ; Try again if there was a collision... | |
409 | ||
410 | swvminvfpv: lwz r3,FPUsave(r25) ; Get the FPU savearea | |
9bccf70c | 411 | dcbt r14,r17 ; Touch in first line of new stuff |
1c79356b A |
412 | mr. r3,r3 ; Is there one? |
413 | bne+ swvmGotFloat ; Yes... | |
414 | ||
415 | bl EXT(save_get) ; Get a savearea | |
416 | ||
9bccf70c A |
417 | li r7,SAVfloat ; Get floating point flag |
418 | stw r26,SAVact(r3) ; Save our activation | |
419 | li r0,0 ; Get a zero | |
420 | stb r7,SAVflags+2(r3) ; Set that this is floating point | |
de355530 | 421 | stw r0,SAVprev(r3) ; Clear the back chain |
9bccf70c A |
422 | stw r0,SAVlevel(r3) ; We are always at level 0 (user state) |
423 | ||
424 | stw r3,FPUsave(r25) ; Chain us to context | |
1c79356b A |
425 | |
426 | swvmGotFloat: | |
1c79356b A |
427 | la r4,savefp0(r3) ; Point to the destination |
428 | mr r21,r3 ; Save the save area | |
429 | la r3,vmmppcFPRs(r17) ; Point to the source | |
9bccf70c | 430 | li r5,32*8 ; Get the size (32 FPRs at 8 bytes each) |
1c79356b A |
431 | |
432 | bl EXT(bcopy) ; Copy the new values | |
9bccf70c | 433 | |
de355530 A |
434 | lwz r14,vmmppcFPSCRshadow(r17) ; Get the fpscr pad |
435 | lwz r10,vmmppcFPSCRshadow+4(r17) ; Get the fpscr | |
436 | stw r14,savefpscrpad(r30) ; Save the new fpscr pad | |
437 | stw r10,savefpscr(r30) ; Save the new fpscr | |
438 | ||
1c79356b A |
439 | lwz r11,ACT_MACT_SPF(r26) ; Get the special flags |
440 | stw r15,vmmCntrl(r17) ; Save the control flags sans vmmFloatLoad | |
441 | rlwinm r11,r11,0,floatCngbit+1,floatCngbit-1 ; Clear the changed bit here | |
442 | lwz r14,vmmStat(r17) ; Get the status flags | |
443 | mfsprg r10,0 ; Get the per_proc | |
444 | stw r11,ACT_MACT_SPF(r26) ; Set the special flags |
445 | rlwinm r14,r14,0,vmmFloatCngdb+1,vmmFloatCngdb-1 ; Clear the changed flag | |
446 | stw r11,spcFlags(r10) ; Set per_proc copy of the special flags | |
447 | stw r14,vmmStat(r17) ; Set the status flags sans vmmFloatCngd | |
1c79356b | 448 | |
9bccf70c A |
449 | ; |
450 | ; Check if there is new vector context to load | |
451 | ; | |
452 | ||
1c79356b A |
453 | swvmNoNewFloats: |
454 | rlwinm. r0,r15,0,vmmVectLoadb,vmmVectLoadb ; Are there new vector values? | |
455 | li r14,vmmppcVRs ; Get displacement to the new values | |
456 | andc r15,r15,r0 ; Clear the bit | |
457 | beq+ swvmNoNewVects ; Nope, good... | |
458 | ||
9bccf70c A |
459 | lwz r19,VMXcpu(r25) ; Get the last CPU we ran on |
460 | ||
461 | stw r29,VMXcpu(r25) ; Claim the context for ourselves | |
462 | ||
463 | eieio ; Make sure this stays in order | |
464 | ||
465 | lis r18,hi16(EXT(per_proc_info)) ; Set base per_proc | |
466 | mulli r19,r19,ppSize ; Find offset to the owner per_proc | |
467 | ori r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc | |
468 | li r16,VMXowner ; Displacement to vector owner | |
469 | add r19,r18,r19 ; Point to the owner per_proc | |
de355530 | 470 | li r0,0 ; Clear this out |
9bccf70c A |
471 | |
472 | swvminvvec: lwarx r18,r16,r19 ; Get the owner | |
de355530 A |
473 | cmplw r18,r25 ; Does he still have this context? |
474 | bne swvminvved ; Nope... | |
475 | stwcx. r0,r16,r19 ; Try to invalidate it | |
476 | bne- swvminvvec ; Try again if there was a collision... | |
9bccf70c A |
477 | |
478 | swvminvved: lwz r3,VMXsave(r25) ; Get the vector savearea | |
479 | dcbt r14,r17 ; Touch in first line of new stuff | |
1c79356b A |
480 | mr. r3,r3 ; Is there one? |
481 | bne+ swvmGotVect ; Yes... | |
482 | ||
483 | bl EXT(save_get) ; Get a savearea | |
484 | ||
9bccf70c A |
485 | li r7,SAVvector ; Get the vector type flag |
486 | stw r26,SAVact(r3) ; Save our activation | |
487 | li r0,0 ; Get a zero | |
488 | stb r7,SAVflags+2(r3) ; Set that this is vector | |
de355530 | 489 | stw r0,SAVprev(r3) ; Clear the back chain |
9bccf70c A |
490 | stw r0,SAVlevel(r3) ; We are always at level 0 (user state) |
491 | ||
492 | stw r3,VMXsave(r25) ; Chain us to context | |
1c79356b A |
493 | |
494 | swvmGotVect: | |
1c79356b A |
495 | mr r21,r3 ; Save the pointer to the savearea |
496 | la r4,savevr0(r3) ; Point to the destination | |
497 | la r3,vmmppcVRs(r17) ; Point to the source | |
9bccf70c | 498 | li r5,32*16 ; Get the size (32 vectors at 16 bytes each) |
1c79356b A |
499 | |
500 | bl EXT(bcopy) ; Copy the new values | |
501 | ||
de355530 A |
502 | lwz r11,vmmppcVSCRshadow+0(r17) ; Get the VSCR |
503 | lwz r14,vmmppcVSCRshadow+4(r17) ; Get the VSCR | |
504 | lwz r10,vmmppcVSCRshadow+8(r17) ; Get the VSCR | |
505 | lwz r9,vmmppcVSCRshadow+12(r17) ; Get the VSCR | |
9bccf70c A |
506 | lwz r8,savevrsave(r30) ; Get the current VRSave |
507 | ||
de355530 A |
508 | stw r11,savevscr+0(r30) ; Set the VSCR |
509 | stw r14,savevscr+4(r30) ; Set the VSCR | |
510 | stw r10,savevscr+8(r30) ; Set the VSCR | |
511 | stw r9,savevscr+12(r30) ; Set the VSCR | |
512 | stw r8,savevrvalid(r21) ; Set the current VRSave as valid saved | |
513 | ||
1c79356b A |
514 | lwz r11,ACT_MACT_SPF(r26) ; Get the special flags |
515 | stw r15,vmmCntrl(r17) ; Save the control flags sans vmmVectLoad | |
516 | rlwinm r11,r11,0,vectorCngbit+1,vectorCngbit-1 ; Clear the changed bit here | |
517 | lwz r14,vmmStat(r17) ; Get the status flags | |
518 | mfsprg r10,0 ; Get the per_proc | |
519 | stw r11,ACT_MACT_SPF(r26) ; Set the special flags |
520 | rlwinm r14,r14,0,vmmVectCngdb+1,vmmVectCngdb-1 ; Clear the changed flag | |
1c79356b A |
521 | stw r11,spcFlags(r10) ; Set per_proc copy of the special flags |
522 | stw r14,vmmStat(r17) ; Set the status flags sans vmmVectCngd | |
1c79356b A |
523 | |
524 | swvmNoNewVects: | |
525 | li r3,1 ; Show normal exit with check for AST | |
9bccf70c | 526 | lwz r16,ACT_THREAD(r26) ; Restore the thread pointer |
1c79356b A |
527 | b EXT(ppcscret) ; Go back to handler... |
528 | ||
d7e50217 A |
529 | |
530 | ; | |
531 | ; These syscalls are invalid, FAM syscall fast path | |
532 | ; | |
533 | ||
534 | .align 5 | |
535 | .globl EXT(vmm_fam_reserved) | |
536 | ||
537 | LEXT(vmm_fam_reserved) | |
538 | li r3,0 ; Force exception | |
539 | b EXT(ppcscret) ; Go back to handler... | |
1c79356b | 540 | |
1c79356b A |
541 | ; |
542 | ; Here is where we exit from vmm mode. We do this on any kind of exception. | |
543 | ; Interruptions (decrementer, external, etc.) are another story though. | |
544 | ; These we just pass through. We also switch back explicitly when requested. |
545 | ; This will happen in response to a timer pop and some kinds of ASTs. | |
546 | ; | |
547 | ; Inputs: | |
548 | ; R3 = activation | |
549 | ; R4 = savearea | |
550 | ; | |
551 | ||
552 | .align 5 | |
553 | .globl EXT(vmm_exit) | |
554 | ||
555 | LEXT(vmm_exit) | |
556 | ||
d7e50217 | 557 | vmmexitcall: |
1c79356b A |
558 | lwz r2,vmmCEntry(r3) ; Get the context that is active |
559 | lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy | |
560 | lwz r11,ACT_MACT_SPF(r3) ; Get the special flags | |
561 | lwz r19,vmmFlags(r2) ; Get the status flags | |
562 | mr r16,r3 ; R16 is safe to use for the activation address | |
563 | ||
564 | rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits | |
565 | li r0,0 ; Get a zero | |
566 | rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf | |
567 | lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation | |
568 | rlwinm r11,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag | |
569 | stw r0,vmmCEntry(r16) ; Clear pointer to active context | |
570 | stw r19,vmmFlags(r2) ; Set the status flags | |
0b4e3aa0 | 571 | rlwinm r11,r11,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key |
1c79356b | 572 | mfsprg r10,0 ; Get the per_proc block |
d7e50217 A |
573 | rlwinm r11,r11,0,FamVMenabit+1,FamVMenabit-1 ; Clear FamVMEnable |
574 | lwz r18,spcFlags(r10) ; Get per_proc copy of the special flags | |
575 | lwz r5,vmmContextKern(r2) ; Get the state page kernel addr | |
576 | rlwinm r11,r11,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMMode | |
577 | lwz r6,vmmCntrl(r5) ; Get the control field | |
578 | rlwimi r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits | |
579 | rlwimi r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits | |
580 | rlwimi r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits | |
1c79356b | 581 | stw r11,ACT_MACT_SPF(r16) ; Set the special flags
d7e50217 | 582 | stw r6,vmmCntrl(r5) ; Store the control field |
1c79356b A |
583 | stw r11,spcFlags(r10) ; Set per_proc copy of the special flags |
584 | ||
585 | mr r26,r16 ; Save the activation pointer | |
586 | mr r27,r2 ; Save the context entry | |
587 | ||
588 | bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator | |
589 | ||
9bccf70c | 590 | la r5,facctx(r16) ; Point to the main facility context |
1c79356b | 591 | mr r2,r27 ; Restore |
9bccf70c A |
592 | stw r5,deferctx(r16) ; Start using the main facility context on the way out |
593 | lwz r5,vmmContextKern(r27) ; Get the context area address | |
1c79356b A |
594 | mr r3,r16 ; Restore activation address |
595 | stw r19,vmmStat(r5) ; Save the changed and popped flags | |
596 | bl swapCtxt ; Exchange the VM context for the emulator one | |
de355530 | 597 | stw r8,saver3(r30) ; Set the return code as the return value also |
1c79356b A |
598 | b EXT(retFromVM) ; Go back to handler... |
599 | ||
600 | ||
601 | ; | |
602 | ; Here is where we force exit from vmm mode. We do this as |
603 | ; part of termination to ensure that we are not executing |
604 | ; in an alternate context. Because this is called from C we need to save | |
605 | ; all non-volatile registers. | |
606 | ; | |
607 | ; Inputs: | |
608 | ; R3 = activation | |
609 | ; R4 = user savearea | |
610 | ; Interruptions disabled | |
611 | ; | |
612 | ||
613 | .align 5 | |
614 | .globl EXT(vmm_force_exit) | |
615 | ||
616 | LEXT(vmm_force_exit) | |
617 | ||
618 | stwu r1,-(FM_ALIGN(20*4)+FM_SIZE)(r1) ; Get enough space for the registers | |
619 | mflr r0 ; Save the return | |
620 | stmw r13,FM_ARG0(r1) ; Save all non-volatile registers | |
621 | stw r0,(FM_ALIGN(20*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return | |
622 | ||
623 | lwz r2,vmmCEntry(r3) ; Get the context that is active | |
624 | lwz r11,ACT_MACT_SPF(r3) ; Get the special flags | |
625 | lwz r19,vmmFlags(r2) ; Get the status flags | |
626 | lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy | |
627 | ||
628 | rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits | |
629 | mr r26,r3 ; Save the activation pointer | |
630 | rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf | |
631 | li r0,0 ; Get a zero | |
632 | rlwinm r9,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag | |
633 | cmplw r9,r11 ; Check if we were in a vm | |
634 | lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation | |
635 | beq- vfeNotRun ; We were not in a vm.... | |
0b4e3aa0 | 636 | rlwinm r9,r9,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key |
1c79356b A |
637 | stw r0,vmmCEntry(r26) ; Clear pointer to active context |
638 | mfsprg r10,0 ; Get the per_proc block | |
d7e50217 A |
639 | lwz r18,spcFlags(r10) ; Get per_proc copy of the special flags |
640 | rlwinm r9,r9,0,FamVMenabit+1,FamVMenabit-1 ; Clear Fam Enable | |
641 | rlwinm r9,r9,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear Fam Enable | |
642 | lwz r5,vmmContextKern(r2) ; Get the context area address | |
643 | lwz r6,vmmCntrl(r5) ; Get the control field | |
644 | rlwimi r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits | |
645 | rlwimi r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits | |
646 | rlwimi r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits | |
647 | stw r6,vmmCntrl(r5) ; Store the control field | |
1c79356b A |
648 | stw r9,ACT_MACT_SPF(r26) ; Set the special flags
649 | stw r9,spcFlags(r10) ; Set per_proc copy of the special flags | |
650 | ||
651 | mr r27,r2 ; Save the context entry | |
652 | mr r30,r4 ; Save the savearea | |
653 | ||
654 | bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator | |
655 | ||
9bccf70c | 656 | la r7,facctx(r26) ; Point to the main facility context |
1c79356b A |
657 | |
658 | lwz r5,vmmContextKern(r27) ; Get the context area address | |
659 | stw r19,vmmStat(r5) ; Save the changed and popped flags | |
9bccf70c A |
660 | stw r7,deferctx(r26) ; Tell context launcher to switch facility context |
661 | ||
1c79356b A |
662 | bl swapCtxt ; Exchange the VM context for the emulator one |
663 | ||
0b4e3aa0 | 664 | lwz r8,saveexception(r30) ; Pick up the exception code |
9bccf70c A |
665 | lwz r7,SAVflags(r30) ; Pick up the savearea flags |
666 | lis r9,hi16(SAVredrive) ; Get exception redrive bit | |
0b4e3aa0 | 667 | rlwinm r8,r8,30,24,31 ; Convert exception to return code |
9bccf70c | 668 | andc r7,r7,r9 ; Make sure redrive is off because we are intercepting |
de355530 | 669 | stw r8,saver3(r30) ; Set the return code as the return value also |
9bccf70c | 670 | stw r7,SAVflags(r30) ; Set the savearea flags |
1c79356b A |
671 | |
672 | ||
673 | vfeNotRun: lmw r13,FM_ARG0(r1) ; Restore all non-volatile registers | |
674 | lwz r1,0(r1) ; Pop the stack | |
675 | lwz r0,FM_LR_SAVE(r1) ; Get the return address | |
676 | mtlr r0 ; Set return | |
677 | blr | |
678 | ||
679 | ; | |
680 | ; Note: we will not do any DCBTs to the savearea. It was just stored to a few cycles ago and should | |
9bccf70c | 681 | ; still be in the cache. |
1c79356b | 682 | ; |
1c79356b | 683 | ; NOTE NOTE: R16 is important to save!!!! |
9bccf70c | 684 | ; |
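;
;			swapCtxt exchanges the guest and emulator register images in place: the GPRs, CR,
;			XER, LR, CTR, VRSAVE, VSCR, and FPSCR in the savearea (r30) are swapped with the
;			vmmppc* fields of the context area (r5), the incoming MSR is sanitized as described
;			earlier, and the exception code is converted into a return code for the emulator.
;			Expects r5 = context area and r30 = savearea, and must not disturb r16 (the thread
;			pointer).
;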
1c79356b A |
685 | .align 5 |
686 | ||
de355530 | 687 | swapCtxt: la r6,vmmppcpc(r5) ; Point to the first line |
1c79356b A |
688 | |
689 | lwz r14,saveexception(r30) ; Get the exception code | |
9bccf70c | 690 | dcbt 0,r6 ; Touch in the first line of the context area |
de355530 A |
691 | lwz r7,savesrr0(r30) ; Start moving context |
692 | lwz r8,savesrr1(r30) | |
693 | lwz r9,saver0(r30) | |
1c79356b | 694 | cmplwi cr1,r14,T_SYSTEM_CALL ; Are we switching because of a system call? |
de355530 A |
695 | lwz r10,saver1(r30) |
696 | lwz r11,saver2(r30) | |
697 | lwz r12,saver3(r30) | |
698 | lwz r13,saver4(r30) | |
9bccf70c | 699 | la r6,vmmppcr6(r5) ; Point to second line |
de355530 | 700 | lwz r14,saver5(r30) |
1c79356b | 701 | |
9bccf70c | 702 | dcbt 0,r6 ; Touch second line of context area |
1c79356b | 703 | |
9bccf70c | 704 | lwz r15,vmmppcpc(r5) ; First line of context |
1c79356b | 705 | lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user |
9bccf70c | 706 | lwz r23,vmmppcmsr(r5) |
d7e50217 | 707 | ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user |
9bccf70c A |
708 | lwz r17,vmmppcr0(r5) |
709 | lwz r18,vmmppcr1(r5) | |
1c79356b | 710 | and r23,r23,r22 ; Keep only the controllable bits |
9bccf70c | 711 | lwz r19,vmmppcr2(r5) |
1c79356b | 712 | oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits |
9bccf70c | 713 | lwz r20,vmmppcr3(r5) |
1c79356b | 714 | ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits |
9bccf70c A |
715 | lwz r21,vmmppcr4(r5) |
716 | lwz r22,vmmppcr5(r5) | |
1c79356b | 717 | |
9bccf70c | 718 | dcbt 0,r6 ; Touch third line of context area |
1c79356b | 719 | |
9bccf70c A |
720 | stw r7,vmmppcpc(r5) ; Save emulator context into the context area |
721 | stw r8,vmmppcmsr(r5) | |
722 | stw r9,vmmppcr0(r5) | |
723 | stw r10,vmmppcr1(r5) | |
724 | stw r11,vmmppcr2(r5) | |
725 | stw r12,vmmppcr3(r5) | |
726 | stw r13,vmmppcr4(r5) | |
727 | stw r14,vmmppcr5(r5) | |
1c79356b A |
728 | |
729 | ; | |
730 | ; Save the first 3 parameters if we are an SC (we will take care of the last later) | |
731 | ; | |
732 | bne+ cr1,swapnotsc ; Skip next if not an SC exception... | |
733 | stw r12,return_params+0(r5) ; Save the first return | |
734 | stw r13,return_params+4(r5) ; Save the second return | |
735 | stw r14,return_params+8(r5) ; Save the third return | |
736 | ||
de355530 A |
737 | swapnotsc: stw r15,savesrr0(r30) ; Save vm context into the savearea |
738 | stw r23,savesrr1(r30) | |
739 | stw r17,saver0(r30) | |
740 | stw r18,saver1(r30) | |
741 | stw r19,saver2(r30) | |
742 | stw r20,saver3(r30) | |
743 | stw r21,saver4(r30) | |
9bccf70c | 744 | la r6,vmmppcr14(r5) ; Point to fourth line |
de355530 | 745 | stw r22,saver5(r30) |
9bccf70c A |
746 | |
747 | dcbt 0,r6 ; Touch fourth line | |
748 | ||
749 | ; Swap 8 registers | |
750 | ||
de355530 A |
751 | lwz r7,saver6(r30) ; Read savearea |
752 | lwz r8,saver7(r30) | |
753 | lwz r9,saver8(r30) | |
754 | lwz r10,saver9(r30) | |
755 | lwz r11,saver10(r30) | |
756 | lwz r12,saver11(r30) | |
757 | lwz r13,saver12(r30) | |
758 | lwz r14,saver13(r30) | |
9bccf70c A |
759 | |
760 | lwz r15,vmmppcr6(r5) ; Read vm context | |
761 | lwz r24,vmmppcr7(r5) | |
762 | lwz r17,vmmppcr8(r5) | |
763 | lwz r18,vmmppcr9(r5) | |
764 | lwz r19,vmmppcr10(r5) | |
765 | lwz r20,vmmppcr11(r5) | |
766 | lwz r21,vmmppcr12(r5) | |
767 | lwz r22,vmmppcr13(r5) | |
768 | ||
769 | stw r7,vmmppcr6(r5) ; Write context | |
770 | stw r8,vmmppcr7(r5) | |
771 | stw r9,vmmppcr8(r5) | |
772 | stw r10,vmmppcr9(r5) | |
773 | stw r11,vmmppcr10(r5) | |
774 | stw r12,vmmppcr11(r5) | |
775 | stw r13,vmmppcr12(r5) | |
776 | la r6,vmmppcr22(r5) ; Point to fifth line | |
777 | stw r14,vmmppcr13(r5) | |
778 | ||
779 | dcbt 0,r6 ; Touch fifth line | |
780 | ||
de355530 A |
781 | stw r15,saver6(r30) ; Write vm context |
782 | stw r24,saver7(r30) | |
783 | stw r17,saver8(r30) | |
784 | stw r18,saver9(r30) | |
785 | stw r19,saver10(r30) | |
786 | stw r20,saver11(r30) | |
787 | stw r21,saver12(r30) | |
788 | stw r22,saver13(r30) | |
9bccf70c A |
789 | |
790 | ; Swap 8 registers | |
791 | ||
de355530 A |
792 | lwz r7,saver14(r30) ; Read savearea |
793 | lwz r8,saver15(r30) | |
794 | lwz r9,saver16(r30) | |
795 | lwz r10,saver17(r30) | |
796 | lwz r11,saver18(r30) | |
797 | lwz r12,saver19(r30) | |
798 | lwz r13,saver20(r30) | |
799 | lwz r14,saver21(r30) | |
9bccf70c A |
800 | |
801 | lwz r15,vmmppcr14(r5) ; Read vm context | |
802 | lwz r24,vmmppcr15(r5) | |
803 | lwz r17,vmmppcr16(r5) | |
804 | lwz r18,vmmppcr17(r5) | |
805 | lwz r19,vmmppcr18(r5) | |
806 | lwz r20,vmmppcr19(r5) | |
807 | lwz r21,vmmppcr20(r5) | |
808 | lwz r22,vmmppcr21(r5) | |
809 | ||
810 | stw r7,vmmppcr14(r5) ; Write context | |
811 | stw r8,vmmppcr15(r5) | |
812 | stw r9,vmmppcr16(r5) | |
813 | stw r10,vmmppcr17(r5) | |
814 | stw r11,vmmppcr18(r5) | |
815 | stw r12,vmmppcr19(r5) | |
816 | stw r13,vmmppcr20(r5) | |
817 | la r6,vmmppcr30(r5) ; Point to sixth line | |
818 | stw r14,vmmppcr21(r5) | |
819 | ||
820 | dcbt 0,r6 ; Touch sixth line | |
821 | ||
de355530 A |
822 | stw r15,saver14(r30) ; Write vm context |
823 | stw r24,saver15(r30) | |
824 | stw r17,saver16(r30) | |
825 | stw r18,saver17(r30) | |
826 | stw r19,saver18(r30) | |
827 | stw r20,saver19(r30) | |
828 | stw r21,saver20(r30) | |
829 | stw r22,saver21(r30) | |
9bccf70c A |
830 | |
831 | ; Swap 8 registers | |
832 | ||
de355530 A |
833 | lwz r7,saver22(r30) ; Read savearea |
834 | lwz r8,saver23(r30) | |
835 | lwz r9,saver24(r30) | |
836 | lwz r10,saver25(r30) | |
837 | lwz r11,saver26(r30) | |
838 | lwz r12,saver27(r30) | |
839 | lwz r13,saver28(r30) | |
840 | lwz r14,saver29(r30) | |
9bccf70c A |
841 | |
842 | lwz r15,vmmppcr22(r5) ; Read vm context | |
843 | lwz r24,vmmppcr23(r5) | |
844 | lwz r17,vmmppcr24(r5) | |
845 | lwz r18,vmmppcr25(r5) | |
846 | lwz r19,vmmppcr26(r5) | |
847 | lwz r20,vmmppcr27(r5) | |
848 | lwz r21,vmmppcr28(r5) | |
849 | lwz r22,vmmppcr29(r5) | |
850 | ||
851 | stw r7,vmmppcr22(r5) ; Write context | |
852 | stw r8,vmmppcr23(r5) | |
853 | stw r9,vmmppcr24(r5) | |
854 | stw r10,vmmppcr25(r5) | |
855 | stw r11,vmmppcr26(r5) | |
856 | stw r12,vmmppcr27(r5) | |
857 | stw r13,vmmppcr28(r5) | |
858 | la r6,vmmppcvscr(r5) ; Point to seventh line | |
859 | stw r14,vmmppcr29(r5) | |
860 | ||
861 | dcbt 0,r6 ; Touch seventh line | |
862 | ||
de355530 A |
863 | stw r15,saver22(r30) ; Write vm context |
864 | stw r24,saver23(r30) | |
865 | stw r17,saver24(r30) | |
866 | stw r18,saver25(r30) | |
867 | stw r19,saver26(r30) | |
868 | stw r20,saver27(r30) | |
869 | stw r21,saver28(r30) | |
870 | stw r22,saver29(r30) | |
9bccf70c A |
871 | |
872 | ; Swap 8 registers | |
873 | ||
de355530 A |
874 | lwz r7,saver30(r30) ; Read savearea |
875 | lwz r8,saver31(r30) | |
9bccf70c | 876 | lwz r9,savecr(r30) |
de355530 A |
877 | lwz r10,savexer(r30) |
878 | lwz r11,savelr(r30) | |
879 | lwz r12,savectr(r30) | |
9bccf70c A |
880 | lwz r14,savevrsave(r30) |
881 | ||
882 | lwz r15,vmmppcr30(r5) ; Read vm context | |
883 | lwz r24,vmmppcr31(r5) | |
884 | lwz r17,vmmppccr(r5) | |
885 | lwz r18,vmmppcxer(r5) | |
886 | lwz r19,vmmppclr(r5) | |
887 | lwz r20,vmmppcctr(r5) | |
888 | lwz r22,vmmppcvrsave(r5) | |
889 | ||
890 | stw r7,vmmppcr30(r5) ; Write context | |
891 | stw r8,vmmppcr31(r5) | |
892 | stw r9,vmmppccr(r5) | |
893 | stw r10,vmmppcxer(r5) | |
894 | stw r11,vmmppclr(r5) | |
895 | stw r12,vmmppcctr(r5) | |
896 | stw r14,vmmppcvrsave(r5) | |
897 | ||
de355530 A |
898 | stw r15,saver30(r30) ; Write vm context |
899 | stw r24,saver31(r30) | |
9bccf70c | 900 | stw r17,savecr(r30) |
de355530 A |
901 | stw r18,savexer(r30) |
902 | stw r19,savelr(r30) | |
903 | stw r20,savectr(r30) | |
9bccf70c A |
904 | stw r22,savevrsave(r30) |
905 | ||
906 | ; Swap 8 registers | |
907 | ||
908 | lwz r7,savevscr+0(r30) ; Read savearea | |
909 | lwz r8,savevscr+4(r30) | |
910 | lwz r9,savevscr+8(r30) | |
911 | lwz r10,savevscr+12(r30) | |
912 | lwz r11,savefpscrpad(r30) | |
913 | lwz r12,savefpscr(r30) | |
914 | ||
915 | lwz r15,vmmppcvscr+0(r5) ; Read vm context | |
916 | lwz r24,vmmppcvscr+4(r5) | |
917 | lwz r17,vmmppcvscr+8(r5) | |
918 | lwz r18,vmmppcvscr+12(r5) | |
919 | lwz r19,vmmppcfpscrpad(r5) | |
920 | lwz r20,vmmppcfpscr(r5) | |
921 | ||
922 | stw r7,vmmppcvscr+0(r5) ; Write context | |
923 | stw r8,vmmppcvscr+4(r5) | |
924 | stw r9,vmmppcvscr+8(r5) | |
925 | stw r10,vmmppcvscr+12(r5) | |
926 | stw r11,vmmppcfpscrpad(r5) | |
927 | stw r12,vmmppcfpscr(r5) | |
928 | ||
929 | stw r15,savevscr+0(r30) ; Write vm context | |
930 | stw r24,savevscr+4(r30) | |
931 | stw r17,savevscr+8(r30) | |
932 | stw r18,savevscr+12(r30) | |
933 | stw r19,savefpscrpad(r30) | |
934 | stw r20,savefpscr(r30) | |
935 | ||
1c79356b A |
936 | |
937 | ; | |
938 | ; Cobble up the exception return code and save any specific return values | |
939 | ; | |
940 | ||
941 | lwz r7,saveexception(r30) ; Pick up the exception code | |
942 | rlwinm r8,r7,30,24,31 ; Convert exception to return code | |
943 | cmplwi r7,T_DATA_ACCESS ; Was this a DSI? | |
944 | stw r8,return_code(r5) ; Save the exit code | |
945 | cmplwi cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI? | |
946 | beq+ swapDSI ; Yeah... | |
947 | cmplwi r7,T_ALIGNMENT ; Alignment exception? | |
948 | beq+ cr1,swapISI ; We had an ISI... | |
949 | cmplwi cr1,r7,T_SYSTEM_CALL ; Exiting because of a system call? |
950 | beq+ swapDSI ; An alignment exception looks like a DSI... | |
951 | beq+ cr1,swapSC ; We had a system call... | |
952 | ||
953 | blr ; Return... | |
954 | ||
955 | ; | |
956 | ; Set exit returns for a DSI or alignment exception | |
957 | ; | |
958 | ||
de355530 | 959 | swapDSI: lwz r10,savedar(r30) ; Get the DAR |
1c79356b A |
960 | lwz r7,savedsisr(r30) ; and the DSISR |
961 | stw r10,return_params+0(r5) ; Save DAR as first return parm | |
962 | stw r7,return_params+4(r5) ; Save DSISR as second return parm | |
963 | blr ; Return... | |
964 | ||
965 | ; | |
966 | ; Set exit returns for an ISI |
967 | ; | |
968 | ||
9bccf70c A |
969 | swapISI: lwz r7,vmmppcmsr(r5) ; Get the SRR1 value |
970 | lwz r10,vmmppcpc(r5) ; Get the PC as failing address | |
1c79356b A |
971 | rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR |
972 | stw r10,return_params+0(r5) ; Save PC as first return parm | |
973 | stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm | |
974 | blr ; Return... | |
975 | ||
976 | ; | |
977 | ; Set exit returns for a system call (note: we did the first 3 earlier) | |
978 | ; Do we really need to pass parameters back here???? | |
979 | ; | |
980 | ||
9bccf70c | 981 | swapSC: lwz r10,vmmppcr6(r5) ; Get the fourth parameter
1c79356b A |
982 | stw r10,return_params+12(r5) ; Save it |
983 | blr ; Return... | |
984 | ||
d7e50217 A |
985 | ; |
986 | ; vmmFamGuestResume: | |
987 | ; Restore Guest context from Fam mode. | |
988 | ; | |
989 | ||
990 | vmmFamGuestResume: | |
991 | mfsprg r10,0 ; Get the per_proc | |
992 | lwz r27,vmmCEntry(r3) ; Get the context that is active | |
d7e50217 A |
993 | lwz r15,spcFlags(r10) ; Get per_proc special flags |
994 | mr r26,r3 ; Save the activation pointer | |
de355530 | 995 | lwz r17,vmmFlags(r27) ; Get the status flags |
d7e50217 A |
996 | lwz r20,vmmContextKern(r27) ; Get the comm area |
997 | rlwinm r15,r15,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit | |
998 | stw r15,spcFlags(r10) ; Update the special flags | |
de355530 | 999 | rlwinm. r0,r17,0,vmmMapDoneb,vmmMapDoneb ; Did we just do a map function? |
d7e50217 | 1000 | lwz r7,famguestpc(r20) ; Load famguest ctx pc |
de355530 A |
1001 | andc r17,r17,r0 ; Turn off map flag |
1002 | stw r17,vmmFlags(r27) ; Update vmmFlags | |
1003 | beq+ vmmFamRetNoMap ; No mapping done... | |
d7e50217 | 1004 | lwz r3,SAVflags(r30) ; Pick up the savearea flags |
de355530 | 1005 | lwz r2,vmmLastMap(r27) ; Get the last mapped address |
d7e50217 A |
1006 | li r4,T_DATA_ACCESS ; Change to DSI fault |
1007 | oris r3,r3,hi16(SAVredrive) ; Set exception redrive | |
1008 | stw r2,savedar(r30) ; Set the DAR to the last thing we mapped | |
d7e50217 A |
1009 | stw r3,SAVflags(r30) ; Turn on the redrive request |
1010 | lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss | |
1011 | stw r4,saveexception(r30) ; Say we need to emulate a DSI | |
d7e50217 | 1012 | stw r2,savedsisr(r30) ; Pretend we have a PTE miss |
de355530 A |
1013 | vmmFamRetNoMap: |
1014 | mfsrr1 r4 ; Get the current MSR value | |
1015 | stw r7,savesrr0(r30) ; Set savearea pc | |
d7e50217 A |
1016 | lwz r5,famguestmsr(r20) ; Load famguest ctx msr |
1017 | lis r6,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user | |
1018 | ori r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user | |
1019 | and r5,r5,r6 ; Keep only the controllable bits | |
1020 | oris r5,r5,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits | |
1021 | ori r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits | |
1022 | rlwimi r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP | |
1023 | rlwimi r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector | |
de355530 | 1024 | stw r5,savesrr1(r30) ; Set savearea srr1 |
d7e50217 A |
1025 | lwz r4,famguestr0(r20) ; Load famguest ctx r0 |
1026 | lwz r5,famguestr1(r20) ; Load famguest ctx r1 | |
1027 | lwz r6,famguestr2(r20) ; Load famguest ctx r2 | |
1028 | lwz r7,famguestr3(r20) ; Load famguest ctx r3 | |
de355530 A |
1029 | stw r4,saver0(r30) ; Set savearea r0 |
1030 | stw r5,saver1(r30) ; Set savearea r1 | |
1031 | stw r6,saver2(r30) ; Set savearea r2 | |
1032 | stw r7,saver3(r30) ; Set savearea r3 | |
d7e50217 A |
1033 | lwz r4,famguestr4(r20) ; Load famguest ctx r4 |
1034 | lwz r5,famguestr5(r20) ; Load famguest ctx r5 | |
1035 | lwz r6,famguestr6(r20) ; Load famguest ctx r6 | |
1036 | lwz r7,famguestr7(r20) ; Load famguest ctx r7 | |
de355530 A |
1037 | stw r4,saver4(r30) ; Set savearea r4 |
1038 | stw r5,saver5(r30) ; Set savearea r5 | |
1039 | stw r6,saver6(r30) ; Set savearea r6 | |
1040 | stw r7,saver7(r30) ; Set savearea r7 | |
1041 | ||
d7e50217 A |
1042 | li r3,1 ; Show normal exit with check for AST |
1043 | lwz r16,ACT_THREAD(r26) ; Restore the thread pointer | |
1044 | b EXT(ppcscret) ; Go back to handler... | |
1045 | ||
1046 | ; | |
de355530 | 1047 | ; FAM Intercept handler |
d7e50217 A |
1048 | ; |
1049 | ||
1050 | .align 5 | |
de355530 A |
1051 | .globl EXT(vmm_fam_handler) |
1052 | ||
1053 | LEXT(vmm_fam_handler) | |
1054 | lwz r4,saver4(r13) ; Load savearea r4 |
d7e50217 A |
1055 | cmplwi r11,T_ALIGNMENT ; Alignment exception? |
1056 | lwz r3,VMMareaPhys(r2) ; Load phys state page addr | |
d7e50217 | 1057 | cmplwi cr1,r11,T_PROGRAM ; Exiting because of a PRG? |
d7e50217 A |
1058 | stw r4,famguestr4(r3) ; Save r4 in famguest ctx |
1059 | stw r5,famguestr5(r3) ; Save r5 in famguest ctx | |
1060 | stw r6,famguestr6(r3) ; Save r6 in famguest ctx | |
1061 | stw r7,famguestr7(r3) ; Save r7 in famguest ctx | |
de355530 A |
1062 | lwz r4,saver0(r13) ; Load savearea r0 |
1063 | lwz r5,saver1(r13) ; Load savearea r1 | |
1064 | lwz r6,saver2(r13) ; Load savearea r2 | |
1065 | lwz r7,saver3(r13) ; Load savearea r3 | |
d7e50217 A |
1066 | stw r4,famguestr0(r3) ; Save r0 in famguest ctx |
1067 | stw r5,famguestr1(r3) ; Save r1 in famguest ctx | |
1068 | stw r6,famguestr2(r3) ; Save r2 in famguest ctx | |
1069 | stw r7,famguestr3(r3) ; Save r3 in famguest ctx | |
1070 | lwz r4,spcFlags(r2) ; Load per_proc spcFlags | |
1071 | oris r4,r4,hi16(FamVMmode) ; Set FAM mode | |
1072 | stw r4,spcFlags(r2) ; Update per_proc spcFlags | |
1073 | mfsrr0 r2 ; Get the interrupt srr0 | |
1074 | mfsrr1 r4 ; Get the interrupt srr1 | |
1075 | stw r2,famguestpc(r3) ; Save srr0 in famguest ctx | |
1076 | stw r4,famguestmsr(r3) ; Save srr1 in famguest ctx | |
1077 | li r6,lo16(MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)|MASK(MSR_FE1)) | |
1078 | andc r6,r4,r6 ; Clear SE BE FE0 FE1 | |
1079 | mtsrr1 r6 ; Set srr1 | |
1080 | mr r6,r3 ; Set r6 with phys state page addr | |
1081 | rlwinm r7,r11,30,24,31 ; Convert exception to return code | |
de355530 A |
1082 | beq+ cr1,famPRG ; We had a program exception... |
1083 | bne+ famRet | |
d7e50217 A |
1084 | ; We had an Alignment... |
1085 | mfdar r3 ; Load dar | |
1086 | mfdsisr r4 ; Load dsisr | |
1087 | stw r3,famparam+0x4(r6) ; Set famparam 1 with dar | |
1088 | stw r4,famparam+0x8(r6) ; Set famparam 2 with dsisr |
de355530 A |
1089 | b famRet ; |
1090 | famPRG: | |
d7e50217 A |
1091 | stw r4,famparam+0x4(r6) ; Set famparam 1 with srr1 |
1092 | mr r3,r4 ; Set r3 with dsisr | |
1093 | lwz r4,famguestr4(r6) ; Load r4 from famguest context | |
de355530 | 1094 | famRet: |
d7e50217 A |
1095 | lwz r5,famguestr5(r6) ; Load r5 from famguest context |
1096 | lwz r13,famhandler(r6) ; Load user address to resume | |
1097 | stw r2,famparam(r6) ; Set famparam 0 with srr0 | |
1098 | stw r7,famdispcode(r6) ; Save the exit code | |
1099 | lwz r1,famrefcon(r6) ; load refcon | |
d7e50217 A |
1100 | mtcr r0 ; Restore cr |
1101 | mtsrr0 r13 ; Load srr0 | |
1102 | mr r0,r7 ; Set dispatch code | |
1103 | lwz r7,famguestr7(r6) ; Load r7 from famguest context | |
1104 | lwz r6,famguestr6(r6) ; Load r6 from famguest context | |
1105 | mfsprg r13,2 ; Restore r13 | |
1106 | mfsprg r11,3 ; Restore r11 | |
1107 | rfi | |
d7e50217 A |
1108 | |
1109 | ; | |
1110 | ; FAM Intercept DSI ISI fault handler | |
1111 | ; | |
1112 | ||
1113 | .align 5 | |
de355530 | 1114 | .globl EXT(vmm_fam_pf_handler) |
d7e50217 | 1115 | |
de355530 | 1116 | LEXT(vmm_fam_pf_handler) |
d7e50217 | 1117 | lwz r3,VMMareaPhys(r2) ; Load phys state page addr |
de355530 A |
1118 | lwz r4,saver0(r13) ; Load savearea r0 |
1119 | lwz r5,saver1(r13) ; Load savearea r1 | |
1120 | lwz r6,saver2(r13) ; Load savearea r2 | |
1121 | lwz r7,saver3(r13) ; Load savearea r3 | |
d7e50217 A |
1122 | stw r4,famguestr0(r3) ; Save r0 in famguest |
1123 | stw r5,famguestr1(r3) ; Save r1 in famguest | |
1124 | stw r6,famguestr2(r3) ; Save r2 in famguest | |
1125 | stw r7,famguestr3(r3) ; Save r3 in famguest | |
de355530 A |
1126 | lwz r4,saver4(r13) ; Load savearea r4 |
1127 | lwz r5,saver5(r13) ; Load savearea r5 |
1128 | lwz r6,saver6(r13) ; Load savearea r6 |
1129 | lwz r7,saver7(r13) ; Load savearea r7 |
d7e50217 A |
1130 | stw r4,famguestr4(r3) ; Save r4 in famguest |
1131 | lwz r4,spcFlags(r2) ; Load spcFlags | |
1132 | stw r5,famguestr5(r3) ; Save r5 in famguest | |
de355530 | 1133 | lwz r5,savesrr0(r13) ; Get the interrupt srr0 |
d7e50217 | 1134 | stw r6,famguestr6(r3) ; Save r6 in famguest |
de355530 | 1135 | lwz r6,savesrr1(r13) ; Load srr1 |
d7e50217 A |
1136 | oris r4,r4,hi16(FamVMmode) ; Set FAM mode |
1137 | stw r7,famguestr7(r3) ; Save r7 in famguest | |
1138 | stw r4,spcFlags(r2) ; Update spcFlags | |
1139 | lwz r1,famrefcon(r3) ; Load refcon | |
1140 | lwz r2,famhandler(r3) ; Load famhandler to resume | |
1141 | stw r5,famguestpc(r3) ; Save srr0 | |
de355530 | 1142 | stw r5,saver2(r13) ; Store srr0 in savearea r2 |
d7e50217 A |
1143 | stw r5,famparam(r3) ; Store srr0 in fam param 0 |
1144 | stw r6,famguestmsr(r3) ; Save srr1 in famguestmsr | |
1145 | cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this a ISI? | |
1146 | rlwinm r7,r11,30,24,31 ; Convert exception to return code | |
de355530 A |
1147 | beq+ cr1,FamPfISI ; We had an ISI... |
1148 | ; FamPfDSI | |
1149 | lwz r6,savedar(r13) ; Load dar from savearea | |
d7e50217 A |
1150 | lwz r4,savedsisr(r13) ; Load dsisr from savearea |
1151 | stw r6,famparam+0x4(r3) ; Store dar in fam param 1 | |
de355530 | 1152 | stw r6,saver3(r13) ; Store dar in savearea r3 |
d7e50217 | 1153 | stw r4,famparam+0x8(r3) ; Store dsisr in fam param 2 |
de355530 A |
1154 | stw r4,saver4(r13) ; Store dsisr in savearea r4 |
1155 | b FamPfRet | |
1156 | FamPfISI: | |
d7e50217 A |
1157 | rlwinm r6,r6,0,1,4 ; Save the bits that match the DSISR |
1158 | stw r6,famparam+0x4(r3) ; Store srr1 in fam param 1 | |
de355530 A |
1159 | stw r6,saver3(r13) ; Store srr1 in savearea r3 |
1160 | FamPfRet: | |
1161 | stw r7,saver0(r13) ; Set dispatch code | |
d7e50217 | 1162 | stw r7,famdispcode(r3) ; Set dispatch code |
de355530 A |
1163 | stw r1,saver1(r13) ; Store refcon in savearea r1 |
1164 | stw r2,savesrr0(r13) ; Store famhandler in srr0 | |
d7e50217 A |
1165 | blr |
1166 | ||
1167 | ; | |
1168 | ; Ultra Fast Path FAM syscalls | |
1169 | ; | |
1170 | ||
1171 | .align 5 | |
1172 | .globl EXT(vmm_ufp) | |
1173 | ||
1174 | LEXT(vmm_ufp) | |
1175 | mfsprg r3,0 ; Get the per_proc area | |
de355530 A |
1176 | bt cr5_eq,ResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest |
1177 | lwz r3,VMMareaPhys(r3) ; Load fast assist area | |
d7e50217 A |
1178 | cmpwi cr7,r4,0 ; Compare first arg with 0 |
1179 | cmpwi cr5,r4,7 ; Compare first arg with 7 | |
1180 | cror cr1_eq,cr7_lt,cr5_gt ; Is it in 0 to 7 range | |
1181 | beq cr1,ufpVMret ; Return if not in the range | |
1182 | slwi r4,r4,2 ; multiply index by 4 | |
de355530 A |
1183 | la r3,famguestr0(r3) ; Load the base address |
1184 | bt cr6_eq,SetGuestReg ; Set/get selector | |
1185 | ; GetGuestReg | |
d7e50217 A |
1186 | lwzx r3,r4,r3 ; Load the guest register |
1187 | b ufpVMret ; Return | |
de355530 | 1188 | SetGuestReg: |
d7e50217 A |
1189 | stwx r5,r4,r3 ; Update the guest register |
1190 | li r3,0 ; Set return value | |
1191 | b ufpVMret ; Return | |
de355530 | 1192 | ResumeGuest: |
d7e50217 | 1193 | lwz r7,spcFlags(r3) ; Pick up the special flags |
de355530 | 1194 | lwz r13,VMMareaPhys(r3) ; Load fast assist area |
d7e50217 A |
1195 | mtsrr0 r4 ; Set srr0 |
1196 | rlwinm. r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl | |
1197 | rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit | |
de355530 | 1198 | beq ResumeGuest_nokey ; Branch if not key switch |
d7e50217 A |
1199 | mr r2,r7 ; Save r7 |
1200 | rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key | |
1201 | cmpw cr0,r7,r2 ; Is userProtKeybit changed? | |
de355530 A |
1202 | beq ResumeGuest_nokey ; No, go to ResumeGuest_nokey |
1203 | lwz r2,PP_USERPMAP(r3) ; Get user pmap phys addr | |
1204 | rlwinm r6,r7,userProtKeybit-2,2,2 ; Extract and shift the key bit | |
1205 | lwz r5,PMAP_SPACE(r2) ; Load the space id | |
1206 | oris r5,r5,hi16(SEG_REG_PROT) ; Set the protection | |
1207 | xor r5,r5,r6 ; Flip to proper segment register key | |
1208 | addis r4,r5,0x0000 ; Get SR0 value | |
1209 | mtsr sr0,r4 ; Load up the SR | |
1210 | addis r4,r5,0x0010 ; Get SR1 value | |
1211 | mtsr sr1,r4 ; Load up the SR | |
1212 | addis r4,r5,0x0020 ; Get SR2 value | |
1213 | mtsr sr2,r4 ; Load up the SR | |
1214 | addis r4,r5,0x0030 ; Get SR3 value | |
1215 | mtsr sr3,r4 ; Load up the SR | |
1216 | addis r4,r5,0x0040 ; Get SR4 value | |
1217 | mtsr sr4,r4 ; Load up the SR | |
1218 | addis r4,r5,0x0050 ; Get SR5 value | |
1219 | mtsr sr5,r4 ; Load up the SR | |
1220 | addis r4,r5,0x0060 ; Get SR6 value | |
1221 | mtsr sr6,r4 ; Load up the SR | |
1222 | addis r4,r5,0x0070 ; Get SR7 value | |
1223 | mtsr sr7,r4 ; Load up the SR | |
1224 | addis r4,r5,0x0080 ; Get SR8 value | |
1225 | mtsr sr8,r4 ; Load up the SR | |
1226 | addis r4,r5,0x0090 ; Get SR9 value | |
1227 | mtsr sr9,r4 ; Load up the SR | |
1228 | addis r4,r5,0x00a0 ; Get SR10 value | |
1229 | mtsr sr10,r4 ; Load up the SR | |
1230 | addis r4,r5,0x00b0 ; Get SR11 value | |
1231 | mtsr sr11,r4 ; Load up the SR | |
1232 | addis r4,r5,0x00c0 ; Get SR12 value | |
1233 | mtsr sr12,r4 ; Load up the SR | |
1234 | addis r4,r5,0x00d0 ; Get SR13 value | |
1235 | mtsr sr13,r4 ; Load up the SR | |
1236 | addis r4,r5,0x00e0 ; Get SR14 value | |
1237 | mtsr sr14,r4 ; Load up the SR | |
1238 | addis r4,r5,0x00f0 ; Get SR15 value | |
1239 | mtsr sr15,r4 ; Load up the SR | |
1240 | ResumeGuest_nokey: | |
1241 | mfsrr1 r6 ; Get the current MSR value | |
d7e50217 A |
1242 | lwz r0,famguestr0(r13) ; Load r0 |
1243 | lwz r1,famguestr1(r13) ; Load r1 | |
de355530 A |
1244 | lwz r4,famguestmsr(r13) ; Load guest srr1 |
1245 | stw r7,spcFlags(r3) ; Update the special flags | |
1246 | lis r5,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user | |
d7e50217 A |
1247 | lwz r2,famguestr2(r13) ; Load r2 |
1248 | lwz r3,famguestr3(r13) ; Load r3 | |
de355530 A |
1249 | ori r5,r5,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user |
1250 | and r7,r4,r5 ; Keep only the controllable bits | |
d7e50217 | 1251 | lwz r4,famguestr4(r13) ; Load r4 |
de355530 | 1252 | oris r7,r7,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits |
d7e50217 | 1253 | lwz r5,famguestr5(r13) ; Load r5 |
de355530 A |
1254 | ori r7,r7,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits |
1255 | rlwimi r7,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP | |
1256 | rlwimi r7,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector | |
1257 | mtsrr1 r7 ; Set srr1 | |
d7e50217 A |
1258 | lwz r6,famguestr6(r13) ; Load r6 |
1259 | lwz r7,famguestr7(r13) ; Load r7 | |
1260 | ufpVMret: | |
d7e50217 A |
1261 | mtcrf 0xFF,r11 ; Restore CR |
1262 | mfsprg r11,3 ; Restore R11 | |
d7e50217 | 1263 | mfsprg r13,2 ; Restore R13 |
de355530 | 1264 | rfi ; All done, go back... |