/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <assym.s>
#include <debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>

/*
 * This file contains implementations for the Virtual Machine Monitor
 * facility.
 */


/*
 * int vmm_dispatch(savearea, act);
 *
 * vmm_dispatch is a PPC-only system call. It is used with a selector (the first
 * parameter) to determine which function to enter. This is treated as an extension
 * of hw_exceptions.
 *
 * Inputs:
 *		R4  = current activation
 *		R16 = current thread
 *		R30 = current savearea
 */

			.align	5							/* Line up on cache line */
			.globl	EXT(vmm_dispatch_table)

LEXT(vmm_dispatch_table)

	/* Don't change the order of these routines in the table. It's */
	/* OK to add new routines, but they must be added at the bottom. */
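	/* (The selector passed from user space is used directly as an index into */
	/* this table, so the order of entries is effectively part of the user- */
	/* level ABI: reordering them would silently change what existing clients */
	/* invoke. New selectors therefore only ever go on the end.) */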

			.long	EXT(vmm_get_version_sel)		; Get the version of the VMM interface
			.long	EXT(vmm_get_features_sel)		; Get the features of the VMM interface
			.long	EXT(vmm_init_context_sel)		; Initializes a new VMM context
			.long	EXT(vmm_tear_down_context)		; Tears down a previously-allocated VMM context
			.long	EXT(vmm_tear_down_all)			; Tears down all VMMs
			.long	EXT(vmm_map_page)				; Maps a page from the main address space into the VM space
			.long	EXT(vmm_get_page_mapping)		; Returns client va associated with VM va
			.long	EXT(vmm_unmap_page)				; Unmaps a page from the VM space
			.long	EXT(vmm_unmap_all_pages)		; Unmaps all pages from the VM space
			.long	EXT(vmm_get_page_dirty_flag)	; Gets the change bit for a page and optionally clears it
			.long	EXT(vmm_get_float_state)		; Gets current floating point state
			.long	EXT(vmm_get_vector_state)		; Gets current vector state
			.long	EXT(vmm_set_timer)				; Sets a timer value
			.long	EXT(vmm_get_timer)				; Gets a timer value
			.long	EXT(switchIntoVM)				; Switches to the VM context
			.long	EXT(vmm_protect_page)			; Sets protection values for a page
			.long	EXT(vmm_map_execute)			; Maps a page and launches VM
			.long	EXT(vmm_protect_execute)		; Sets protection values for a page and launches VM

			.set	vmm_count,(.-EXT(vmm_dispatch_table))/4	; Get the top number


			.align	5
			.globl	EXT(vmm_dispatch)

LEXT(vmm_dispatch)

			lwz		r11,saver3(r30)				; Get the selector
			mr		r3,r4						; All of our functions want the activation as the first parm
			lis		r10,hi16(EXT(vmm_dispatch_table))	; Get top half of table
			cmplwi	r11,kVmmExecuteVM			; Should we switch to the VM now?
			cmplwi	cr1,r11,vmm_count			; See if we have a valid selector
			ori		r10,r10,lo16(EXT(vmm_dispatch_table))	; Get low half of table
			lwz		r4,saver4(r30)				; Get 1st parameter after selector
			beq+	EXT(switchIntoVM)			; Yes, go switch to it...
			rlwinm	r11,r11,2,0,29				; Index into table
			bgt-	cr1,vmmBogus				; It is a bogus entry
			lwzx	r10,r10,r11					; Get address of routine
			lwz		r5,saver5(r30)				; Get 2nd parameter after selector
			lwz		r6,saver6(r30)				; Get 3rd parameter after selector
			mtlr	r10							; Set the routine address
			lwz		r7,saver7(r30)				; Get 4th parameter after selector
;
;			NOTE: currently the most parameters any call takes is 4. We will support
;			at most 8 because we do not want to get into any stack-based parms.
;			However, this is where code for the 5th-8th parms would go if we need them.
;
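;			For illustration only: a 5th parameter would be picked up with the same
;			pattern as the loads above (saver8 being the next savearea slot), e.g.:
;
;				lwz		r8,saver8(r30)			; Get 5th parameter after selector
;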

			blrl								; Call function

			stw		r3,saver3(r30)				; Pass back the return code
			li		r3,1						; Set normal return with check for AST
			b		EXT(ppcscret)				; Go back to handler...

vmmBogus:	li		r3,0						; Bogus selector, treat like a bogus system call
			b		EXT(ppcscret)				; Go back to handler...


			.align	5
			.globl	EXT(vmm_get_version_sel)

LEXT(vmm_get_version_sel)						; Selector-based version of get version

			lis		r3,hi16(EXT(vmm_get_version))
			ori		r3,r3,lo16(EXT(vmm_get_version))
			b		selcomm


			.align	5
			.globl	EXT(vmm_get_features_sel)

LEXT(vmm_get_features_sel)						; Selector-based version of get features

			lis		r3,hi16(EXT(vmm_get_features))
			ori		r3,r3,lo16(EXT(vmm_get_features))
			b		selcomm


			.align	5
			.globl	EXT(vmm_init_context_sel)

LEXT(vmm_init_context_sel)						; Selector-based version of init context

			lwz		r4,saver4(r30)				; Get the passed-in version
			lwz		r5,saver5(r30)				; Get the passed-in comm area
			lis		r3,hi16(EXT(vmm_init_context))
			stw		r4,saver3(r30)				; Cheat and move this parameter over
			ori		r3,r3,lo16(EXT(vmm_init_context))
			stw		r5,saver4(r30)				; Cheat and move this parameter over

selcomm:	mtlr	r3							; Set the real routine address
			mr		r3,r30						; Pass in the savearea
			blrl								; Call the function
			b		EXT(ppcscret)				; Go back to handler...

/*
 *			Here is where we transition to the virtual machine.
 *
 *			We will swap the register context in the savearea with the one saved in our
 *			shared context area. We do a bit of validity checking, clear any nasty bits
 *			in the MSR, and force the mandatory ones on.
 *
 *			Then we will set up the new address space to run with, and anything else
 *			that is normally part of a context switch.
 *
 *			The vmm_execute_vm entry point is for the fused vmm_map_execute and
 *			vmm_protect_execute calls. This is called, but never returned from. We
 *			always go directly back to the user from here.
 *
 *			Still need to figure out final floats and vectors. For now, we will go brute
 *			force: when we go into the VM, we will force-save any normal floats and
 *			vectors. Then we will hide them and swap the VM copy (if any) into the normal
 *			chain. When we exit the VM we will do the opposite. This is not as fast as I
 *			would like it to be.
 */
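;
;			In outline, the switch path below does the following (each step is
;			detailed at its label):
;
;				1. switchIntoVM  - validate the context index and slot
;				2. swvmChkIntcpt - honor immediate intercepts (external stop, timer pop)
;				3. swvmDoSwitch  - swap FP/vector state (vmmxcng), switch the address
;				                   space, and exchange general register state (swapCtxt)
;				4. exit through ppcscret to unstack saveareas and handle ASTs
;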


			.align	5
			.globl	EXT(vmm_execute_vm)

LEXT(vmm_execute_vm)

			lwz		r30,ACT_MACT_PCB(r3)		; Restore the savearea pointer because it could be trash here
			b		EXT(switchIntoVM)			; Join common...


			.align	5
			.globl	EXT(switchIntoVM)

LEXT(switchIntoVM)

			lwz		r5,vmmControl(r3)			; Pick up the control table address
			subi	r4,r4,1						; Switch to zero offset
			rlwinm.	r2,r5,0,0,30				; Is there a context there? (Note: we will ignore bit 31 so that we
												; do not try this while we are transitioning off to on)
			cmplwi	cr1,r4,kVmmMaxContextsPerThread	; Is the index valid?
			beq-	vmmBogus					; Not started, treat like a bogus system call
			mulli	r2,r4,vmmCEntrySize			; Get displacement from index
			bgt-	cr1,swvmmBogus				; Index is bogus...
			add		r2,r2,r5					; Point to the entry

			lwz		r4,vmmFlags(r2)				; Get the flags for the selected entry
			lwz		r5,vmmContextKern(r2)		; Get the context area address
			rlwinm.	r26,r4,0,vmmInUseb,vmmInUseb	; See if the slot is in use
			bne+	swvmChkIntcpt				; We are so cool. Go do check for immediate intercepts...

swvmmBogus:	li		r2,kVmmBogusContext			; Set bogus index return
			li		r3,1						; Set normal return with check for AST
			stw		r2,saver3(r30)				; Pass back the return code
			b		EXT(ppcscret)				; Go back to handler...

;
;			Here we check for any immediate intercepts. So far, the only
;			two of these are a timer pop and an external stop. We will not dispatch if
;			either is true. They need to either reset the timer (i.e., set timer
;			to 0) or set a future time, or, if it is an external stop, set the vmmXStopRst flag.
;
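;			(In the code below, the start bit in vmmCntrl serves as that stop reset:
;			when it is set, we clear it and atomically clear the vmmXStop flag
;			before testing for a stop.)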

swvmChkIntcpt:
			lwz		r6,vmmCntrl(r5)				; Get the control field
			rlwinm.	r7,r6,0,vmmXStartb,vmmXStartb	; Clear all but start bit
			beq+	swvmChkStop					; Do not reset stop
			andc	r6,r6,r7					; Clear it
			li		r8,vmmFlags					; Point to the flags
			stw		r6,vmmCntrl(r5)				; Set the control field

swvmtryx:	lwarx	r4,r8,r2					; Pick up the flags
			rlwinm	r4,r4,0,vmmXStopb+1,vmmXStopb-1	; Clear the stop bit
			stwcx.	r4,r8,r2					; Save the updated field
			bne-	swvmtryx					; Try again...

swvmChkStop:
			rlwinm.	r26,r4,0,vmmXStopb,vmmXStopb	; Is this VM stopped?
			beq+	swvmNoStop					; Nope...

			li		r2,kVmmStopped				; Set stopped return
			li		r3,1						; Set normal return with check for AST
			stw		r2,saver3(r30)				; Pass back the return code
			stw		r2,return_code(r5)			; Save the exit code
			b		EXT(ppcscret)				; Go back to handler...

swvmNoStop:
			rlwinm.	r26,r4,0,vmmTimerPopb,vmmTimerPopb	; Did the timer pop?
			beq+	swvmDoSwitch				; No...

			li		r2,kVmmReturnNull			; Set null return
			li		r3,1						; Set normal return with check for AST
			stw		r2,saver3(r30)				; Pass back the return code
			stw		r2,return_code(r5)			; Save the exit code
			b		EXT(ppcscret)				; Go back to handler...

;
;			Here is where we actually swap into the VM (alternate) context.
;			We do a wholesale swap of the registers in the context area (the VM's)
;			with the ones in the savearea (our main code). During the copy, we fix up the
;			MSR, forcing on a few bits and turning off a few others. Then we deal with the
;			PMAP and other per_proc stuff. Finally, we exit back through the main exception
;			handler to deal with unstacking saveareas, ASTs, etc.
;

swvmDoSwitch:

;
;			First, we save the volatile registers we care about. Remember, all register
;			handling here is pretty funky anyway, so we just pick the ones that are ok.
;
			mr		r26,r3						; Save the activation pointer
			mr		r28,r5						; Save the context pointer
			mr		r27,r2						; Save the context entry

			bl		vmmxcng						; Exchange the vector and floating point contexts
			mr		r5,r28						; Restore this register

			lwz		r11,ACT_MACT_SPF(r26)		; Get the special flags
			lwz		r3,vmmPmap(r27)				; Get the pointer to the PMAP
			oris	r15,r11,hi16(runningVM)		; Show that we are swapped to the VM right now
			bl		EXT(hw_set_user_space_dis)	; Swap the address spaces
			lwz		r17,vmmFlags(r27)			; Get the status flags
			mfsprg	r10,0						; Get the per_proc
			rlwinm.	r0,r17,0,vmmMapDoneb,vmmMapDoneb	; Did we just do a map function?
			stw		r27,vmmCEntry(r26)			; Remember what context we are running
			andc	r17,r17,r0					; Turn off map flag
			beq+	swvmNoMap					; No mapping done...

;
;			This little bit of hoopala here (triggered by vmmMapDone) is
;			a performance enhancement. It changes the returning savearea
;			to look like we had a DSI rather than a system call. Then, by setting
;			the redrive bit, the exception handler will redrive the exception as
;			a DSI, entering the last mapped address into the hash table. This keeps
;			double faults from happening. Note that there is only a gain if the VM
;			takes a fault, the emulator merely resolves it, and then VM execution
;			begins again. It seems like this should be the normal case.
;

			lwz		r3,SAVflags(r30)			; Pick up the savearea flags
			lwz		r2,vmmLastMap(r27)			; Get the last mapped address
			li		r20,T_DATA_ACCESS			; Change to DSI fault
			oris	r3,r3,hi16(SAVredrive)		; Set exception redrive
			stw		r2,savedar(r30)				; Set the DAR to the last thing we mapped
			stw		r3,SAVflags(r30)			; Turn on the redrive request
			lis		r2,hi16(MASK(DSISR_HASH))	; Set PTE/DBAT miss
			stw		r20,saveexception(r30)		; Say we need to emulate a DSI
			stw		r2,savedsisr(r30)			; Pretend we have a PTE miss

swvmNoMap:	lwz		r20,vmmContextKern(r27)		; Get the comm area
			rlwimi	r15,r17,32-(floatCngbit-vmmFloatCngdb),floatCngbit,vectorCngbit	; Shift and insert changed bits
			lwz		r20,vmmCntrl(r20)			; Get the control flags
			rlwimi	r17,r11,8,24,31				; Save the old spf flags
			rlwimi	r15,r20,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit	; Set the protection key
			stw		r15,spcFlags(r10)			; Set per_proc copy of the special flags
			stw		r15,ACT_MACT_SPF(r26)		; Set the special flags

			stw		r17,vmmFlags(r27)			; Set the status flags

			bl		swapCtxt					; First, swap the general register state

			lwz		r17,vmmContextKern(r27)		; Get the comm area back

			lwz		r15,vmmCntrl(r17)			; Get the control flags again

			rlwinm.	r0,r15,0,vmmFloatLoadb,vmmFloatLoadb	; Are there new floating point values?
			li		r14,vmmppcFPRs				; Get displacement to the new values
			andc	r15,r15,r0					; Clear the bit
			beq+	swvmNoNewFloats				; Nope, good...

			lwz		r3,ACT_MACT_FPU(r26)		; Get the FPU savearea
			dcbt	r14,r18						; Touch in first line of new stuff
			mr.		r3,r3						; Is there one?
			bne+	swvmGotFloat				; Yes...

			bl		EXT(save_get)				; Get a savearea

			li		r11,0						; Get a 0
			lis		r7,hi16(SAVfpuvalid)		; Set the allocated bit
			stw		r3,ACT_MACT_FPU(r26)		; Set the floating point savearea
			stw		r7,SAVflags(r3)				; Set the validity flags
			stw		r11,SAVlvlfp(r3)			; Set the context level

swvmGotFloat:
			dcbt	r14,r17						; Touch in first line of new stuff
			la		r4,savefp0(r3)				; Point to the destination
			mr		r21,r3						; Save the savearea
			la		r3,vmmppcFPRs(r17)			; Point to the source
			li		r5,33*8						; Get the size (32 FP + FPSCR at 8 bytes each)

			bl		EXT(bcopy)					; Copy the new values

			lwz		r11,ACT_MACT_SPF(r26)		; Get the special flags
			stw		r15,vmmCntrl(r17)			; Save the control flags sans vmmFloatLoad
			rlwinm	r11,r11,0,floatCngbit+1,floatCngbit-1	; Clear the changed bit here
			lwz		r14,vmmStat(r17)			; Get the status flags
			mfsprg	r10,0						; Get the per_proc
			stw		r11,ACT_MACT_SPF(r26)		; Set the special flags
			rlwinm	r14,r14,0,vmmFloatCngdb+1,vmmFloatCngdb-1	; Clear the changed flag
			stw		r11,spcFlags(r10)			; Set per_proc copy of the special flags
			stw		r14,vmmStat(r17)			; Set the status flags sans vmmFloatCngd
			lwz		r11,savefpscrpad(r21)		; Get the new fpscr pad
			lwz		r14,savefpscr(r21)			; Get the new fpscr
			stw		r11,savexfpscrpad(r30)		; Save the new fpscr pad
			stw		r14,savexfpscr(r30)			; Save the new fpscr

swvmNoNewFloats:
			rlwinm.	r0,r15,0,vmmVectLoadb,vmmVectLoadb	; Are there new vector values?
			li		r14,vmmppcVRs				; Get displacement to the new values
			andc	r15,r15,r0					; Clear the bit
			beq+	swvmNoNewVects				; Nope, good...

			lwz		r3,ACT_MACT_VMX(r26)		; Get the vector savearea
			dcbt	r14,r27						; Touch in first line of new stuff
			mr.		r3,r3						; Is there one?
			bne+	swvmGotVect					; Yes...

			bl		EXT(save_get)				; Get a savearea

			li		r21,0						; Get a 0
			lis		r7,hi16(SAVvmxvalid)		; Set the allocated bit
			stw		r3,ACT_MACT_VMX(r26)		; Set the vector savearea indication
			stw		r7,SAVflags(r3)				; Set the validity flags
			stw		r21,SAVlvlvec(r3)			; Set the context level

swvmGotVect:
			dcbt	r14,r17						; Touch in first line of new stuff
			mr		r21,r3						; Save the pointer to the savearea
			la		r4,savevr0(r3)				; Point to the destination
			la		r3,vmmppcVRs(r17)			; Point to the source
			li		r5,33*16					; Get the size (32 vectors + VSCR at 16 bytes each)

			bl		EXT(bcopy)					; Copy the new values

			lwz		r11,ACT_MACT_SPF(r26)		; Get the special flags
			stw		r15,vmmCntrl(r17)			; Save the control flags sans vmmVectLoad
			rlwinm	r11,r11,0,vectorCngbit+1,vectorCngbit-1	; Clear the changed bit here
			lwz		r14,vmmStat(r17)			; Get the status flags
			mfsprg	r10,0						; Get the per_proc
			stw		r11,ACT_MACT_SPF(r26)		; Set the special flags
			rlwinm	r14,r14,0,vmmVectCngdb+1,vmmVectCngdb-1	; Clear the changed flag
			eqv		r15,r15,r15					; Get all foxes (0xFFFFFFFF)
			stw		r11,spcFlags(r10)			; Set per_proc copy of the special flags
			stw		r14,vmmStat(r17)			; Set the status flags sans vmmVectCngd
			stw		r15,savevrvalid(r21)		; Set the valid bits to all foxes

swvmNoNewVects:
			li		r3,1						; Show normal exit with check for AST
			mr		r9,r26						; Move the activation pointer
			b		EXT(ppcscret)				; Go back to handler...



;
;			Here is where we exchange the emulator floating point and vector contexts
;			for the virtual machine's. Remember, this is not so efficient and needs
;			a rewrite. Also remember the funky register conventions (i.e.,
;			we need to know what our callers need saved and what our callees trash).
;
;			Note: we expect R26 to contain the activation and R27 to contain the context
;			entry pointer.
;

vmmxcng:	mflr	r21							; Save the return point
			mr		r3,r26						; Pass in the activation
			bl		EXT(fpu_save)				; Save any floating point context
			mr		r3,r26						; Pass in the activation
			bl		EXT(vec_save)				; Save any vector context

			lis		r10,hi16(EXT(per_proc_info))	; Get top of first per_proc
			li		r8,PP_FPU_THREAD			; Index to FPU owner
			ori		r10,r10,lo16(EXT(per_proc_info))	; Get bottom of first per_proc
			lis		r6,hi16(EXT(real_ncpus))	; Get number of CPUs
			li		r7,0						; Get set to clear
			ori		r6,r6,lo16(EXT(real_ncpus))	; Get number of CPUs
			li		r9,PP_VMX_THREAD			; Index to vector owner
			lwz		r6,0(r6)					; Get the actual CPU count

vmmrt1:		lwarx	r3,r8,r10					; Get FPU owner
			cmplw	r3,r26						; Do we own it?
			bne		vmmrt2						; Nope...
			stwcx.	r7,r8,r10					; Clear it
			bne-	vmmrt1						; Someone else diddled, try again...

vmmrt2:		lwarx	r3,r9,r10					; Get vector owner
			cmplw	r3,r26						; Do we own it?
			bne		vmmxnvec					; Nope...
			stwcx.	r7,r9,r10					; Clear it
			bne-	vmmrt2						; Someone else diddled, try again...

vmmxnvec:	addic.	r6,r6,-1					; Done with all CPUs?
			addi	r10,r10,ppSize				; On to the next
			bgt		vmmrt1						; Do all processors...

;
;			At this point, the FP and vector states for the current activation
;			are saved and not live on any processor. Also, they should be the
;			only contexts on the activation. Note that because we are currently
;			taking the cowardly way out and ensuring that no contexts are live,
;			we do not need to worry about the CPU fields.
;

			lwz		r8,ACT_MACT_FPU(r26)		; Get the FPU savearea
			lwz		r9,ACT_MACT_VMX(r26)		; Get the vector savearea
			lwz		r10,vmmFPU_pcb(r27)			; Get the FPU savearea
			lwz		r11,vmmVMX_pcb(r27)			; Get the vector savearea
			li		r7,0						; Clear this
			mtlr	r21							; Restore the return
			stw		r10,ACT_MACT_FPU(r26)		; Set the FPU savearea
			stw		r11,ACT_MACT_VMX(r26)		; Set the vector savearea
			stw		r8,vmmFPU_pcb(r27)			; Set the FPU savearea
			stw		r9,vmmVMX_pcb(r27)			; Set the vector savearea
			stw		r7,ACT_MACT_FPUlvl(r26)		; Make sure the level is clear
			stw		r7,ACT_MACT_VMXlvl(r26)		; Make sure the level is clear

			mr.		r8,r8						; Do we have any old floating point context?
			lwz		r7,savexfpscrpad(r30)		; Get first part of latest fpscr
			lwz		r9,savexfpscr(r30)			; Get second part of the latest fpscr
			beq-	xcngnold					; Nope...
			stw		r7,savefpscrpad(r8)			; Set first part of fpscr
			stw		r9,savefpscr(r8)			; Set fpscr

xcngnold:	mr.		r10,r10						; Any new context?
			li		r7,0						; Assume no FP
			li		r9,0						; Assume no FP
			beq-	xcngnnew					; Nope...
			lwz		r7,savefpscrpad(r10)		; Get first part of latest fpscr
			lwz		r9,savefpscr(r10)			; Get second part of the latest fpscr

xcngnnew:	stw		r7,savexfpscrpad(r30)		; Set the fpscr pad
			stw		r9,savexfpscr(r30)			; Set the fpscr
			blr									; Return...


;
;			Here is where we exit from vmm mode. We do this on any kind of exception.
;			Interruptions (decrementer, external, etc.) are another story though.
;			Those we just pass through. We also switch back explicitly when requested.
;			This will happen in response to a timer pop and some kinds of ASTs.
;
;			Inputs:
;				R3 = activation
;				R4 = savearea
;

			.align	5
			.globl	EXT(vmm_exit)

LEXT(vmm_exit)

			lwz		r2,vmmCEntry(r3)			; Get the context that is active
			lwz		r12,ACT_VMMAP(r3)			; Get the VM_MAP for this guy
			lwz		r11,ACT_MACT_SPF(r3)		; Get the special flags
			lwz		r19,vmmFlags(r2)			; Get the status flags
			mr		r16,r3						; R16 is safe to use for the activation address

			rlwimi	r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb	; Shift and insert changed bits
			li		r0,0						; Get a zero
			rlwimi	r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit	; Restore the saved part of the spf
			lwz		r3,VMMAP_PMAP(r12)			; Get the pmap for the activation
			rlwinm	r11,r11,0,runningVMbit+1,runningVMbit-1	; Clear the "in VM" flag
			stw		r0,vmmCEntry(r16)			; Clear pointer to active context
			stw		r19,vmmFlags(r2)			; Set the status flags
			rlwinm	r11,r11,0,userProtKeybit+1,userProtKeybit-1	; Set back to normal protection key
			mfsprg	r10,0						; Get the per_proc block
			stw		r11,ACT_MACT_SPF(r16)		; Set the special flags
			stw		r11,spcFlags(r10)			; Set per_proc copy of the special flags

			mr		r26,r16						; Save the activation pointer
			mr		r27,r2						; Save the context entry

			bl		EXT(hw_set_user_space_dis)	; Swap the address spaces back to the emulator

			bl		vmmxcng						; Exchange the vector and floating point contexts

			mr		r2,r27						; Restore
			lwz		r5,vmmContextKern(r2)		; Get the context area address
			mr		r3,r16						; Restore activation address
			stw		r19,vmmStat(r5)				; Save the changed and popped flags
			bl		swapCtxt					; Exchange the VM context for the emulator one
			stw		r8,saver3(r30)				; Set the return code as the return value also
			b		EXT(retFromVM)				; Go back to handler...


;
;			Here is where we force an exit from vmm mode. We do this as part of
;			termination, to ensure that we are not executing in an alternate
;			context. Because this is called from C, we need to save all
;			non-volatile registers.
;
;			Inputs:
;				R3 = activation
;				R4 = user savearea
;				Interruptions disabled
;

			.align	5
			.globl	EXT(vmm_force_exit)

LEXT(vmm_force_exit)

			stwu	r1,-(FM_ALIGN(20*4)+FM_SIZE)(r1)	; Get enough space for the registers
			mflr	r0							; Save the return
			stmw	r13,FM_ARG0(r1)				; Save all non-volatile registers
			stw		r0,(FM_ALIGN(20*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Save the return

			lwz		r2,vmmCEntry(r3)			; Get the context that is active
			lwz		r11,ACT_MACT_SPF(r3)		; Get the special flags
			lwz		r19,vmmFlags(r2)			; Get the status flags
			lwz		r12,ACT_VMMAP(r3)			; Get the VM_MAP for this guy

			rlwimi	r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb	; Shift and insert changed bits
			mr		r26,r3						; Save the activation pointer
			rlwimi	r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit	; Restore the saved part of the spf
			li		r0,0						; Get a zero
			rlwinm	r9,r11,0,runningVMbit+1,runningVMbit-1	; Clear the "in VM" flag
			cmplw	r9,r11						; Check if we were in a vm
			lwz		r3,VMMAP_PMAP(r12)			; Get the pmap for the activation
			beq-	vfeNotRun					; We were not in a vm...
			rlwinm	r9,r9,0,userProtKeybit+1,userProtKeybit-1	; Set back to normal protection key
			stw		r0,vmmCEntry(r26)			; Clear pointer to active context
			mfsprg	r10,0						; Get the per_proc block
			stw		r9,ACT_MACT_SPF(r26)		; Set the special flags
			stw		r9,spcFlags(r10)			; Set per_proc copy of the special flags

			mr		r27,r2						; Save the context entry
			mr		r30,r4						; Save the savearea

			bl		EXT(hw_set_user_space_dis)	; Swap the address spaces back to the emulator

			bl		vmmxcng						; Exchange the vector and floating point contexts

			lwz		r5,vmmContextKern(r27)		; Get the context area address
			stw		r19,vmmStat(r5)				; Save the changed and popped flags
			bl		swapCtxt					; Exchange the VM context for the emulator one

			lwz		r8,saveexception(r30)		; Pick up the exception code
			rlwinm	r8,r8,30,24,31				; Convert exception to return code
			stw		r8,saver3(r30)				; Set the return code as the return value also


vfeNotRun:	lmw		r13,FM_ARG0(r1)				; Restore all non-volatile registers
			lwz		r1,0(r1)					; Pop the stack
			lwz		r0,FM_LR_SAVE(r1)			; Get the return address
			mtlr	r0							; Set return
			blr


;
;			Note: we will not do any DCBTs to the savearea. It was just stored to a few cycles ago and should
;			still be in the cache. Note also that the context area registers map identically to the savearea.
;
;			NOTE: we do not save any of the non-volatile registers through this swap code.
;			NOTE NOTE: R16 is important to save!!!!
;			NOTE: I am too dumb to figure out a faster way to swap 5 lines of memory, so I go for
;			the simple way.
;
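;			(For orientation: the swap covers five 32-byte cache lines starting at
;			savesrr0. The first hunk, SRR0/SRR1/R0-R5, is done by hand below so the
;			MSR can be scrubbed and system-call return parameters captured; the
;			remaining four hunks go through swaploop.)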

			.align	5

swapCtxt:	addi	r6,r5,vmm_proc_state		; Point to the state
			li		r25,32						; Get a cache size increment
			addi	r4,r30,savesrr0				; Point to the start of the savearea
			dcbt	0,r6						; Touch in the first line of the context area

			lwz		r14,saveexception(r30)		; Get the exception code
			lwz		r7,savesrr0(r4)				; First line of savearea
			lwz		r8,savesrr1(r4)
			lwz		r9,saver0(r4)
			cmplwi	cr1,r14,T_SYSTEM_CALL		; Are we switching because of a system call?
			lwz		r10,saver1(r4)
			lwz		r11,saver2(r4)
			lwz		r12,saver3(r4)
			lwz		r13,saver4(r4)
			lwz		r14,saver5(r4)

			dcbt	r25,r6						; Touch second line of context area
			addi	r25,r25,32					; Bump

			lwz		r15,savesrr0(r6)			; First line of context
			lis		r22,hi16(MSR_IMPORT_BITS)	; Get the MSR bits that are controllable by user
			lwz		r23,savesrr1(r6)
			ori		r22,r22,lo16(MSR_IMPORT_BITS)	; Get the rest of the MSR bits that are controllable by user
			lwz		r17,saver0(r6)
			lwz		r18,saver1(r6)
			and		r23,r23,r22					; Keep only the controllable bits
			lwz		r19,saver2(r6)
			oris	r23,r23,hi16(MSR_EXPORT_MASK_SET)	; Force on the required bits
			lwz		r20,saver3(r6)
			ori		r23,r23,lo16(MSR_EXPORT_MASK_SET)	; Force on the other required bits
			lwz		r21,saver4(r6)
			lwz		r22,saver5(r6)
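
;			(Restating the masking above, not new logic: the SRR1 the VM will run
;			with is computed as
;
;				vm_srr1 = (vm_srr1 & MSR_IMPORT_BITS) | MSR_EXPORT_MASK_SET
;
;			so the VM keeps only the user-controllable MSR bits it asked for, and
;			the mandatory bits are always forced on.)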

			dcbt	r25,r6						; Touch third line of context area
			addi	r25,r25,32					; Bump (r25 is 64 now)

			stw		r7,savesrr0(r6)				; Save emulator context into the context area
			stw		r8,savesrr1(r6)
			stw		r9,saver0(r6)
			stw		r10,saver1(r6)
			stw		r11,saver2(r6)
			stw		r12,saver3(r6)
			stw		r13,saver4(r6)
			stw		r14,saver5(r6)

;
;			Save the first 3 parameters if we are an SC (we will take care of the last later)
;
			bne+	cr1,swapnotsc				; Skip next if not an SC exception...
			stw		r12,return_params+0(r5)		; Save the first return
			stw		r13,return_params+4(r5)		; Save the second return
			stw		r14,return_params+8(r5)		; Save the third return

swapnotsc:	stw		r15,savesrr0(r4)			; Save vm context into the savearea
			stw		r23,savesrr1(r4)
			stw		r17,saver0(r4)
			stw		r18,saver1(r4)
			stw		r19,saver2(r4)
			stw		r20,saver3(r4)
			stw		r21,saver4(r4)
			stw		r22,saver5(r4)

;
;			The first hunk is swapped, do the rest in a loop
;
			li		r23,4						; Four more hunks to swap


swaploop:	addi	r4,r4,32					; Bump savearea pointer
			addi	r6,r6,32					; Bump context area pointer
			addic.	r23,r23,-1					; Count down
			dcbt	r25,r6						; Touch the 4th and 5th lines (and 6th and 7th, which are extra)

			lwz		r7,0(r4)					; Read savearea
			lwz		r8,4(r4)
			lwz		r9,8(r4)
			lwz		r10,12(r4)
			lwz		r11,16(r4)
			lwz		r12,20(r4)
			lwz		r13,24(r4)
			lwz		r14,28(r4)

			lwz		r15,0(r6)					; Read vm context
			lwz		r24,4(r6)
			lwz		r17,8(r6)
			lwz		r18,12(r6)
			lwz		r19,16(r6)
			lwz		r20,20(r6)
			lwz		r21,24(r6)
			lwz		r22,28(r6)

			stw		r7,0(r6)					; Write context
			stw		r8,4(r6)
			stw		r9,8(r6)
			stw		r10,12(r6)
			stw		r11,16(r6)
			stw		r12,20(r6)
			stw		r13,24(r6)
			stw		r14,28(r6)

			stw		r15,0(r4)					; Write vm context
			stw		r24,4(r4)
			stw		r17,8(r4)
			stw		r18,12(r4)
			stw		r19,16(r4)
			stw		r20,20(r4)
			stw		r21,24(r4)
			stw		r22,28(r4)

			bgt+	swaploop					; Do it all...

;
;			Cobble up the exception return code and save any specific return values
;

			lwz		r7,saveexception(r30)		; Pick up the exception code
			rlwinm	r8,r7,30,24,31				; Convert exception to return code
			cmplwi	r7,T_DATA_ACCESS			; Was this a DSI?
			stw		r8,return_code(r5)			; Save the exit code
			cmplwi	cr1,r7,T_INSTRUCTION_ACCESS	; Exiting because of an ISI?
			beq+	swapDSI						; Yeah...
			cmplwi	r7,T_ALIGNMENT				; Alignment exception?
			beq+	cr1,swapISI					; We had an ISI...
			cmplwi	cr1,r7,T_SYSTEM_CALL		; Exiting because of a system call?
			beq+	swapDSI						; An alignment exception looks like a DSI...
			beq+	cr1,swapSC					; We had a system call...

			blr									; Return...

;
;			Set exit returns for a DSI or alignment exception
;

swapDSI:	lwz		r10,savedar(r30)			; Get the DAR
			lwz		r7,savedsisr(r30)			; and the DSISR
			stw		r10,return_params+0(r5)		; Save DAR as first return parm
			stw		r7,return_params+4(r5)		; Save DSISR as second return parm
			blr									; Return...

;
;			Set exit returns for an ISI
;

swapISI:	lwz		r7,savesrr1+vmm_proc_state(r5)	; Get the SRR1 value
			lwz		r10,savesrr0+vmm_proc_state(r5)	; Get the PC as failing address
			rlwinm	r7,r7,0,1,4					; Save the bits that match the DSISR
			stw		r10,return_params+0(r5)		; Save PC as first return parm
			stw		r7,return_params+4(r5)		; Save the pseudo-DSISR as second return parm
			blr									; Return...

;
;			Set exit returns for a system call (note: we did the first 3 earlier)
;			Do we really need to pass parameters back here????
;

swapSC:		lwz		r10,saver6+vmm_proc_state(r5)	; Get the fourth parameter
			stw		r10,return_params+12(r5)	; Save it
			blr									; Return...