2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
23 * @APPLE_LICENSE_HEADER_END@
28 #include <ppc/proc_reg.h>
29 #include <ppc/exception.h>
32 * This file contains implementations for the Virtual Machine Monitor
38 * int vmm_dispatch(savearea, act);
40 * vmm_dispatch is a PPC only system call. It is used with a selector (first
41 * parameter) to determine what function to enter. This is treated as an extension
45 * R4 = current activation
46 * R16 = current thread
47 * R30 = current savearea
; vmm_dispatch_table: selector-indexed dispatch table for the vmm_dispatch
; system call.  Each entry is a PAIR of 32-bit words:
;   word 0 = handler address
;   word 1 = FAM-validity flag (1 = selector may be issued while the
;            processor is in FAM mode, 0 = it may not)
; vmm_count below is derived from the table size (8 bytes per entry), so
; entries may only be appended at the bottom, never reordered or removed.
50 .align 5 /* Line up on cache line */
51 .globl EXT(vmm_dispatch_table)
53 LEXT(vmm_dispatch_table)
55 /* Don't change the order of these routines in the table. It's */
56 /* OK to add new routines, but they must be added at the bottom. */
58 .long EXT(vmm_get_version_sel) ; Get the version of the VMM interface
59 .long 0 ; Not valid in Fam
60 .long EXT(vmm_get_features_sel) ; Get the features of the VMM interface
61 .long 0 ; Not valid in Fam
62 .long EXT(vmm_init_context_sel) ; Initializes a new VMM context
63 .long 0 ; Not valid in Fam
64 .long EXT(vmm_tear_down_context) ; Tears down a previously-allocated VMM context
65 .long 0 ; Not valid in Fam
66 .long EXT(vmm_tear_down_all) ; Tears down all VMMs
67 .long 0 ; Not valid in Fam
68 .long EXT(vmm_map_page) ; Maps a page from the main address space into the VM space
69 .long 1 ; Valid in Fam
70 .long EXT(vmm_get_page_mapping) ; Returns client va associated with VM va
71 .long 1 ; Valid in Fam
72 .long EXT(vmm_unmap_page) ; Unmaps a page from the VM space
73 .long 1 ; Valid in Fam
74 .long EXT(vmm_unmap_all_pages) ; Unmaps all pages from the VM space
75 .long 1 ; Valid in Fam
76 .long EXT(vmm_get_page_dirty_flag) ; Gets the change bit for a page and optionally clears it
77 .long 1 ; Valid in Fam
78 .long EXT(vmm_get_float_state) ; Gets current floating point state
79 .long 0 ; Not valid in Fam
80 .long EXT(vmm_get_vector_state) ; Gets current vector state
81 .long 0 ; Not valid in Fam
82 .long EXT(vmm_set_timer) ; Sets a timer value
83 .long 1 ; Valid in Fam
84 .long EXT(vmm_get_timer) ; Gets a timer value
85 .long 1 ; Valid in Fam
86 .long EXT(switchIntoVM) ; Switches to the VM context
87 .long 1 ; Valid in Fam
88 .long EXT(vmm_protect_page) ; Sets protection values for a page
89 .long 1 ; Valid in Fam
90 .long EXT(vmm_map_execute) ; Maps a page and launches VM
91 .long 1 ; Flag value 1 = valid in Fam; NOTE(review): original comment said "Not valid in Fam", contradicting the value -- confirm intent
92 .long EXT(vmm_protect_execute) ; Sets protection values for a page and launches VM
93 .long 1 ; Valid in Fam
94 .long EXT(vmm_map_list) ; Maps a list of pages
95 .long 1 ; Valid in Fam
96 .long EXT(vmm_unmap_list) ; Unmaps a list of pages
97 .long 1 ; Valid in Fam
98 .long EXT(vmm_fam_reserved) ; exit from Fam to host
99 .long 1 ; Valid in Fam
100 .long EXT(vmm_fam_reserved) ; resume guest from Fam
101 .long 1 ; Valid in Fam
102 .long EXT(vmm_fam_reserved) ; get guest register from Fam
103 .long 1 ; Valid in Fam
104 .long EXT(vmm_fam_reserved) ; Set guest register from Fam
105 .long 1 ; Valid in Fam
; Number of selectors = table size / 8 (each entry is address + flag word).
107 .set vmm_count,(.-EXT(vmm_dispatch_table))/8 ; Get the top number
; vmm_dispatch: front end for the PPC-only vmm system call.
; In:  r30 = current savearea, r4 = current activation, r16 = current thread.
; The selector (saver3) indexes vmm_dispatch_table (8 bytes per entry).
; kVmmExecuteVM is special-cased and branches straight to switchIntoVM.
; Out-of-range selectors -- and selectors whose FAM-valid flag is 0 while
; the processor is in FAM mode -- divert to the vmmBogus path.  Otherwise
; the handler is called with r3 = activation and r4-r7 = parameters 1-4.
111 .globl EXT(vmm_dispatch)
115 lwz r11,saver3(r30) ; Get the selector
116 mr r3,r4 ; All of our functions want the activation as the first parm
117 lis r10,hi16(EXT(vmm_dispatch_table)) ; Get top half of table
118 cmplwi r11,kVmmExecuteVM ; Should we switch to the VM now?
119 cmplwi cr1,r11,vmm_count ; See if we have a valid selector
120 ori r10,r10,lo16(EXT(vmm_dispatch_table)) ; Get low half of table
121 lwz r4,saver4(r30) ; Get 1st parameter after selector
122 beq+ EXT(switchIntoVM) ; Yes, go switch to it....
123 rlwinm r11,r11,3,0,28 ; Index into table (selector * 8)
124 bgt- cr1,vmmBogus ; It is a bogus entry
125 add r12,r10,r11 ; Get the vmm dispatch syscall entry
126 mfsprg r10,0 ; Get the per_proc
127 lwz r13,0(r12) ; Get address of routine
128 lwz r12,4(r12) ; Get validity flag
129 lwz r5,spcFlags(r10) ; Get per_proc special flags
130 cmpwi cr1,r12,0 ; Check Fam valid
131 rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
132 crand cr0_eq,cr1_eq,cr0_gt ; In Fam and Invalid syscall
133 beq vmmBogus ; Intercept to host
134 lwz r5,saver5(r30) ; Get 2nd parameter after selector
135 lwz r6,saver6(r30) ; Get 3rd parameter after selector
136 mtlr r13 ; Set the routine address
137 lwz r7,saver7(r30) ; Get 4th parameter after selector
139 ; NOTE: currently the most parameters for any call is 4. We will support at most 8 because we
140 ; do not want to get into any stack based parms. However, here is where we need to add
141 ; code for the 5th - 8th parms if we need them.
; NOTE(review): the blrl that actually calls the selected routine appears
; to have been elided from this extract -- confirm against the original file.
146 stw r3,saver3(r30) ; Pass back the return code
147 li r3,1 ; Set normal return with check for AST
148 b EXT(ppcscret) ; Go back to handler...
; vmmBogus path (label line elided in this extract): bad selector, or a
; FAM-invalid selector while in FAM mode.  In FAM mode this becomes a
; vmm exit call; otherwise return 0 to flag a failed system call.
151 mfsprg r10,0 ; Get the per_proc
152 mfsprg r3,1 ; Load current activation
153 lwz r5,spcFlags(r10) ; Get per_proc special flags
154 rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
155 bne vmmexitcall ; Do it to it
156 li r3,0 ; Bogus selector, treat like a bogus system call
157 b EXT(ppcscret) ; Go back to handler...
; vmm_get_version_sel: selector-based wrapper that loads the address of
; the real vmm_get_version routine into r3.
; NOTE(review): the trailing "b selcomm" appears elided from this extract;
; presumably it joins the common selcomm tail below -- confirm against
; the original file.
161 .globl EXT(vmm_get_version_sel)
163 LEXT(vmm_get_version_sel) ; Selector based version of get version
165 lis r3,hi16(EXT(vmm_get_version))
166 ori r3,r3,lo16(EXT(vmm_get_version))
; vmm_get_features_sel: selector-based wrapper that loads the address of
; the real vmm_get_features routine into r3.
; NOTE(review): the trailing "b selcomm" appears elided from this extract;
; presumably it joins the common selcomm tail below -- confirm against
; the original file.
171 .globl EXT(vmm_get_features_sel)
173 LEXT(vmm_get_features_sel) ; Selector based version of get features
175 lis r3,hi16(EXT(vmm_get_features))
176 ori r3,r3,lo16(EXT(vmm_get_features))
; vmm_init_context_sel: selector-based wrapper for vmm_init_context.
; Shifts the version and comm-area parameters down one slot in the
; savearea (saver4/saver5 -> saver3/saver4) so the real routine sees
; them as its first two parameters.
;
; selcomm: common tail shared by the selector wrappers -- calls the
; routine whose address is in r3, passing the savearea as the single
; argument, then returns through the system-call handler.
181 .globl EXT(vmm_init_context_sel)
183 LEXT(vmm_init_context_sel) ; Selector based version of init context
185 lwz r4,saver4(r30) ; Get the passed in version
186 lwz r5,saver5(r30) ; Get the passed in comm area
187 lis r3,hi16(EXT(vmm_init_context))
188 stw r4,saver3(r30) ; Cheat and move this parameter over
189 ori r3,r3,lo16(EXT(vmm_init_context))
190 stw r5,saver4(r30) ; Cheat and move this parameter over
192 selcomm: mtlr r3 ; Set the real routine address
193 mr r3,r30 ; Pass in the savearea
194 blrl ; Call the function
195 b EXT(ppcscret) ; Go back to handler...
198 * Here is where we transition to the virtual machine.
200 * We will swap the register context in the savearea with that which is saved in our shared
201 * context area. We will validity check a bit and clear any nasty bits in the MSR and force
202 * the mandatory ones on.
204 * Then we will setup the new address space to run with, and anything else that is normally part
205 * of a context switch.
207 * The vmm_execute_vm entry point is for the fused vmm_map_execute and vmm_protect_execute
208 * calls. This is called, but never returned from. We always go directly back to the
211 * Still need to figure out final floats and vectors. For now, we will go brute
212 * force and when we go into the VM, we will force save any normal floats and
213 * vectors. Then we will hide them and swap the VM copy (if any) into the normal
214 * chain. When we exit VM we will do the opposite. This is not as fast as I would
; vmm_execute_vm: entry point for the fused vmm_map_execute /
; vmm_protect_execute calls.  Called but never returned from: it reloads
; the savearea pointer from the activation (r30 may be trash here) and
; joins the common switchIntoVM path.
222 .globl EXT(vmm_execute_vm)
225 lwz r30,ACT_MACT_PCB(r3) ; Restore the savearea pointer because it could be trash here
226 b EXT(switchIntoVM) ; Join common...
; switchIntoVM: switch the current thread into a VM context.
; In:  r3 = activation, r4 = 1-based context index, r30 = savearea.
; If the processor is already in FAM mode, resume the FAM guest instead.
; Otherwise validate the context index and the slot's in-use flag, then
; check for immediate intercepts (external stop, timer pop) that block
; dispatch before doing the actual context swap.
230 .globl EXT(switchIntoVM)
233 mfsprg r10,0 ; Get the per_proc
234 lwz r15,spcFlags(r10) ; Get per_proc special flags
235 rlwinm. r0,r15,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
236 bne vmmFamGuestResume ; Already in FAM mode -- resume the guest instead
237 lwz r5,vmmControl(r3) ; Pick up the control table address
238 subi r4,r4,1 ; Switch to zero offset
239 rlwinm. r2,r5,0,0,30 ; Is there a context there? (Note: we will ignore bit 31 so that we
240 ; do not try this while we are transitioning off to on
241 cmplwi cr1,r4,kVmmMaxContextsPerThread ; Is the index valid?
242 beq- vmmBogus ; Not started, treat like a bogus system call
243 mulli r2,r4,vmmCEntrySize ; Get displacement from index
244 bgt- cr1,swvmmBogus ; Index is bogus...
245 add r2,r2,r5 ; Point to the entry
247 lwz r4,vmmFlags(r2) ; Get the flags for the selected entry
248 lwz r5,vmmContextKern(r2) ; Get the context area address
249 rlwinm. r26,r4,0,vmmInUseb,vmmInUseb ; See if the slot is in use
250 bne+ swvmChkIntcpt ; We are so cool. Go do check for immediate intercepts...
252 swvmmBogus: li r2,kVmmBogusContext ; Set bogus index return
253 li r3,1 ; Set normal return with check for AST
254 stw r2,saver3(r30) ; Pass back the return code
255 b EXT(ppcscret) ; Go back to handler...
258 ; Here we check for any immediate intercepts. So far, the only
259 ; two of these are a timer pop and an external stop. We will not dispatch if
260 ; either is true. They need to either reset the timer (i.e. set timer
261 ; to 0) or to set a future time, or if it is external stop, set the vmmXStopRst flag.
; swvmChkIntcpt (label line elided in this extract): if the XStart bit is
; set in the control field, clear it and atomically clear the XStop flag
; in vmmFlags via the lwarx/stwcx. loop below.
265 lwz r6,vmmCntrl(r5) ; Get the control field
266 rlwinm. r7,r6,0,vmmXStartb,vmmXStartb ; Clear all but start bit
267 beq+ swvmChkStop ; Do not reset stop
268 andc r6,r6,r7 ; Clear it
269 li r8,vmmFlags ; Point to the flags
270 stw r6,vmmCntrl(r5) ; Set the control field
272 swvmtryx: lwarx r4,r8,r2 ; Pick up the flags
273 rlwinm r4,r4,0,vmmXStopb+1,vmmXStopb-1 ; Clear the stop bit
274 stwcx. r4,r8,r2 ; Save the updated field
275 bne- swvmtryx ; Try again...
; swvmChkStop (label line elided in this extract): refuse to dispatch a
; stopped VM -- return kVmmStopped to both the caller and the comm area.
278 rlwinm. r26,r4,0,vmmXStopb,vmmXStopb ; Is this VM stopped?
279 beq+ swvmNoStop ; Nope...
281 li r2,kVmmStopped ; Set stopped return
282 li r3,1 ; Set normal return with check for AST
283 stw r2,saver3(r30) ; Pass back the return code
284 stw r2,return_code(r5) ; Save the exit code
285 b EXT(ppcscret) ; Go back to handler...
; swvmNoStop (label line elided in this extract): a pending timer pop
; also blocks dispatch -- return kVmmReturnNull.
288 rlwinm. r26,r4,0,vmmTimerPopb,vmmTimerPopb ; Did the timer go pop?
289 beq+ swvmDoSwitch ; No...
291 li r2,kVmmReturnNull ; Set null return
292 li r3,1 ; Set normal return with check for AST
293 stw r2,saver3(r30) ; Pass back the return code
294 stw r2,return_code(r5) ; Save the exit code
295 b EXT(ppcscret) ; Go back to handler...
298 ; Here is where we actually swap into the VM (alternate) context.
299 ; We will bulk do a wholesale swap of the registers in the context area (the VMs)
300 ; with the ones in the savearea (our main code). During the copy, we will fix up the
301 ; MSR, forcing on a few bits and turning off a few others. Then we will deal with the
302 ; PMAP and other per_proc stuff. Finally, we will exit back through the main exception
303 ; handler to deal with unstacking saveareas and ASTs, etc.
309 ; First, we save the volatile registers we care about. Remember, all register
310 ; handling here is pretty funky anyway, so we just pick the ones that are ok.
; swvmDoSwitch (label line elided in this extract).  Register roles from
; here on: r26 = activation, r27 = context entry, r10 = per_proc,
; r30 = savearea, r15 = special-flags word being assembled.
312 mr r26,r3 ; Save the activation pointer
314 la r11,vmmFacCtx(r2) ; Point to the virtual machine facility context
315 mr r27,r2 ; Save the context entry
316 stw r11,deferctx(r3) ; Start using the virtual machine facility context when we exit
318 lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
319 lwz r3,vmmPmap(r27) ; Get the pointer to the PMAP
320 oris r15,r11,hi16(runningVM) ; Show that we are swapped to the VM right now
321 bl EXT(hw_set_user_space_dis) ; Swap the address spaces
322 lwz r17,vmmFlags(r27) ; Get the status flags
323 lwz r20,vmmContextKern(r27) ; Get the state page kernel addr
324 lwz r21,vmmCntrl(r20) ; Get vmmCntrl
325 rlwinm. r22,r21,0,vmmFamEnab,vmmFamEnab ; Is vmmFamEnab set?
326 beq swvmNoFam ; No Fam intercept
327 rlwimi r15,r21,32+vmmFamSetb-FamVMmodebit,FamVMmodebit,FamVMmodebit ; Set FamVMmode bit
328 rlwinm r21,r21,0,vmmFamSetb+1,vmmFamSetb-1 ; Clear FamSet bit
329 lwz r22,famintercepts(r20) ; Load intercept bit field
330 stw r21,vmmCntrl(r20) ; Update vmmCntrl
331 lwz r19,vmmContextPhys(r27) ; Get vmmFAMarea address
332 stw r22,vmmFAMintercept(r27) ; Get vmmFAMintercept
333 stw r22,FAMintercept(r10) ; Store vmmFAMintercept in per_proc FAMintercept
334 stw r19,vmmContextPhys(r27) ; Store vmmContextPhys
335 stw r19,VMMareaPhys(r10) ; Store VMMareaPhys
336 oris r15,r15,hi16(FamVMena) ; Set FamVMenabit
; swvmNoFam (label line elided in this extract).
338 rlwinm. r0,r17,0,vmmMapDoneb,vmmMapDoneb ; Did we just do a map function?
339 stw r27,vmmCEntry(r26) ; Remember what context we are running
340 andc r17,r17,r0 ; Turn off map flag
341 beq+ swvmNoMap ; No mapping done...
344 ; This little bit of hoopala here (triggered by vmmMapDone) is
345 ; a performance enhancement. This will change the returning savearea
346 ; to look like we had a DSI rather than a system call. Then, setting
347 ; the redrive bit, the exception handler will redrive the exception as
348 ; a DSI, entering the last mapped address into the hash table. This keeps
349 ; double faults from happening. Note that there is only a gain if the VM
350 ; takes a fault, then the emulator resolves it only, and then begins
351 ; the VM execution again. It seems like this should be the normal case.
354 lwz r3,SAVflags(r30) ; Pick up the savearea flags
355 lwz r2,vmmLastMap(r27) ; Get the last mapped address
356 li r20,T_DATA_ACCESS ; Change to DSI fault
357 oris r3,r3,hi16(SAVredrive) ; Set exception redrive
358 stw r2,savedar(r30) ; Set the DAR to the last thing we mapped
359 stw r3,SAVflags(r30) ; Turn on the redrive request
360 lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
361 stw r20,saveexception(r30) ; Say we need to emulate a DSI
362 stw r2,savedsisr(r30) ; Pretend we have a PTE miss
364 swvmNoMap: lwz r20,vmmContextKern(r27) ; Get the comm area
365 rlwimi r15,r17,32-(floatCngbit-vmmFloatCngdb),floatCngbit,vectorCngbit ; Shift and insert changed bits
366 lwz r20,vmmCntrl(r20) ; Get the control flags
367 rlwimi r17,r11,8,24,31 ; Save the old spf flags
368 rlwimi r15,r20,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
369 stw r15,spcFlags(r10) ; Set per_proc copy of the special flags
370 stw r15,ACT_MACT_SPF(r26) ; Get the special flags
372 stw r17,vmmFlags(r27) ; Set the status flags
374 bl swapCtxt ; First, swap the general register state
; Float-context load: if the comm area requests new floating point
; values (vmmFloatLoad), invalidate any other CPU's ownership of this
; facility context, get (or allocate) an FPU savearea, bcopy the 32 new
; FPRs in, install the new FPSCR, and clear the float-changed bits.
376 lwz r17,vmmContextKern(r27) ; Get the comm area back
377 la r25,vmmFacCtx(r27) ; Point to the facility context
378 lwz r15,vmmCntrl(r17) ; Get the control flags again
379 mfsprg r29,0 ; Get the per_proc
382 ; Check if there is new floating point context to load
385 rlwinm. r0,r15,0,vmmFloatLoadb,vmmFloatLoadb ; Are there new floating point values?
386 lhz r29,PP_CPU_NUMBER(r29) ; Get our cpu number
387 li r14,vmmppcFPRs ; Get displacement to the new values
388 andc r15,r15,r0 ; Clear the bit
389 beq+ swvmNoNewFloats ; Nope, good...
391 lwz r19,FPUcpu(r25) ; Get the last CPU we ran on
393 stw r29,FPUcpu(r25) ; Claim the context for ourselves
395 eieio ; Make sure this stays in order
397 lis r18,hi16(EXT(per_proc_info)) ; Set base per_proc
398 mulli r19,r19,ppSize ; Find offset to the owner per_proc
399 ori r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc
400 li r16,FPUowner ; Displacement to float owner
401 add r19,r18,r19 ; Point to the owner per_proc
402 li r0,0 ; Clear this out
404 swvminvfpu: lwarx r18,r16,r19 ; Get the owner
405 cmplw r18,r25 ; Does he still have this context?
406 bne swvminvfpv ; Nope...
407 stwcx. r0,r16,r19 ; Try to invalidate it
408 bne- swvminvfpu ; Try again if there was a collision...
410 swvminvfpv: lwz r3,FPUsave(r25) ; Get the FPU savearea
411 dcbt r14,r17 ; Touch in first line of new stuff
412 mr. r3,r3 ; Is there one?
413 bne+ swvmGotFloat ; Yes...
415 bl EXT(save_get) ; Get a savearea
417 li r7,SAVfloat ; Get floating point flag
418 stw r26,SAVact(r3) ; Save our activation
420 stb r7,SAVflags+2(r3) ; Set that this is floating point
421 stw r0,SAVprev(r3) ; Clear the back chain
422 stw r0,SAVlevel(r3) ; We are always at level 0 (user state)
424 stw r3,FPUsave(r25) ; Chain us to context
; swvmGotFloat (label line elided in this extract): copy the 32 new FPRs
; from the comm area into the FPU savearea.
427 la r4,savefp0(r3) ; Point to the destination
428 mr r21,r3 ; Save the save area
429 la r3,vmmppcFPRs(r17) ; Point to the source
430 li r5,32*8 ; Get the size (32 FPRs at 8 bytes each)
432 bl EXT(bcopy) ; Copy the new values
434 lwz r14,vmmppcFPSCRshadow(r17) ; Get the fpscr pad
435 lwz r10,vmmppcFPSCRshadow+4(r17) ; Get the fpscr
436 stw r14,savefpscrpad(r30) ; Save the new fpscr pad
437 stw r10,savefpscr(r30) ; Save the new fpscr
439 lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
440 stw r15,vmmCntrl(r17) ; Save the control flags sans vmmFloatLoad
441 rlwinm r11,r11,0,floatCngbit+1,floatCngbit-1 ; Clear the changed bit here
442 lwz r14,vmmStat(r17) ; Get the status flags
443 mfsprg r10,0 ; Get the per_proc
444 stw r11,ACT_MACT_SPF(r26) ; Get the special flags
445 rlwinm r14,r14,0,vmmFloatCngdb+1,vmmFloatCngdb-1 ; Clear the changed flag
446 stw r11,spcFlags(r10) ; Set per_proc copy of the special flags
447 stw r14,vmmStat(r17) ; Set the status flags sans vmmFloatCngd
; Vector-context load: same pattern as the float load above, but for the
; AltiVec state (32 VRs, VSCR, VRSAVE), guarded by vmmVectLoad.
; swvmNoNewFloats (label line elided in this extract) falls through here.
450 ; Check if there is new vector context to load
454 rlwinm. r0,r15,0,vmmVectLoadb,vmmVectLoadb ; Are there new vector values?
455 li r14,vmmppcVRs ; Get displacement to the new values
456 andc r15,r15,r0 ; Clear the bit
457 beq+ swvmNoNewVects ; Nope, good...
459 lwz r19,VMXcpu(r25) ; Get the last CPU we ran on
461 stw r29,VMXcpu(r25) ; Claim the context for ourselves
463 eieio ; Make sure this stays in order
465 lis r18,hi16(EXT(per_proc_info)) ; Set base per_proc
466 mulli r19,r19,ppSize ; Find offset to the owner per_proc
467 ori r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc
468 li r16,VMXowner ; Displacement to vector owner
469 add r19,r18,r19 ; Point to the owner per_proc
470 li r0,0 ; Clear this out
472 swvminvvec: lwarx r18,r16,r19 ; Get the owner
473 cmplw r18,r25 ; Does he still have this context?
474 bne swvminvved ; Nope...
475 stwcx. r0,r16,r19 ; Try to invalidate it
476 bne- swvminvvec ; Try again if there was a collision...
478 swvminvved: lwz r3,VMXsave(r25) ; Get the vector savearea
479 dcbt r14,r17 ; Touch in first line of new stuff
480 mr. r3,r3 ; Is there one?
481 bne+ swvmGotVect ; Yes...
483 bl EXT(save_get) ; Get a savearea
485 li r7,SAVvector ; Get the vector type flag
486 stw r26,SAVact(r3) ; Save our activation
488 stb r7,SAVflags+2(r3) ; Set that this is vector
489 stw r0,SAVprev(r3) ; Clear the back chain
490 stw r0,SAVlevel(r3) ; We are always at level 0 (user state)
492 stw r3,VMXsave(r25) ; Chain us to context
; swvmGotVect (label line elided in this extract): copy the 32 new VRs
; from the comm area into the vector savearea.
495 mr r21,r3 ; Save the pointer to the savearea
496 la r4,savevr0(r3) ; Point to the destination
497 la r3,vmmppcVRs(r17) ; Point to the source
498 li r5,32*16 ; Get the size (32 vectors at 16 bytes each)
500 bl EXT(bcopy) ; Copy the new values
502 lwz r11,vmmppcVSCRshadow+0(r17) ; Get the VSCR
503 lwz r14,vmmppcVSCRshadow+4(r17) ; Get the VSCR
504 lwz r10,vmmppcVSCRshadow+8(r17) ; Get the VSCR
505 lwz r9,vmmppcVSCRshadow+12(r17) ; Get the VSCR
506 lwz r8,savevrsave(r30) ; Get the current VRSave
508 stw r11,savevscr+0(r30) ; Set the VSCR
509 stw r14,savevscr+4(r30) ; Set the VSCR
510 stw r10,savevscr+8(r30) ; Set the VSCR
511 stw r9,savevscr+12(r30) ; Set the VSCR
512 stw r8,savevrvalid(r21) ; Set the current VRSave as valid saved
514 lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
515 stw r15,vmmCntrl(r17) ; Save the control flags sans vmmVectLoad
516 rlwinm r11,r11,0,vectorCngbit+1,vectorCngbit-1 ; Clear the changed bit here
517 lwz r14,vmmStat(r17) ; Get the status flags
518 mfsprg r10,0 ; Get the per_proc
519 stw r11,ACT_MACT_SPF(r26) ; Get the special flags
520 rlwinm r14,r14,0,vmmVectCngdb+1,vmmVectCngdb-1 ; Clear the changed flag
521 stw r11,spcFlags(r10) ; Set per_proc copy of the special flags
522 stw r14,vmmStat(r17) ; Set the status flags sans vmmVectCngd
; swvmNoNewVects (label line elided in this extract): normal exit path,
; back through the system-call handler with AST check.
525 li r3,1 ; Show normal exit with check for AST
526 lwz r16,ACT_THREAD(r26) ; Restore the thread pointer
527 b EXT(ppcscret) ; Go back to handler...
531 ; These syscalls are invalid, FAM syscall fast path
; vmm_fam_reserved: shared stub for the selectors reserved to the FAM
; fast path (exit/resume/get-register/set-register).  Reaching it via
; the normal dispatcher forces an exception return (r3 = 0).
535 .globl EXT(vmm_fam_reserved)
537 LEXT(vmm_fam_reserved)
538 li r3,0 ; Force exception
539 b EXT(ppcscret) ; Go back to handler...
542 ; Here is where we exit from vmm mode. We do this on any kind of exception.
543 ; Interruptions (decrementer, external, etc.) are another story though.
544 ; These we just pass through. We also switch back explicitly when requested.
545 ; This will happen in response to a timer pop and some kinds of ASTs.
; vmmexitcall (label line elided in this extract): leave VM mode back to
; the emulator.  Merges the float/vector changed bits into the context
; status flags, clears the VM-related special flags (runningVM, prot key,
; FAM enable/mode), restores the emulator address space and facility
; context, then swaps the register state back via swapCtxt.
558 lwz r2,vmmCEntry(r3) ; Get the context that is active
559 lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy
560 lwz r11,ACT_MACT_SPF(r3) ; Get the special flags
561 lwz r19,vmmFlags(r2) ; Get the status flags
562 mr r16,r3 ; R16 is safe to use for the activation address
564 rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits
566 rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf
567 lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation
568 rlwinm r11,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag
569 stw r0,vmmCEntry(r16) ; Clear pointer to active context (NOTE(review): assumes r0 == 0; the li r0,0 appears elided from this extract -- confirm)
570 stw r19,vmmFlags(r2) ; Set the status flags
571 rlwinm r11,r11,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key
572 mfsprg r10,0 ; Get the per_proc block
573 rlwinm r11,r11,0,FamVMenabit+1,FamVMenabit-1 ; Clear FamVMEnable
574 lwz r18,spcFlags(r10) ; Get per_proc copy of the special flags
575 lwz r5,vmmContextKern(r2) ; Get the state page kernel addr
576 rlwinm r11,r11,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMMode
577 lwz r6,vmmCntrl(r5) ; Get the control field
578 rlwimi r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits
579 rlwimi r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits
580 rlwimi r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits
581 stw r11,ACT_MACT_SPF(r16) ; Get the special flags
582 stw r6,vmmCntrl(r5) ; Store the control field
583 stw r11,spcFlags(r10) ; Set per_proc copy of the special flags
585 mr r26,r16 ; Save the activation pointer
586 mr r27,r2 ; Save the context entry
588 bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator
590 la r5,facctx(r16) ; Point to the main facility context
592 stw r5,deferctx(r16) ; Start using the main facility context on the way out
593 lwz r5,vmmContextKern(r27) ; Get the context area address
594 mr r3,r16 ; Restore activation address
595 stw r19,vmmStat(r5) ; Save the changed and popped flags
596 bl swapCtxt ; Exchange the VM context for the emulator one
597 stw r8,saver3(r30) ; Set the return code as the return value also (NOTE(review): the load of r8 from saveexception appears elided from this extract -- confirm)
598 b EXT(retFromVM) ; Go back to handler...
602 ; Here is where we force exit from vmm mode. We do this as
603 ; part of termination and is used to ensure that we are not executing
604 ; in an alternate context. Because this is called from C we need to save
605 ; all non-volatile registers.
610 ; Interruptions disabled
; vmm_force_exit: C-callable forced exit from VM mode.
; In: r3 = activation, r4 = savearea.  Saves all non-volatile registers,
; and if the thread was running a VM, performs the same state unwind as
; vmmexitcall (clear special flags, restore address space and facility
; context, swap register state back) before restoring and returning.
614 .globl EXT(vmm_force_exit)
; NOTE(review): the LEXT(vmm_force_exit) label line appears elided from
; this extract.
618 stwu r1,-(FM_ALIGN(20*4)+FM_SIZE)(r1) ; Get enough space for the registers
619 mflr r0 ; Save the return
620 stmw r13,FM_ARG0(r1) ; Save all non-volatile registers
621 stw r0,(FM_ALIGN(20*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
623 lwz r2,vmmCEntry(r3) ; Get the context that is active
624 lwz r11,ACT_MACT_SPF(r3) ; Get the special flags
625 lwz r19,vmmFlags(r2) ; Get the status flags
626 lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy
628 rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits
629 mr r26,r3 ; Save the activation pointer
630 rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf
632 rlwinm r9,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag
633 cmplw r9,r11 ; Check if we were in a vm
634 lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation
635 beq- vfeNotRun ; We were not in a vm....
636 rlwinm r9,r9,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key
637 stw r0,vmmCEntry(r26) ; Clear pointer to active context (NOTE(review): assumes r0 == 0 here -- confirm against original)
638 mfsprg r10,0 ; Get the per_proc block
639 lwz r18,spcFlags(r10) ; Get per_proc copy of the special flags
640 rlwinm r9,r9,0,FamVMenabit+1,FamVMenabit-1 ; Clear Fam Enable
641 rlwinm r9,r9,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear Fam Mode
642 lwz r5,vmmContextKern(r2) ; Get the context area address
643 lwz r6,vmmCntrl(r5) ; Get the control field
644 rlwimi r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits
645 rlwimi r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits
646 rlwimi r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits
647 stw r6,vmmCntrl(r5) ; Store the control field
648 stw r9,ACT_MACT_SPF(r26) ; Get the special flags
649 stw r9,spcFlags(r10) ; Set per_proc copy of the special flags
651 mr r27,r2 ; Save the context entry
652 mr r30,r4 ; Save the savearea
654 bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator
656 la r7,facctx(r26) ; Point to the main facility context
658 lwz r5,vmmContextKern(r27) ; Get the context area address
659 stw r19,vmmStat(r5) ; Save the changed and popped flags
660 stw r7,deferctx(r26) ; Tell context launcher to switch facility context
662 bl swapCtxt ; Exchange the VM context for the emulator one
664 lwz r8,saveexception(r30) ; Pick up the exception code
665 lwz r7,SAVflags(r30) ; Pick up the savearea flags
666 lis r9,hi16(SAVredrive) ; Get exception redrive bit
667 rlwinm r8,r8,30,24,31 ; Convert exception to return code
668 andc r7,r7,r9 ; Make sure redrive is off because we are intercepting
669 stw r8,saver3(r30) ; Set the return code as the return value also
670 stw r7,SAVflags(r30) ; Set the savearea flags
673 vfeNotRun: lmw r13,FM_ARG0(r1) ; Restore all non-volatile registers
674 lwz r1,0(r1) ; Pop the stack
675 lwz r0,FM_LR_SAVE(r1) ; Get the return address
; NOTE(review): the trailing mtlr r0 / blr appear elided from this
; extract -- confirm against the original file.
680 ; Note: we will not do any DCBTs to the savearea. It was just stored to a few cycles ago and should
681 ; still be in the cache.
683 ; NOTE NOTE: R16 is important to save!!!!
; swapCtxt: exchange the general-register state held in the savearea
; (r30) with the VM state in the context area (r5), one cache line at a
; time (dcbt prefetch ahead of each group).  The VM's MSR is sanitized:
; only user-controllable bits (MSR_IMPORT_BITS) are kept and the
; mandatory bits (MSR_EXPORT_MASK_SET) are forced on.  On exit, the
; exception code is converted to a return code and exception-specific
; return parameters are stored: DAR/DSISR for DSI and alignment, PC plus
; a pseudo-DSISR for ISI, and the 4th parameter for a system call.
; R16 must be preserved across this routine.
; NOTE(review): this extract elides many of the original register-move
; lines (the embedded original line numbers jump); the visible
; instructions are kept verbatim.
687 swapCtxt: la r6,vmmppcpc(r5) ; Point to the first line
689 lwz r14,saveexception(r30) ; Get the exception code
690 dcbt 0,r6 ; Touch in the first line of the context area
691 lwz r7,savesrr0(r30) ; Start moving context
694 cmplwi cr1,r14,T_SYSTEM_CALL ; Are we switching because of a system call?
699 la r6,vmmppcr6(r5) ; Point to second line
702 dcbt 0,r6 ; Touch second line of context area
704 lwz r15,vmmppcpc(r5) ; First line of context
705 lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
706 lwz r23,vmmppcmsr(r5)
707 ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
710 and r23,r23,r22 ; Keep only the controllable bits
712 oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
714 ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
718 dcbt 0,r6 ; Touch third line of context area
720 stw r7,vmmppcpc(r5) ; Save emulator context into the context area
730 ; Save the first 3 parameters if we are an SC (we will take care of the last later)
732 bne+ cr1,swapnotsc ; Skip next if not an SC exception...
733 stw r12,return_params+0(r5) ; Save the first return
734 stw r13,return_params+4(r5) ; Save the second return
735 stw r14,return_params+8(r5) ; Save the third return
737 swapnotsc: stw r15,savesrr0(r30) ; Save vm context into the savearea
738 stw r23,savesrr1(r30)
744 la r6,vmmppcr14(r5) ; Point to fourth line
747 dcbt 0,r6 ; Touch fourth line
751 lwz r7,saver6(r30) ; Read savearea
760 lwz r15,vmmppcr6(r5) ; Read vm context
764 lwz r19,vmmppcr10(r5)
765 lwz r20,vmmppcr11(r5)
766 lwz r21,vmmppcr12(r5)
767 lwz r22,vmmppcr13(r5)
769 stw r7,vmmppcr6(r5) ; Write context
773 stw r11,vmmppcr10(r5)
774 stw r12,vmmppcr11(r5)
775 stw r13,vmmppcr12(r5)
776 la r6,vmmppcr22(r5) ; Point to fifth line
777 stw r14,vmmppcr13(r5)
779 dcbt 0,r6 ; Touch fifth line
781 stw r15,saver6(r30) ; Write vm context
792 lwz r7,saver14(r30) ; Read savearea
801 lwz r15,vmmppcr14(r5) ; Read vm context
802 lwz r24,vmmppcr15(r5)
803 lwz r17,vmmppcr16(r5)
804 lwz r18,vmmppcr17(r5)
805 lwz r19,vmmppcr18(r5)
806 lwz r20,vmmppcr19(r5)
807 lwz r21,vmmppcr20(r5)
808 lwz r22,vmmppcr21(r5)
810 stw r7,vmmppcr14(r5) ; Write context
813 stw r10,vmmppcr17(r5)
814 stw r11,vmmppcr18(r5)
815 stw r12,vmmppcr19(r5)
816 stw r13,vmmppcr20(r5)
817 la r6,vmmppcr30(r5) ; Point to sixth line
818 stw r14,vmmppcr21(r5)
820 dcbt 0,r6 ; Touch sixth line
822 stw r15,saver14(r30) ; Write vm context
833 lwz r7,saver22(r30) ; Read savearea
842 lwz r15,vmmppcr22(r5) ; Read vm context
843 lwz r24,vmmppcr23(r5)
844 lwz r17,vmmppcr24(r5)
845 lwz r18,vmmppcr25(r5)
846 lwz r19,vmmppcr26(r5)
847 lwz r20,vmmppcr27(r5)
848 lwz r21,vmmppcr28(r5)
849 lwz r22,vmmppcr29(r5)
851 stw r7,vmmppcr22(r5) ; Write context
854 stw r10,vmmppcr25(r5)
855 stw r11,vmmppcr26(r5)
856 stw r12,vmmppcr27(r5)
857 stw r13,vmmppcr28(r5)
858 la r6,vmmppcvscr(r5) ; Point to seventh line
859 stw r14,vmmppcr29(r5)
861 dcbt 0,r6 ; Touch seventh line
863 stw r15,saver22(r30) ; Write vm context
874 lwz r7,saver30(r30) ; Read savearea
880 lwz r14,savevrsave(r30)
882 lwz r15,vmmppcr30(r5) ; Read vm context
883 lwz r24,vmmppcr31(r5)
885 lwz r18,vmmppcxer(r5)
887 lwz r20,vmmppcctr(r5)
888 lwz r22,vmmppcvrsave(r5)
890 stw r7,vmmppcr30(r5) ; Write context
893 stw r10,vmmppcxer(r5)
895 stw r12,vmmppcctr(r5)
896 stw r14,vmmppcvrsave(r5)
898 stw r15,saver30(r30) ; Write vm context
904 stw r22,savevrsave(r30)
; Exchange the non-GPR state: VSCR (4 words) and FPSCR (pad + value).
908 lwz r7,savevscr+0(r30) ; Read savearea
909 lwz r8,savevscr+4(r30)
910 lwz r9,savevscr+8(r30)
911 lwz r10,savevscr+12(r30)
912 lwz r11,savefpscrpad(r30)
913 lwz r12,savefpscr(r30)
915 lwz r15,vmmppcvscr+0(r5) ; Read vm context
916 lwz r24,vmmppcvscr+4(r5)
917 lwz r17,vmmppcvscr+8(r5)
918 lwz r18,vmmppcvscr+12(r5)
919 lwz r19,vmmppcfpscrpad(r5)
920 lwz r20,vmmppcfpscr(r5)
922 stw r7,vmmppcvscr+0(r5) ; Write context
923 stw r8,vmmppcvscr+4(r5)
924 stw r9,vmmppcvscr+8(r5)
925 stw r10,vmmppcvscr+12(r5)
926 stw r11,vmmppcfpscrpad(r5)
927 stw r12,vmmppcfpscr(r5)
929 stw r15,savevscr+0(r30) ; Write vm context
930 stw r24,savevscr+4(r30)
931 stw r17,savevscr+8(r30)
932 stw r18,savevscr+12(r30)
933 stw r19,savefpscrpad(r30)
934 stw r20,savefpscr(r30)
938 ; Cobble up the exception return code and save any specific return values
941 lwz r7,saveexception(r30) ; Pick up the exception code
942 rlwinm r8,r7,30,24,31 ; Convert exception to return code
943 cmplwi r7,T_DATA_ACCESS ; Was this a DSI?
944 stw r8,return_code(r5) ; Save the exit code
945 cmplwi cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI?
946 beq+ swapDSI ; Yeah...
947 cmplwi r7,T_ALIGNMENT ; Alignment exception?
948 beq+ cr1,swapISI ; We had an ISI...
949 cmplwi cr1,r7,T_SYSTEM_CALL ; Exiting because of an system call?
950 beq+ swapDSI ; An alignment exception looks like a DSI...
951 beq+ cr1,swapSC ; We had a system call...
; NOTE(review): the branch/return tails between the cases below (e.g.
; "b swapExit" after each, and the routine's final blr) appear elided
; from this extract -- confirm against the original file.
956 ; Set exit returns for a DSI or alignment exception
959 swapDSI: lwz r10,savedar(r30) ; Get the DAR
960 lwz r7,savedsisr(r30) ; and the DSISR
961 stw r10,return_params+0(r5) ; Save DAR as first return parm
962 stw r7,return_params+4(r5) ; Save DSISR as second return parm
966 ; Set exit returns for a ISI
969 swapISI: lwz r7,vmmppcmsr(r5) ; Get the SRR1 value
970 lwz r10,vmmppcpc(r5) ; Get the PC as failing address
971 rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR
972 stw r10,return_params+0(r5) ; Save PC as first return parm
973 stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm
977 ; Set exit returns for a system call (note: we did the first 3 earlier)
978 ; Do we really need to pass parameters back here????
981 swapSC: lwz r10,vmmppcr6(r5) ; Get the fourth parameter
982 stw r10,return_params+12(r5) ; Save it
987 ; Restore Guest context from Fam mode.
; (review) The entry label for this code is above this chunk -- confirm.
; On entry: r3 = activation, r30 = savearea.  Clears FamVMmode in the
; per_proc special flags, then rebuilds the savearea (pc, srr1, r0-r7)
; from the famguest context so the guest resumes.  If a map operation was
; just completed, a DSI is synthesized and redriven instead, so the guest
; re-executes the faulting access against the new mapping.
991 mfsprg r10,0 ; Get the per_proc
992 lwz r27,vmmCEntry(r3) ; Get the context that is active
993 lwz r15,spcFlags(r10) ; Get per_proc special flags
994 mr r26,r3 ; Save the activation pointer
995 lwz r17,vmmFlags(r27) ; Get the status flags
996 lwz r20,vmmContextKern(r27) ; Get the comm area
997 rlwinm r15,r15,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
998 stw r15,spcFlags(r10) ; Update the special flags
999 rlwinm. r0,r17,0,vmmMapDoneb,vmmMapDoneb ; Did we just do a map function?
1000 lwz r7,famguestpc(r20) ; Load famguest ctx pc
1001 andc r17,r17,r0 ; Turn off map flag
1002 stw r17,vmmFlags(r27) ; Update vmmFlags
1003 beq+ vmmFamRetNoMap ; No mapping done...
; Map just completed: fake a DSI at the last-mapped address and redrive it
; so the hardware PTE gets faulted in before the guest retries.
1004 lwz r3,SAVflags(r30) ; Pick up the savearea flags
1005 lwz r2,vmmLastMap(r27) ; Get the last mapped address
1006 li r4,T_DATA_ACCESS ; Change to DSI fault
1007 oris r3,r3,hi16(SAVredrive) ; Set exception redrive
1008 stw r2,savedar(r30) ; Set the DAR to the last thing we mapped
1009 stw r3,SAVflags(r30) ; Turn on the redrive request
1010 lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
1011 stw r4,saveexception(r30) ; Say we need to emulate a DSI
1012 stw r2,savedsisr(r30) ; Pretend we have a PTE miss
; (review) the vmmFamRetNoMap: label presumably sits here (elided from this
; view) -- confirm against the full file.
1014 mfsrr1 r4 ; Get the current MSR value
1015 stw r7,savesrr0(r30) ; Set savearea pc from famguest pc
1016 lwz r5,famguestmsr(r20) ; Load famguest ctx msr
1017 lis r6,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
1018 ori r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
1019 and r5,r5,r6 ; Keep only the controllable bits
1020 oris r5,r5,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1021 ori r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1022 rlwimi r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate current FP enable into guest MSR
1023 rlwimi r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate current Vector enable into guest MSR
1024 stw r5,savesrr1(r30) ; Set savearea srr1 (sanitized guest MSR)
1025 lwz r4,famguestr0(r20) ; Load famguest ctx r0
1026 lwz r5,famguestr1(r20) ; Load famguest ctx r1
1027 lwz r6,famguestr2(r20) ; Load famguest ctx r2
1028 lwz r7,famguestr3(r20) ; Load famguest ctx r3
1029 stw r4,saver0(r30) ; Set savearea r0
1030 stw r5,saver1(r30) ; Set savearea r1
1031 stw r6,saver2(r30) ; Set savearea r2
1032 stw r7,saver3(r30) ; Set savearea r3
1033 lwz r4,famguestr4(r20) ; Load famguest ctx r4
1034 lwz r5,famguestr5(r20) ; Load famguest ctx r5
1035 lwz r6,famguestr6(r20) ; Load famguest ctx r6
1036 lwz r7,famguestr7(r20) ; Load famguest ctx r7
1037 stw r4,saver4(r30) ; Set savearea r4
1038 stw r5,saver5(r30) ; Set savearea r5
1039 stw r6,saver6(r30) ; Set savearea r6
1040 stw r7,saver7(r30) ; Set savearea r7
1042 li r3,1 ; Show normal exit with check for AST
1043 lwz r16,ACT_THREAD(r26) ; Restore the thread pointer
1044 b EXT(ppcscret) ; Go back to handler...
1047 ; FAM Intercept handler
;
; Called on an exception taken while in Fast Assist Mode.  Saves the guest
; r0-r7, pc, and msr into the famguest context, then redirects execution to
; the user-mode FAM handler with a dispatch code and per-exception params.
; (review) register contract at entry -- r13 = savearea, r11 = exception
; type, r2 = per_proc, r0 = caller's CR image -- is inferred from the
; restores below (mtcr r0, mfsprg r13,2 / r11,3); confirm against the
; low-level vector code.
1051 .globl EXT(vmm_fam_handler)
1053 LEXT(vmm_fam_handler)
1054 lwz r4,saver4(r13) ; Load savearea r4
1055 cmplwi r11,T_ALIGNMENT ; Alignment exception?
1056 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
1057 cmplwi cr1,r11,T_PROGRAM ; Exiting because of a PRG?
1058 stw r4,famguestr4(r3) ; Save r4 in famguest ctx
1059 stw r5,famguestr5(r3) ; Save r5 in famguest ctx
1060 stw r6,famguestr6(r3) ; Save r6 in famguest ctx
1061 stw r7,famguestr7(r3) ; Save r7 in famguest ctx
1062 lwz r4,saver0(r13) ; Load savearea r0
1063 lwz r5,saver1(r13) ; Load savearea r1
1064 lwz r6,saver2(r13) ; Load savearea r2
1065 lwz r7,saver3(r13) ; Load savearea r3
1066 stw r4,famguestr0(r3) ; Save r0 in famguest ctx
1067 stw r5,famguestr1(r3) ; Save r1 in famguest ctx
1068 stw r6,famguestr2(r3) ; Save r2 in famguest ctx
1069 stw r7,famguestr3(r3) ; Save r3 in famguest ctx
1070 lwz r4,spcFlags(r2) ; Load per_proc spcFlags
1071 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1072 stw r4,spcFlags(r2) ; Update per_proc spcFlags
1073 mfsrr0 r2 ; Get the interrupt srr0 (guest pc)
1074 mfsrr1 r4 ; Get the interrupt srr1 (guest msr)
1075 stw r2,famguestpc(r3) ; Save srr0 in famguest ctx
1076 stw r4,famguestmsr(r3) ; Save srr1 in famguest ctx
1077 li r6,lo16(MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)|MASK(MSR_FE1)) ; Trace/FP-exception MSR bits
1078 andc r6,r4,r6 ; Clear SE BE FE0 FE1 for the handler's MSR
1079 mtsrr1 r6 ; Set srr1
1080 mr r6,r3 ; Set r6 with phys state page addr
1081 rlwinm r7,r11,30,24,31 ; Convert exception to return code (code >> 2)
1082 beq+ cr1,famPRG ; We had a program exception...
1084 ; We had an Alignment...
; (review) an 'mfdar r3' presumably precedes the famparam store below
; (elided from this view) -- r3 must hold the DAR here; confirm.
1086 mfdsisr r4 ; Load dsisr
1087 stw r3,famparam+0x4(r6) ; Set famparam 1 with dar
1088 stw r4,famparam+0x8(r6) ; Set famparam 2 with dsisr
; (review) a 'b famRet' and the 'famPRG:' label presumably sit here (elided
; from this view) -- the next store is the program-exception path; confirm.
1091 stw r4,famparam+0x4(r6) ; Set famparam 1 with srr1
1092 mr r3,r4 ; Set r3 with srr1
1093 lwz r4,famguestr4(r6) ; Load r4 from famguest context
; Common exit: hand control to the user-mode FAM handler.
1095 lwz r5,famguestr5(r6) ; Load r5 from famguest context
1096 lwz r13,famhandler(r6) ; Load user address to resume
1097 stw r2,famparam(r6) ; Set famparam 0 with srr0
1098 stw r7,famdispcode(r6) ; Save the exit code
1099 lwz r1,famrefcon(r6) ; Load refcon
1100 mtcr r0 ; Restore cr
1101 mtsrr0 r13 ; Resume at the user FAM handler
1102 mr r0,r7 ; Set dispatch code
1103 lwz r7,famguestr7(r6) ; Load r7 from famguest context
1104 lwz r6,famguestr6(r6) ; Load r6 from famguest context (last: frees r6)
1105 mfsprg r13,2 ; Restore r13
1106 mfsprg r11,3 ; Restore r11
; (review) the terminating rfi is below this chunk -- confirm.
1110 ; FAM Intercept DSI ISI fault handler
;
; Called on a page fault (DSI or ISI) taken in Fast Assist Mode.  Saves the
; guest r0-r7, pc, and msr into the famguest context, then rewrites the
; savearea so the trap resumes in the user-mode FAM handler with the fault
; details in r0-r4 and in famparam 0-2.
; (review) register contract at entry -- r13 = savearea, r11 = exception
; type, r2 = per_proc -- is inferred from the uses below; confirm against
; the low-level vector code.
1114 .globl EXT(vmm_fam_pf_handler)
1116 LEXT(vmm_fam_pf_handler)
1117 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
1118 lwz r4,saver0(r13) ; Load savearea r0
1119 lwz r5,saver1(r13) ; Load savearea r1
1120 lwz r6,saver2(r13) ; Load savearea r2
1121 lwz r7,saver3(r13) ; Load savearea r3
1122 stw r4,famguestr0(r3) ; Save r0 in famguest
1123 stw r5,famguestr1(r3) ; Save r1 in famguest
1124 stw r6,famguestr2(r3) ; Save r2 in famguest
1125 stw r7,famguestr3(r3) ; Save r3 in famguest
1126 lwz r4,saver4(r13) ; Load savearea r4
1127 lwz r5,saver5(r13) ; Load savearea r5
1128 lwz r6,saver6(r13) ; Load savearea r6
1129 lwz r7,saver7(r13) ; Load savearea r7
1130 stw r4,famguestr4(r3) ; Save r4 in famguest
1131 lwz r4,spcFlags(r2) ; Load spcFlags
1132 stw r5,famguestr5(r3) ; Save r5 in famguest
1133 lwz r5,savesrr0(r13) ; Get the interrupt srr0 (faulting pc)
1134 stw r6,famguestr6(r3) ; Save r6 in famguest
1135 lwz r6,savesrr1(r13) ; Load srr1 (guest msr at fault)
1136 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1137 stw r7,famguestr7(r3) ; Save r7 in famguest
1138 stw r4,spcFlags(r2) ; Update spcFlags
1139 lwz r1,famrefcon(r3) ; Load refcon
1140 lwz r2,famhandler(r3) ; Load famhandler to resume
1141 stw r5,famguestpc(r3) ; Save srr0
1142 stw r5,saver2(r13) ; Store srr0 in savearea r2
1143 stw r5,famparam(r3) ; Store srr0 in fam param 0
1144 stw r6,famguestmsr(r3) ; Save srr1 in famguestmsr
1145 cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this an ISI?
1146 rlwinm r7,r11,30,24,31 ; Convert exception to return code (code >> 2)
1147 beq+ cr1,FamPfISI ; We had an ISI...
; DSI path: pass DAR and DSISR back as params 1 and 2.
1149 lwz r6,savedar(r13) ; Load dar from savearea
1150 lwz r4,savedsisr(r13) ; Load dsisr from savearea
1151 stw r6,famparam+0x4(r3) ; Store dar in fam param 1
1152 stw r6,saver3(r13) ; Store dar in savearea r3
1153 stw r4,famparam+0x8(r3) ; Store dsisr in fam param 2
1154 stw r4,saver4(r13) ; Store dsisr in savearea r4
; (review) a branch past the ISI path and the 'FamPfISI:' label presumably
; sit here (elided from this view); in the ISI path r6 still holds srr1
; from line 1135 -- confirm against the full file.
1157 rlwinm r6,r6,0,1,4 ; Keep only srr1 bits 1-4 (pseudo-DSISR for the ISI)
1158 stw r6,famparam+0x4(r3) ; Store srr1 in fam param 1
1159 stw r6,saver3(r13) ; Store srr1 in savearea r3
; Common exit (review: a 'FamPfRet:' label presumably precedes this).
1161 stw r7,saver0(r13) ; Set dispatch code
1162 stw r7,famdispcode(r3) ; Set dispatch code
1163 stw r1,saver1(r13) ; Store refcon in savearea r1
1164 stw r2,savesrr0(r13) ; Store famhandler in srr0
; (review) the return to the exception exit path is below this chunk -- confirm.
1168 ; Ultra Fast Path FAM syscalls
;
; Dispatched without building a full kernel context.  Implements guest
; register get/set (index 0-7) and resume-guest.
; (review) the entry label and the dispatch that sets cr5_eq (resume-guest
; selector) and cr6_eq (set vs. get selector) are above/elided from this
; chunk; on entry r4 = register index or resume pc, r5 = new register
; value or resume key/msr selector, r6 = mask control -- confirm against
; the full file.
1175 mfsprg r3,0 ; Get the per_proc area
1176 bt cr5_eq,ResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest
1177 lwz r3,VMMareaPhys(r3) ; Load fast assist area
1178 cmpwi cr7,r4,0 ; Compare first arg with 0
1179 cmpwi cr5,r4,7 ; Compare first arg with 7
1180 cror cr1_eq,cr7_lt,cr5_gt ; cr1_eq = (index < 0) || (index > 7)
1181 beq cr1,ufpVMret ; Return if not in the 0-7 range
1182 slwi r4,r4,2 ; Scale index to a word offset
1183 la r3,famguestr0(r3) ; Load base address of the guest register array
1184 bt cr6_eq,SetGuestReg ; Set/get selector
; Get path (review: a branch to ufpVMret presumably follows the load,
; elided from this view).
1186 lwzx r3,r4,r3 ; Load the guest register as the return value
; Set path (review: the 'SetGuestReg:' label presumably precedes this).
1189 stwx r5,r4,r3 ; Update the guest register with r5
1190 li r3,0 ; Set return value = success
; Resume-guest path (review: the 'ResumeGuest:' label presumably precedes
; this; r3 = per_proc here).
1193 lwz r7,spcFlags(r3) ; Pick up the special flags
1194 lwz r13,VMMareaPhys(r3) ; Load fast assist area
1195 mtsrr0 r4 ; Set srr0 to the guest resume pc
1196 rlwinm. r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl
1197 rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
1198 beq ResumeGuest_nokey ; Branch if not a key switch
; (review) a copy of the old flags into r2 (for the compare below)
; presumably sits here, elided from this view -- confirm.
1200 rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
1201 cmpw cr0,r7,r2 ; Is userProtKeybit changed?
1202 beq ResumeGuest_nokey ; No, skip the segment register reload
; Key changed: rebuild all 16 segment registers with the new protection key.
1203 lwz r2,PP_USERPMAP(r3) ; Get user pmap phys addr
1204 rlwinm r6,r7,userProtKeybit-2,2,2 ; Extract and shift the key bit
1205 lwz r5,PMAP_SPACE(r2) ; Load the space id
1206 oris r5,r5,hi16(SEG_REG_PROT) ; Set the protection
1207 xor r5,r5,r6 ; Flip to proper segment register key
1208 addis r4,r5,0x0000 ; Get SR0 value
1209 mtsr sr0,r4 ; Load up the SR
1210 addis r4,r5,0x0010 ; Get SR1 value
1211 mtsr sr1,r4 ; Load up the SR
1212 addis r4,r5,0x0020 ; Get SR2 value
1213 mtsr sr2,r4 ; Load up the SR
1214 addis r4,r5,0x0030 ; Get SR3 value
1215 mtsr sr3,r4 ; Load up the SR
1216 addis r4,r5,0x0040 ; Get SR4 value
1217 mtsr sr4,r4 ; Load up the SR
1218 addis r4,r5,0x0050 ; Get SR5 value
1219 mtsr sr5,r4 ; Load up the SR
1220 addis r4,r5,0x0060 ; Get SR6 value
1221 mtsr sr6,r4 ; Load up the SR
1222 addis r4,r5,0x0070 ; Get SR7 value
1223 mtsr sr7,r4 ; Load up the SR
1224 addis r4,r5,0x0080 ; Get SR8 value
1225 mtsr sr8,r4 ; Load up the SR
1226 addis r4,r5,0x0090 ; Get SR9 value
1227 mtsr sr9,r4 ; Load up the SR
1228 addis r4,r5,0x00a0 ; Get SR10 value
1229 mtsr sr10,r4 ; Load up the SR
1230 addis r4,r5,0x00b0 ; Get SR11 value
1231 mtsr sr11,r4 ; Load up the SR
1232 addis r4,r5,0x00c0 ; Get SR12 value
1233 mtsr sr12,r4 ; Load up the SR
1234 addis r4,r5,0x00d0 ; Get SR13 value
1235 mtsr sr13,r4 ; Load up the SR
1236 addis r4,r5,0x00e0 ; Get SR14 value
1237 mtsr sr14,r4 ; Load up the SR
1238 addis r4,r5,0x00f0 ; Get SR15 value
1239 mtsr sr15,r4 ; Load up the SR
; (review) the 'ResumeGuest_nokey:' label presumably precedes this -- confirm.
; Rebuild the guest MSR from famguestmsr, restore guest r0-r7, and return.
1241 mfsrr1 r6 ; Get the current MSR value
1242 lwz r0,famguestr0(r13) ; Load guest r0
1243 lwz r1,famguestr1(r13) ; Load guest r1
1244 lwz r4,famguestmsr(r13) ; Load guest srr1
1245 stw r7,spcFlags(r3) ; Update the special flags
1246 lis r5,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
1247 lwz r2,famguestr2(r13) ; Load guest r2
1248 lwz r3,famguestr3(r13) ; Load guest r3
1249 ori r5,r5,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
1250 and r7,r4,r5 ; Keep only the controllable bits
1251 lwz r4,famguestr4(r13) ; Load guest r4
1252 oris r7,r7,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1253 lwz r5,famguestr5(r13) ; Load guest r5
1254 ori r7,r7,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1255 rlwimi r7,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate current FP enable into guest MSR
1256 rlwimi r7,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate current Vector enable into guest MSR
1257 mtsrr1 r7 ; Set srr1 (sanitized guest MSR)
1258 lwz r6,famguestr6(r13) ; Load guest r6
1259 lwz r7,famguestr7(r13) ; Load guest r7
; (review) the 'ufpVMret:' label presumably precedes this common exit -- confirm.
1261 mtcrf 0xFF,r11 ; Restore CR from r11
1262 mfsprg r11,3 ; Restore R11
1263 mfsprg r13,2 ; Restore R13
1264 rfi ; All done, go back...