/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <assym.s>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
/*
 * This file contains implementations for the Virtual Machine Monitor
 * facility.
 *
 *	int vmm_dispatch(savearea, act);
 *
 *	vmm_dispatch is a PPC-only system call. It is used with a selector (first
 *	parameter) to determine what function to enter. This is treated as an
 *	extension of the system call interface.
 *
 *	Inputs:
 *		R4  = current activation
 *		R16 = current thread
 *		R30 = current savearea
 */
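;
;			A rough C sketch of the dispatch (illustrative only; these names are not
;			the kernel's, and the real work is done in the assembly below):
;
;				typedef int (*vmm_fn_t)(void *act);
;				struct vmm_disp_entry { vmm_fn_t fn; unsigned long valid_in_fam; };
;
;				int vmm_dispatch_sketch(struct vmm_disp_entry *tbl, unsigned nentries,
;				                        unsigned sel, void *act) {
;				    if (sel >= nentries) return 0;            /* bogus selector */
;				    if (in_fam_mode() && !tbl[sel].valid_in_fam)
;				        return 0;                             /* intercept to host */
;				    return tbl[sel].fn(act);                  /* call through table */
;				}
;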
            .align  5                           ; Line up on cache line
            .globl  EXT(vmm_dispatch_table)

LEXT(vmm_dispatch_table)
            /* Don't change the order of these routines in the table. It's   */
            /* OK to add new routines, but they must be added at the bottom. */
            .long   EXT(vmm_get_version_sel)        ; Get the version of the VMM interface
            .long   0                               ; Not valid in Fam
            .long   EXT(vmm_get_features_sel)       ; Get the features of the VMM interface
            .long   0                               ; Not valid in Fam
            .long   EXT(vmm_init_context_sel)       ; Initializes a new VMM context
            .long   0                               ; Not valid in Fam
            .long   EXT(vmm_tear_down_context)      ; Tears down a previously-allocated VMM context
            .long   0                               ; Not valid in Fam
            .long   EXT(vmm_tear_down_all)          ; Tears down all VMMs
            .long   0                               ; Not valid in Fam
            .long   EXT(vmm_map_page32)             ; Maps a page from the main address space into the VM space - supports 32-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_get_page_mapping32)     ; Returns client va associated with VM va - supports 32-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_unmap_page32)           ; Unmaps a page from the VM space - supports 32-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_unmap_all_pages)        ; Unmaps all pages from the VM space
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_get_page_dirty_flag32)  ; Gets the change bit for a page and optionally clears it - supports 32-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_get_float_state)        ; Gets current floating point state
            .long   0                               ; Not valid in Fam
            .long   EXT(vmm_get_vector_state)       ; Gets current vector state
            .long   0                               ; Not valid in Fam
            .long   EXT(vmm_set_timer)              ; Sets a timer value
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_get_timer)              ; Gets a timer value
            .long   1                               ; Valid in Fam
            .long   EXT(switchIntoVM)               ; Switches to the VM context
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_protect_page32)         ; Sets protection values for a page - supports 32-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_map_execute32)          ; Maps a page and launches VM - supports 32-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_protect_execute32)      ; Sets protection values for a page and launches VM - supports 32-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_map_list32)             ; Maps a list of pages - supports 32-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_unmap_list32)           ; Unmaps a list of pages - supports 32-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_fam_reserved)           ; Exit from Fam to host
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_fam_reserved)           ; Resume guest from Fam
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_fam_reserved)           ; Get guest register from Fam
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_fam_reserved)           ; Set guest register from Fam
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_set_XA)                 ; Set extended architecture features for a VM
            .long   0                               ; Not valid in Fam
            .long   EXT(vmm_get_XA)                 ; Get extended architecture features from a VM
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_map_page)               ; Map a host page into a guest address space - supports 64-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_get_page_mapping)       ; Get host address of a guest page - supports 64-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_unmap_page)             ; Unmap a guest page - supports 64-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_get_page_dirty_flag)    ; Check if guest page modified - supports 64-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_protect_page)           ; Sets protection values for a page - supports 64-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_map_execute)            ; Map guest page and launch - supports 64-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_protect_execute)        ; Set prot attributes and launch - supports 64-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_map_list64)             ; Map a list of pages into guest address spaces - supports 64-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_unmap_list64)           ; Unmap a list of pages from guest address spaces - supports 64-bit
            .long   1                               ; Valid in Fam
            .long   EXT(vmm_max_addr)               ; Returns the maximum virtual address
            .long   1                               ; Valid in Fam
            .set    vmm_count,(.-EXT(vmm_dispatch_table))/8 ; Get the number of table entries (each is 8 bytes)
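;
;			The divide by 8 works because each table entry is a pair of .long values
;			(routine address plus FAM-validity flag); a hypothetical C equivalent:
;
;				struct vmm_disp_entry { unsigned long fn, valid_in_fam; };   /* 8 bytes */
;				/* vmm_count == sizeof(table) / sizeof(struct vmm_disp_entry) */
;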
            .align  5
            .globl  EXT(vmm_dispatch)

LEXT(vmm_dispatch)
            lwz     r11,saver3+4(r30)           ; Get the selector
            mr      r3,r4                       ; All of our functions want the activation as the first parm
            lis     r10,hi16(EXT(vmm_dispatch_table)) ; Get top half of table
            cmplwi  r11,kVmmExecuteVM           ; Should we switch to the VM now?
            cmplwi  cr1,r11,vmm_count           ; See if we have a valid selector
            ori     r10,r10,lo16(EXT(vmm_dispatch_table)) ; Get low half of table
            lwz     r4,saver4+4(r30)            ; Get 1st parameter after selector
            beq+    EXT(switchIntoVM)           ; Yes, go switch to it....
            rlwinm  r11,r11,3,0,28              ; Index into table
            bge-    cr1,vmmBogus                ; It is a bogus entry
            add     r12,r10,r11                 ; Get the vmm dispatch syscall entry
            mfsprg  r10,0                       ; Get the per_proc
            lwz     r13,0(r12)                  ; Get address of routine
            lwz     r12,4(r12)                  ; Get validity flag
            lwz     r5,spcFlags(r10)            ; Get per_proc special flags
            cmpwi   cr1,r12,0                   ; Check Fam valid
            rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
            crand   cr0_eq,cr1_eq,cr0_gt        ; In Fam and invalid syscall
            beq     vmmBogus                    ; Intercept to host
            lwz     r5,saver5+4(r30)            ; Get 2nd parameter after selector - note that some of these parameters may actually be long longs
            lwz     r6,saver6+4(r30)            ; Get 3rd parameter after selector
            mtlr    r13                         ; Set the routine address
            lwz     r7,saver7+4(r30)            ; Get 4th parameter after selector
            lwz     r8,saver8+4(r30)            ; Get 5th parameter after selector
            lwz     r9,saver9+4(r30)            ; Get 6th parameter after selector
            blrl                                ; Call the selected routine...

;
;			NOTE: some of the above parameters are actually long longs. We have glue code that transforms
;			all needed parameters and/or adds 32-/64-bit flavors to the needed functions.
;
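;
;			As a hedged C sketch of that glue (parameter names are guesses, not the
;			declared interface), a 32-bit shim such as vmm_map_page32 just widens its
;			page addresses before tail-calling the 64-bit flavor:
;
;				kern_return_t vmm_map_page32(void *act, unsigned adsp,
;				                             unsigned cva, unsigned ava, unsigned prot) {
;				    /* zero-extend the 32-bit addresses into 64-bit parameters */
;				    return vmm_map_page(act, adsp, (unsigned long long)cva,
;				                        (unsigned long long)ava, prot);
;				}
;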
vmmRetPt:   li      r0,0                        ; Clear this out
            stw     r0,saver3(r30)              ; Make sure top of RC is clear
            stw     r3,saver3+4(r30)            ; Pass back the return code
            stw     r0,saver4(r30)              ; Make sure bottom of RC is clear (just in case)
            stw     r4,saver4+4(r30)            ; Pass back the bottom return code (just in case)
            li      r3,1                        ; Set normal return with check for AST
            b       EXT(ppcscret)               ; Go back to handler...
vmmBogus:   mfsprg  r10,0                       ; Get the per_proc
            mfsprg  r3,1                        ; Load current activation
            lwz     r5,spcFlags(r10)            ; Get per_proc special flags
            rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
            bne     vmmexitcall                 ; Do it to it
            li      r3,0                        ; Bogus selector, treat like a bogus system call
            b       EXT(ppcscret)               ; Go back to handler...
            .align  5
            .globl  EXT(vmm_get_version_sel)

LEXT(vmm_get_version_sel)                       ; Selector-based version of get version

            lis     r3,hi16(EXT(vmm_get_version))
            ori     r3,r3,lo16(EXT(vmm_get_version))
            b       selcomm                     ; Join common selector code...

            .align  5
            .globl  EXT(vmm_get_features_sel)

LEXT(vmm_get_features_sel)                      ; Selector-based version of get features

            lis     r3,hi16(EXT(vmm_get_features))
            ori     r3,r3,lo16(EXT(vmm_get_features))
            b       selcomm                     ; Join common selector code...

            .align  5
            .globl  EXT(vmm_init_context_sel)

LEXT(vmm_init_context_sel)                      ; Selector-based version of init context

            lwz     r4,saver4+4(r30)            ; Get the passed-in version
            lwz     r5,saver5+4(r30)            ; Get the passed-in comm area
            lis     r3,hi16(EXT(vmm_init_context))
            stw     r4,saver3+4(r30)            ; Cheat and move this parameter over
            ori     r3,r3,lo16(EXT(vmm_init_context))
            stw     r5,saver4+4(r30)            ; Cheat and move this parameter over

selcomm:    mtlr    r3                          ; Set the real routine address
            mr      r3,r30                      ; Pass in the savearea
            blrl                                ; Call the function
            b       EXT(ppcscret)               ; Go back to handler...
            .align  5
            .globl  EXT(vmm_map_page32)

LEXT(vmm_map_page32)
            mr      r9,r7                       ; Move prot to correct parm
            mr      r8,r6                       ; Move guest address to low half of long long
            li      r7,0                        ; Clear high half of guest address
            mr      r6,r5                       ; Move host address to low half of long long
            li      r5,0                        ; Clear high half of host address
            b       EXT(vmm_map_page)           ; Transition to real function...
            .align  5
            .globl  EXT(vmm_get_page_mapping32)

LEXT(vmm_get_page_mapping32)
            mr      r6,r5                       ; Move guest address to low half of long long
            li      r5,0                        ; Clear high half of guest address
            bl      EXT(vmm_get_page_mapping)   ; Transition to real function...
            mr      r3,r4                       ; Convert addr64_t to vm_offset_t, dropping top half
            b       vmmRetPt                    ; Join normal return...

            .align  5
            .globl  EXT(vmm_unmap_page32)

LEXT(vmm_unmap_page32)
            mr      r6,r5                       ; Move guest address to low half of long long
            li      r5,0                        ; Clear high half of guest address
            b       EXT(vmm_unmap_page)         ; Transition to real function...

            .align  5
            .globl  EXT(vmm_get_page_dirty_flag32)

LEXT(vmm_get_page_dirty_flag32)
            mr      r7,r6                       ; Move reset flag
            mr      r6,r5                       ; Move guest address to low half of long long
            li      r5,0                        ; Clear high half of guest address
            b       EXT(vmm_get_page_dirty_flag) ; Transition to real function...

            .align  5
            .globl  EXT(vmm_protect_page32)

LEXT(vmm_protect_page32)
            mr      r7,r6                       ; Move protection bits
            mr      r6,r5                       ; Move guest address to low half of long long
            li      r5,0                        ; Clear high half of guest address
            b       EXT(vmm_protect_page)       ; Transition to real function...

            .align  5
            .globl  EXT(vmm_map_execute32)

LEXT(vmm_map_execute32)
            mr      r9,r7                       ; Move prot to correct parm
            mr      r8,r6                       ; Move guest address to low half of long long
            li      r7,0                        ; Clear high half of guest address
            mr      r6,r5                       ; Move host address to low half of long long
            li      r5,0                        ; Clear high half of host address
            b       EXT(vmm_map_execute)        ; Transition to real function...

            .align  5
            .globl  EXT(vmm_protect_execute32)

LEXT(vmm_protect_execute32)
            mr      r7,r6                       ; Move protection bits
            mr      r6,r5                       ; Move guest address to low half of long long
            li      r5,0                        ; Clear high half of guest address
            b       EXT(vmm_protect_execute)    ; Transition to real function...

            .align  5
            .globl  EXT(vmm_map_list32)

LEXT(vmm_map_list32)
            li      r6,0                        ; Set 32-bit flavor
            b       EXT(vmm_map_list)           ; Go to common routine...

            .align  5
            .globl  EXT(vmm_map_list64)

LEXT(vmm_map_list64)
            li      r6,1                        ; Set 64-bit flavor
            b       EXT(vmm_map_list)           ; Go to common routine...

            .align  5
            .globl  EXT(vmm_unmap_list32)

LEXT(vmm_unmap_list32)
            li      r6,0                        ; Set 32-bit flavor
            b       EXT(vmm_unmap_list)         ; Go to common routine...

            .align  5
            .globl  EXT(vmm_unmap_list64)

LEXT(vmm_unmap_list64)
            li      r6,1                        ; Set 64-bit flavor
            b       EXT(vmm_unmap_list)         ; Go to common routine...
/*
 *			Here is where we transition to the virtual machine.
 *
 *			We will swap the register context in the savearea with that which is saved in our shared
 *			context area. We will do a bit of validity checking, clear any nasty bits in the MSR,
 *			and force the mandatory ones on.
 *
 *			Then we will set up the new address space to run with, and anything else that is
 *			normally part of a context switch.
 *
 *			The vmm_execute_vm entry point is for the fused vmm_map_execute and vmm_protect_execute
 *			calls. This is called, but never returned from. We always go directly back to the
 *			user through the main exception handler.
 */
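;
;			In outline (descriptive pseudo-C only; helper names are invented), the
;			launch path below does:
;
;				if (!context_valid(id))        return kVmmBogusContext;
;				if (!adsp_valid(space))        return kVmmInvalidAdSpace;
;				if (stopped or timer popped)   return immediately (intercept);
;				swap_savearea_with_context(save, ctx);   /* swapCtxt, below */
;				hw_set_user_space_dis(guest_pmap);       /* guest address space */
;				/* ...then exit through the main exception handler, never returning here */
;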
            .align  5
            .globl  EXT(vmm_execute_vm)

LEXT(vmm_execute_vm)
            lwz     r30,ACT_MACT_PCB(r3)        ; Restore the savearea pointer because it could be trash here
            b       EXT(switchIntoVM)           ; Join common...
            .align  5
            .globl  EXT(switchIntoVM)

LEXT(switchIntoVM)
            mfsprg  r10,0                       ; Get the per_proc
            rlwinm  r31,r4,24,24,31             ; Get the address space
            rlwinm  r4,r4,0,24,31               ; Isolate the context id
            lwz     r28,vmmControl(r3)          ; Pick up the control table address
            subi    r4,r4,1                     ; Switch to zero offset
            rlwinm. r2,r28,0,0,30               ; Is there a context there? (Note: we ignore bit 31 so that we
                                                ; do not try this while we are transitioning from off to on)
            cmplwi  cr1,r4,kVmmMaxContexts      ; Is the index valid?
            beq-    vmmBogus                    ; Not started, treat like a bogus system call
            subic.  r31,r31,1                   ; Make address space 0 based and test if we use default
            mulli   r2,r4,vmmCEntrySize         ; Get displacement from index
            bge-    cr1,swvmmBogus              ; Index is bogus...
            add     r2,r2,r28                   ; Point to the entry
            bge--   swvmmDAdsp                  ; There was an explicit address space request
            mr      r31,r4                      ; Default the address space to the context ID
swvmmDAdsp: la      r2,vmmc(r2)                 ; Get the offset to the context array
            lwz     r8,vmmGFlags(r28)           ; Get the general flags
            lwz     r4,vmmFlags(r2)             ; Get the flags for the selected entry
            crset   vmmMapDone                  ; Assume we will be mapping something
            lwz     r5,vmmContextKern(r2)       ; Get the context area address
            rlwinm. r26,r4,0,vmmInUseb,vmmInUseb ; See if the slot is in use
            cmplwi  cr1,r31,kVmmMaxContexts     ; See if we have a valid address space ID
            rlwinm  r8,r8,0,24,31               ; Clean up address space
            beq--   swvmmBogus                  ; This context is no good...

            la      r26,vmmAdsp(r28)            ; Point to the pmaps
            sub     r8,r8,r31                   ; Get diff between launching address space - 1 and last mapped into (should be 1 if the same)
            rlwinm  r31,r31,2,0,29              ; Index to the pmap
            cmplwi  r8,1                        ; See if we have the same address space
            bge--   cr1,swvmmBogAdsp            ; Address space is no good...
            lwzx    r31,r26,r31                 ; Get the requested address space pmap
            li      r0,0                        ; Get a 0 in case we need to trash redrive
            lwz     r15,spcFlags(r10)           ; Get per_proc special flags
            beq     swvmmAdspOk                 ; Do not invalidate address space if we are launching the same
            crclr   vmmMapDone                  ; Clear map done flag
            stb     r0,vmmGFlags+3(r28)         ; Clear the last mapped address space ID so we will not redrive later
;
;			Here we check for any immediate intercepts. So far, the only
;			two of these are a timer pop and an external stop. We will not dispatch if
;			either is true. The emulator needs to either reset the timer (i.e., set it
;			to 0) or set a future time, or, if it is an external stop, set the vmmXStopRst flag.
;
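;
;			The intercept policy, as illustrative pseudo-C:
;
;				if (flags & vmmXStop)    return kVmmStopped;     /* until vmmXStopRst is set */
;				if (flags & vmmTimerPop) return kVmmReturnNull;  /* until the timer is reset */
;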
swvmmAdspOk:
            rlwinm. r0,r15,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
            stw     r31,vmmPmap(r2)             ; Save the last dispatched address space
            bne     vmmFamGuestResume
            lwz     r6,vmmCntrl(r5)             ; Get the control field
            rlwinm. r7,r6,0,vmmXStartb,vmmXStartb ; Clear all but start bit
            beq+    swvmChkStop                 ; Do not reset stop
            andc    r6,r6,r7                    ; Clear it
            li      r8,vmmFlags                 ; Point to the flags
            stw     r6,vmmCntrl(r5)             ; Set the control field

swvmtryx:   lwarx   r4,r8,r2                    ; Pick up the flags
            rlwinm  r4,r4,0,vmmXStopb+1,vmmXStopb-1 ; Clear the stop bit
            stwcx.  r4,r8,r2                    ; Save the updated field
            bne-    swvmtryx                    ; Try again...
swvmChkStop:
            rlwinm. r26,r4,0,vmmXStopb,vmmXStopb ; Is this VM stopped?
            bne--   swvmSetStop                 ; Yes...

            rlwinm. r26,r4,0,vmmTimerPopb,vmmTimerPopb ; Did the timer go pop?
            cmplwi  cr1,r31,0                   ; Is there actually an address space defined?
            bne--   svvmTimerPop                ; Yes...
;
;			Special note: we need to intercept any attempt to launch a guest into a non-existent address space.
;			We will just go emulate an ISI if there is not one.
;

            beq--   cr1,swvmEmulateISI          ; We are trying to launch into an undefined address space. This is not so good...
;
;			Here is where we actually swap into the VM (alternate) context.
;			We will bulk do a wholesale swap of the registers in the context area (the VM's)
;			with the ones in the savearea (our main code). During the copy, we will fix up the
;			MSR, forcing on a few bits and turning off a few others. Then we will deal with the
;			PMAP and other per_proc stuff. Finally, we will exit back through the main exception
;			handler to deal with unstacking saveareas and ASTs, etc.
;
;
;			First, we save the volatile registers we care about. Remember, all register
;			handling here is pretty funky anyway, so we just pick the ones that are ok.
;

            mr      r26,r3                      ; Save the activation pointer

            la      r11,vmmFacCtx(r2)           ; Point to the virtual machine facility context
            mr      r27,r2                      ; Save the context entry
            stw     r11,deferctx(r3)            ; Start using the virtual machine facility context when we exit

            lwz     r11,ACT_MACT_SPF(r26)       ; Get the special flags
            mr      r3,r31                      ; Get the pointer to the PMAP
            oris    r15,r11,hi16(runningVM)     ; Show that we are swapped to the VM right now
            bl      EXT(hw_set_user_space_dis)  ; Swap the address spaces
            lwz     r17,vmmFlags(r27)           ; Get the status flags
            lwz     r20,vmmContextKern(r27)     ; Get the state page kernel addr
            lwz     r21,vmmCntrl(r20)           ; Get vmmCntrl
            rlwinm. r22,r21,0,vmmFamEnab,vmmFamEnab ; Is vmmFamEnab set?
            lwz     r22,vmmXAFlgs(r27)          ; Get the eXtended Architecture flags
            stw     r22,VMMXAFlgs(r10)          ; Store vmmXAFlgs in per_proc VMMXAFlgs
            beq     swvmNoFam                   ; No Fam intercept
            rlwinm. r22,r22,0,0,0               ; Are we doing a 64-bit virtual machine?
            rlwimi  r15,r21,32+vmmFamSetb-FamVMmodebit,FamVMmodebit,FamVMmodebit ; Set FamVMmode bit
            rlwinm  r21,r21,0,vmmFamSetb+1,vmmFamSetb-1 ; Clear FamSet bit
            bne     swvmXFamIntcpt              ; 64-bit virtual machine, use the extended intercepts...
            lwz     r22,famintercepts(r20)      ; Load intercept bit field
            b       swvmFamDone                 ; Join up...

swvmXFamIntcpt:
            lwz     r22,faminterceptsX(r20)     ; Load intercept bit field

swvmFamDone:
            stw     r21,vmmCntrl(r20)           ; Update vmmCntrl
            lwz     r19,vmmContextPhys(r27)     ; Get vmmFAMarea address
            stw     r22,vmmFAMintercept(r27)    ; Set vmmFAMintercept
            stw     r22,FAMintercept(r10)       ; Store vmmFAMintercept in per_proc FAMintercept
            stw     r19,VMMareaPhys(r10)        ; Store VMMareaPhys
            oris    r15,r15,hi16(FamVMena)      ; Set FamVMenabit

swvmNoFam:
            stw     r27,vmmCEntry(r26)          ; Remember what context we are running
            bf++    vmmMapDone,swvmNoMap        ; We have not mapped anything or it was not for this address space
;
;			This little bit of hoopala here (triggered by vmmMapDone) is
;			a performance enhancement. This will change the returning savearea
;			to look like we had a DSI rather than a system call. Then, setting
;			the redrive bit, the exception handler will redrive the exception as
;			a DSI, entering the last mapped address into the hash table. This keeps
;			double faults from happening. Note that there is only a gain if the VM
;			takes a fault, the emulator merely resolves it, and then the VM begins
;			execution again. It seems like this should be the normal case.
;
;			Note that we need to revisit this when we move the virtual machines to the task because
;			then it will be possible for more than one thread to access this stuff at the same time.
;
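;
;			The effect, as a C sketch over the savearea (field names abbreviated):
;
;				save->exception = T_DATA_ACCESS;      /* pretend it was a DSI */
;				save->dar       = last_mapped_addr;   /* fault address = new mapping */
;				save->dsisr     = MASK(DSISR_HASH);   /* PTE-miss flavor */
;				save->flags    |= SAVredrive;         /* replay the fault on exit */
;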
            lwz     r3,SAVflags(r30)            ; Pick up the savearea flags
            lwz     r2,vmmLastMap(r28)          ; Get the last mapped address
            lwz     r14,vmmLastMap+4(r28)       ; Get the last mapped address low half
            li      r20,T_DATA_ACCESS           ; Change to DSI fault
            oris    r3,r3,hi16(SAVredrive)      ; Set exception redrive
            stw     r2,savedar(r30)             ; Set the DAR to the last thing we mapped
            stw     r14,savedar+4(r30)          ; Set the DAR to the last thing we mapped
            stw     r3,SAVflags(r30)            ; Turn on the redrive request
            lis     r2,hi16(MASK(DSISR_HASH))   ; Set PTE/DBAT miss
            stw     r20,saveexception(r30)      ; Say we need to emulate a DSI
            stw     r2,savedsisr(r30)           ; Pretend we have a PTE miss
            stb     r0,vmmGFlags+3(r28)         ; Show that the redrive has been taken care of
swvmNoMap:  lwz     r20,vmmContextKern(r27)     ; Get the comm area
            rlwimi  r15,r17,32-(floatCngbit-vmmFloatCngdb),floatCngbit,vectorCngbit ; Shift and insert changed bits
            lwz     r20,vmmCntrl(r20)           ; Get the control flags
            rlwimi  r17,r11,8,24,31             ; Save the old spf flags
            rlwimi  r15,r20,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
            stw     r15,spcFlags(r10)           ; Set per_proc copy of the special flags
            stw     r15,ACT_MACT_SPF(r26)       ; Set the special flags

            stw     r17,vmmFlags(r27)           ; Set the status flags
            bl      swapCtxt                    ; First, swap the general register state

            lwz     r17,vmmContextKern(r27)     ; Get the comm area back
            la      r25,vmmFacCtx(r27)          ; Point to the facility context
            lwz     r15,vmmCntrl(r17)           ; Get the control flags again
            mfsprg  r29,0                       ; Get the per_proc
;
;			Check if there is new floating point context to load
;

            rlwinm. r0,r15,0,vmmFloatLoadb,vmmFloatLoadb ; Are there new floating point values?
            lhz     r29,PP_CPU_NUMBER(r29)      ; Get our cpu number
            li      r14,vmmppcFPRs              ; Get displacement to the new values
            andc    r15,r15,r0                  ; Clear the bit
            beq+    swvmNoNewFloats             ; Nope, good...
            lwz     r19,FPUcpu(r25)             ; Get the last CPU we ran on

            stw     r29,FPUcpu(r25)             ; Claim the context for ourselves

            eieio                               ; Make sure this stays in order

            lis     r18,hi16(EXT(per_proc_info)) ; Set base per_proc
            mulli   r19,r19,ppSize              ; Find offset to the owner per_proc
            ori     r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc
            li      r16,FPUowner                ; Displacement to float owner
            add     r19,r18,r19                 ; Point to the owner per_proc

swvminvfpu: lwarx   r18,r16,r19                 ; Get the owner

            sub     r0,r18,r25                  ; Subtract one from the other
            sub     r3,r25,r18                  ; Subtract the other from the one
            or      r3,r3,r0                    ; Combine them
            srawi   r3,r3,31                    ; Get a 0 if equal or a -1 if not
            and     r18,r18,r3                  ; Make 0 if same, unchanged if not
            stwcx.  r18,r16,r19                 ; Try to invalidate it
            bne--   swvminvfpu                  ; Try again if there was a collision...
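;
;			The five instructions above clear the FPU owner without a branch; as a C
;			sketch (illustrative only; the real sequence runs under lwarx/stwcx.):
;
;				uint32_t x = (owner - ours) | (ours - owner);     /* 0 only if equal */
;				uint32_t keep = (uint32_t)((int32_t)x >> 31);     /* 0 if equal, ~0 if not */
;				owner &= keep;                      /* release (0) only when we own it */
;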
            lwz     r3,FPUsave(r25)             ; Get the FPU savearea
            dcbt    r14,r17                     ; Touch in first line of new stuff
            mr.     r3,r3                       ; Is there one?
            bne+    swvmGotFloat                ; Yes...

            bl      EXT(save_get)               ; Get a savearea

            li      r7,SAVfloat                 ; Get floating point flag
            stw     r26,SAVact(r3)              ; Save our activation
            li      r0,0                        ; Get a zero
            stb     r7,SAVflags+2(r3)           ; Set that this is floating point
            stw     r0,SAVprev+4(r3)            ; Clear the back chain
            stw     r0,SAVlevel(r3)             ; We are always at level 0 (user state)

            stw     r3,FPUsave(r25)             ; Chain us to context
swvmGotFloat:
            la      r4,savefp0(r3)              ; Point to the destination
            mr      r21,r3                      ; Save the savearea
            la      r3,vmmppcFPRs(r17)          ; Point to the source
            li      r5,32*8                     ; Get the size (32 FPRs at 8 bytes each)

            bl      EXT(bcopy)                  ; Copy the new values
            lwz     r11,ACT_MACT_SPF(r26)       ; Get the special flags
            stw     r15,vmmCntrl(r17)           ; Save the control flags sans vmmFloatLoad
            rlwinm  r11,r11,0,floatCngbit+1,floatCngbit-1 ; Clear the changed bit here
            lwz     r14,vmmStat(r17)            ; Get the status flags
            mfsprg  r10,0                       ; Get the per_proc
            stw     r11,ACT_MACT_SPF(r26)       ; Set the special flags
            rlwinm  r14,r14,0,vmmFloatCngdb+1,vmmFloatCngdb-1 ; Clear the changed flag
            stw     r11,spcFlags(r10)           ; Set per_proc copy of the special flags
            stw     r14,vmmStat(r17)            ; Set the status flags sans vmmFloatCngd
swvmNoNewFloats:
;
;			Check if there is new vector context to load
;

            rlwinm. r0,r15,0,vmmVectLoadb,vmmVectLoadb ; Are there new vector values?
            li      r14,vmmppcVRs               ; Get displacement to the new values
            andc    r15,r15,r0                  ; Clear the bit
            beq+    swvmNoNewVects              ; Nope, good...
            lwz     r19,VMXcpu(r25)             ; Get the last CPU we ran on

            stw     r29,VMXcpu(r25)             ; Claim the context for ourselves

            eieio                               ; Make sure this stays in order

            lis     r18,hi16(EXT(per_proc_info)) ; Set base per_proc
            mulli   r19,r19,ppSize              ; Find offset to the owner per_proc
            ori     r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc
            li      r16,VMXowner                ; Displacement to vector owner
            add     r19,r18,r19                 ; Point to the owner per_proc

swvminvvec: lwarx   r18,r16,r19                 ; Get the owner

            sub     r0,r18,r25                  ; Subtract one from the other
            sub     r3,r25,r18                  ; Subtract the other from the one
            or      r3,r3,r0                    ; Combine them
            srawi   r3,r3,31                    ; Get a 0 if equal or a -1 if not
            and     r18,r18,r3                  ; Make 0 if same, unchanged if not
            stwcx.  r18,r16,r19                 ; Try to invalidate it
            bne--   swvminvvec                  ; Try again if there was a collision...
swvminvved: lwz     r3,VMXsave(r25)             ; Get the vector savearea
            dcbt    r14,r17                     ; Touch in first line of new stuff
            mr.     r3,r3                       ; Is there one?
            bne+    swvmGotVect                 ; Yes...

            bl      EXT(save_get)               ; Get a savearea

            li      r7,SAVvector                ; Get the vector type flag
            stw     r26,SAVact(r3)              ; Save our activation
            li      r0,0                        ; Get a zero
            stb     r7,SAVflags+2(r3)           ; Set that this is vector
            stw     r0,SAVprev+4(r3)            ; Clear the back chain
            stw     r0,SAVlevel(r3)             ; We are always at level 0 (user state)

            stw     r3,VMXsave(r25)             ; Chain us to context
swvmGotVect:
            mr      r21,r3                      ; Save the pointer to the savearea
            la      r4,savevr0(r3)              ; Point to the destination
            la      r3,vmmppcVRs(r17)           ; Point to the source
            li      r5,32*16                    ; Get the size (32 vectors at 16 bytes each)

            bl      EXT(bcopy)                  ; Copy the new values
            lwz     r8,savevrsave(r30)          ; Get the current VRSave

            lwz     r11,ACT_MACT_SPF(r26)       ; Get the special flags
            stw     r15,vmmCntrl(r17)           ; Save the control flags sans vmmVectLoad
            rlwinm  r11,r11,0,vectorCngbit+1,vectorCngbit-1 ; Clear the changed bit here
            stw     r8,savevrvalid(r21)         ; Mark the current VRSave as valid
            lwz     r14,vmmStat(r17)            ; Get the status flags
            mfsprg  r10,0                       ; Get the per_proc
            stw     r11,ACT_MACT_SPF(r26)       ; Set the special flags
            rlwinm  r14,r14,0,vmmVectCngdb+1,vmmVectCngdb-1 ; Clear the changed flag
            stw     r11,spcFlags(r10)           ; Set per_proc copy of the special flags
            stw     r14,vmmStat(r17)            ; Set the status flags sans vmmVectCngd
swvmNoNewVects:
            li      r3,1                        ; Show normal exit with check for AST
            lwz     r16,ACT_THREAD(r26)         ; Restore the thread pointer
            b       EXT(ppcscret)               ; Go back to handler...
swvmmBogus: li      r2,kVmmBogusContext         ; Set bogus index return
            li      r0,0                        ; Clear this out
            li      r3,1                        ; Set normal return with check for AST
            stw     r0,saver3(r30)              ; Clear upper half
            stw     r2,saver3+4(r30)            ; Pass back the return code
            b       EXT(ppcscret)               ; Go back to handler...
swvmmBogAdsp:
            li      r2,kVmmInvalidAdSpace       ; Set bogus address space return
            li      r0,0                        ; Clear this out
            li      r3,1                        ; Set normal return with check for AST
            stw     r0,saver3(r30)              ; Clear upper half
            stw     r2,saver3+4(r30)            ; Pass back the return code
            b       EXT(ppcscret)               ; Go back to handler...
swvmSetStop:
            li      r2,kVmmStopped              ; Set stopped return
            li      r0,0                        ; Clear this out
            li      r3,1                        ; Set normal return with check for AST
            stw     r0,saver3(r30)              ; Clear upper half
            stw     r2,saver3+4(r30)            ; Pass back the return code
            stw     r2,return_code(r5)          ; Save the exit code
            b       EXT(ppcscret)               ; Go back to handler...
svvmTimerPop:
            li      r2,kVmmReturnNull           ; Set null return
            li      r0,0                        ; Clear this out
            li      r3,1                        ; Set normal return with check for AST
            stw     r0,saver3(r30)              ; Clear upper half
            stw     r2,saver3+4(r30)            ; Pass back the return code
            stw     r2,return_code(r5)          ; Save the exit code
            b       EXT(ppcscret)               ; Go back to handler...
swvmEmulateISI:
            mfsprg  r10,2                       ; Get feature flags
            lwz     r11,vmmXAFlgs(r28)          ; Get the eXtended Architecture flags
            mtcrf   0x02,r10                    ; Move pf64Bit to its normal place in CR6
            rlwinm. r11,r11,0,0,0               ; Are we doing a 64-bit virtual machine?
            li      r2,kVmmReturnInstrPageFault ; Set ISI
            crnot   vmmDoing64,cr0_eq           ; Remember if this is a 64-bit VM
            li      r0,0                        ; Clear this out
            li      r3,1                        ; Set normal return with check for AST
            stw     r0,saver3(r30)              ; Clear upper half
            stw     r2,saver3+4(r30)            ; Pass back the return code
            stw     r2,return_code(r5)          ; Save the exit code
            lis     r7,hi16(MASK(DSISR_HASH))   ; Pretend like we got a PTE miss
            bt      vmmDoing64,vmISI64          ; Go do this for a 64-bit VM...

            lwz     r10,vmmppcpc(r5)            ; Get the PC as failing address
            stw     r10,return_params+0(r5)     ; Save PC as first return parm
            stw     r7,return_params+4(r5)      ; Save the pseudo-DSISR as second return parm
            b       EXT(ppcscret)               ; Go back to handler...

vmISI64:    ld      r10,vmmppcXpc(r5)           ; Get the PC as failing address
            std     r10,return_paramsX+0(r5)    ; Save PC as first return parm
            std     r7,return_paramsX+8(r5)     ; Save the pseudo-DSISR as second return parm
            b       EXT(ppcscret)               ; Go back to handler...
;
;			These selectors are invalid here; they are handled via the FAM syscall fast path
;

            .align  5
            .globl  EXT(vmm_fam_reserved)

LEXT(vmm_fam_reserved)
            li      r3,0                        ; Force exception
            b       EXT(ppcscret)               ; Go back to handler...
;
;			Here is where we exit from vmm mode. We do this on any kind of exception.
;			Interruptions (decrementer, external, etc.) are another story though.
;			These we just pass through. We also switch back explicitly when requested.
;			This will happen in response to a timer pop and some kinds of ASTs.
;
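;
;			In outline (descriptive pseudo-C only): the guest register state is parked
;			back in the context area, the emulator address space and facility context
;			are restored, and the converted exception code becomes the return value:
;
;				swap_savearea_with_context(save, ctx);   /* undo the launch-time swap */
;				hw_set_user_space_dis(emulator_pmap);
;				return exception_to_return_code(save->exception);
;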
vmmexitcall:
            lwz     r2,vmmCEntry(r3)            ; Get the context that is active
            lwz     r12,ACT_VMMAP(r3)           ; Get the VM_MAP for this guy
            lwz     r11,ACT_MACT_SPF(r3)        ; Get the special flags
            lwz     r19,vmmFlags(r2)            ; Get the status flags
            mr      r16,r3                      ; R16 is safe to use for the activation address
            li      r0,0                        ; Get a zero

            rlwimi  r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits
            rlwimi  r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf
            lwz     r3,VMMAP_PMAP(r12)          ; Get the pmap for the activation
            rlwinm  r11,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag
            stw     r0,vmmCEntry(r16)           ; Clear pointer to active context
            stw     r19,vmmFlags(r2)            ; Set the status flags
            rlwinm  r11,r11,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key
            mfsprg  r10,0                       ; Get the per_proc block
            rlwinm  r11,r11,0,FamVMenabit+1,FamVMenabit-1 ; Clear FamVMEnable
            lwz     r18,spcFlags(r10)           ; Get per_proc copy of the special flags
            lwz     r5,vmmContextKern(r2)       ; Get the state page kernel addr
            rlwinm  r11,r11,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMMode
            lwz     r6,vmmCntrl(r5)             ; Get the control field
            rlwimi  r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits
            rlwimi  r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits
            rlwimi  r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits
            stw     r11,ACT_MACT_SPF(r16)       ; Set the special flags
            stw     r6,vmmCntrl(r5)             ; Store the control field
            stw     r11,spcFlags(r10)           ; Set per_proc copy of the special flags

            mr      r26,r16                     ; Save the activation pointer
            mr      r27,r2                      ; Save the context entry

            bl      EXT(hw_set_user_space_dis)  ; Swap the address spaces back to the emulator

            la      r5,facctx(r16)              ; Point to the main facility context

            stw     r5,deferctx(r16)            ; Start using the main facility context on the way out
            lwz     r5,vmmContextKern(r27)      ; Get the context area address
            mr      r3,r16                      ; Restore activation address
            stw     r19,vmmStat(r5)             ; Save the changed and popped flags
            bl      swapCtxt                    ; Exchange the VM context for the emulator one
            stw     r8,saver3+4(r30)            ; Set the return code as the return value also
            b       EXT(retFromVM)              ; Go back to handler...
;
;			Here is where we force an exit from vmm mode. We do this as part of
;			termination and use it to ensure that we are not executing in an
;			alternate context. Because this is called from C, we need to save all
;			non-volatile registers.
;
;			Inputs:
;				R3 = activation
;				R4 = user savearea
;				Interruptions disabled
;
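;
;			Because this entry is called from C (unlike the exception-driven exit above),
;			it follows the C calling convention and preserves r13-r31 itself. A hedged
;			guess at the C-visible shape, inferred from the register usage below:
;
;				extern void vmm_force_exit(/* activation */ void *act,
;				                           /* user savearea */ void *save);
;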
            .align  5
            .globl  EXT(vmm_force_exit)

LEXT(vmm_force_exit)
            stwu    r1,-(FM_ALIGN(20*4)+FM_SIZE)(r1) ; Get enough space for the registers
            mflr    r0                          ; Save the return
            stmw    r13,FM_ARG0(r1)             ; Save all non-volatile registers
            stw     r0,(FM_ALIGN(20*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
            lwz     r2,vmmCEntry(r3)            ; Get the context that is active
            lwz     r11,ACT_MACT_SPF(r3)        ; Get the special flags
            lwz     r19,vmmFlags(r2)            ; Get the status flags
            lwz     r12,ACT_VMMAP(r3)           ; Get the VM_MAP for this guy

            rlwimi  r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits
            mr      r26,r3                      ; Save the activation pointer
            li      r0,0                        ; Get a zero
            rlwimi  r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf

            rlwinm  r9,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag
            cmplw   r9,r11                      ; Check if we were in a vm
            lwz     r3,VMMAP_PMAP(r12)          ; Get the pmap for the activation
            beq-    vfeNotRun                   ; We were not in a vm....
            rlwinm  r9,r9,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key
            stw     r0,vmmCEntry(r26)           ; Clear pointer to active context
            mfsprg  r10,0                       ; Get the per_proc block
            lwz     r18,spcFlags(r10)           ; Get per_proc copy of the special flags
            rlwinm  r9,r9,0,FamVMenabit+1,FamVMenabit-1 ; Clear FamVMEnable
            rlwinm  r9,r9,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMMode
            lwz     r5,vmmContextKern(r2)       ; Get the context area address
            lwz     r6,vmmCntrl(r5)             ; Get the control field
            rlwimi  r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits
            rlwimi  r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits
            rlwimi  r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits
            stw     r6,vmmCntrl(r5)             ; Store the control field
            stw     r9,ACT_MACT_SPF(r26)        ; Set the special flags
            stw     r9,spcFlags(r10)            ; Set per_proc copy of the special flags

            mr      r27,r2                      ; Save the context entry
            mr      r30,r4                      ; Save the savearea

            bl      EXT(hw_set_user_space_dis)  ; Swap the address spaces back to the emulator

            la      r7,facctx(r26)              ; Point to the main facility context

            lwz     r5,vmmContextKern(r27)      ; Get the context area address
            stw     r19,vmmStat(r5)             ; Save the changed and popped flags
            stw     r7,deferctx(r26)            ; Tell context launcher to switch facility context

            bl      swapCtxt                    ; Exchange the VM context for the emulator one

            lwz     r8,saveexception(r30)       ; Pick up the exception code
            lwz     r7,SAVflags(r30)            ; Pick up the savearea flags
            lis     r9,hi16(SAVredrive)         ; Get exception redrive bit
            rlwinm  r8,r8,30,24,31              ; Convert exception to return code
            andc    r7,r7,r9                    ; Make sure redrive is off because we are intercepting
            stw     r8,saver3+4(r30)            ; Set the return code as the return value also
            stw     r7,SAVflags(r30)            ; Set the savearea flags
vfeNotRun:  lmw     r13,FM_ARG0(r1)             ; Restore all non-volatile registers
            lwz     r1,0(r1)                    ; Pop the stack
            lwz     r0,FM_LR_SAVE(r1)           ; Get the return address
            mtlr    r0                          ; Set the return
            blr                                 ; Leave...
;
;			Note: we will not do any DCBTs to the savearea. It was just stored to a few cycles ago and should
;			still be in the cache.
;
;			NOTE NOTE: R16 is important to save!!!!
;
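;
;			Throughout the swap the guest MSR image is never trusted: only the
;			MSR_IMPORT_BITS may pass through, and MSR_EXPORT_MASK_SET is always
;			forced on. As arithmetic:
;
;				new_msr = (guest_msr & MSR_IMPORT_BITS) | MSR_EXPORT_MASK_SET;
;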
            .align  5

swapCtxt:   mfsprg  r10,2                       ; Get feature flags
            la      r6,vmmppcpc(r5)             ; Point to the first line
            mtcrf   0x02,r10                    ; Move pf64Bit to its normal place in CR6

            lwz     r14,saveexception(r30)      ; Get the exception code
            dcbt    0,r6                        ; Touch in the first line of the context area
            bt++    pf64Bitb,swap64             ; Go do this swap on a 64-bit machine...
            lwz     r7,savesrr0+4(r30)          ; Start moving context
            lwz     r8,savesrr1+4(r30)
            lwz     r9,saver0+4(r30)
            cmplwi  cr1,r14,T_SYSTEM_CALL       ; Are we switching because of a system call?
            lwz     r10,saver1+4(r30)
            lwz     r11,saver2+4(r30)
            lwz     r12,saver3+4(r30)
            lwz     r13,saver4+4(r30)
            la      r6,vmmppcr6(r5)             ; Point to second line
            lwz     r14,saver5+4(r30)

            dcbt    0,r6                        ; Touch second line of context area

            lwz     r15,vmmppcpc(r5)            ; First line of context
            lis     r22,hi16(MSR_IMPORT_BITS)   ; Get the MSR bits that are controllable by user
            lwz     r23,vmmppcmsr(r5)
            ori     r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
            lwz     r17,vmmppcr0(r5)
            lwz     r18,vmmppcr1(r5)
            and     r23,r23,r22                 ; Keep only the controllable bits
            lwz     r19,vmmppcr2(r5)
            oris    r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
            lwz     r20,vmmppcr3(r5)
            ori     r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
            lwz     r21,vmmppcr4(r5)
            lwz     r22,vmmppcr5(r5)

            dcbt    0,r6                        ; Touch third line of context area

            stw     r7,vmmppcpc(r5)             ; Save emulator context into the context area
            stw     r8,vmmppcmsr(r5)
            stw     r9,vmmppcr0(r5)
            stw     r10,vmmppcr1(r5)
            stw     r11,vmmppcr2(r5)
            stw     r12,vmmppcr3(r5)
            stw     r13,vmmppcr4(r5)
            stw     r14,vmmppcr5(r5)

;
;			Save the first 3 parameters if we are an SC (we will take care of the last later)
;
            bne+    cr1,swapnotsc               ; Skip next if not an SC exception...
            stw     r12,return_params+0(r5)     ; Save the first return
            stw     r13,return_params+4(r5)     ; Save the second return
            stw     r14,return_params+8(r5)     ; Save the third return
swapnotsc:  li      r6,0                        ; Clear this out
            stw     r6,savesrr0(r30)            ; Ensure that the high order is clear
            stw     r15,savesrr0+4(r30)         ; Save vm context into the savearea
            stw     r6,savesrr1(r30)            ; Ensure that the high order is clear
            stw     r23,savesrr1+4(r30)
            stw     r17,saver0+4(r30)
            stw     r18,saver1+4(r30)
            stw     r19,saver2+4(r30)
            stw     r20,saver3+4(r30)
            stw     r21,saver4+4(r30)
            la      r6,vmmppcr14(r5)            ; Point to fourth line
            stw     r22,saver5+4(r30)

            dcbt    0,r6                        ; Touch fourth line
            lwz     r7,saver6+4(r30)            ; Read savearea
            lwz     r8,saver7+4(r30)
            lwz     r9,saver8+4(r30)
            lwz     r10,saver9+4(r30)
            lwz     r11,saver10+4(r30)
            lwz     r12,saver11+4(r30)
            lwz     r13,saver12+4(r30)
            lwz     r14,saver13+4(r30)

            lwz     r15,vmmppcr6(r5)            ; Read vm context
            lwz     r24,vmmppcr7(r5)
            lwz     r17,vmmppcr8(r5)
            lwz     r18,vmmppcr9(r5)
            lwz     r19,vmmppcr10(r5)
            lwz     r20,vmmppcr11(r5)
            lwz     r21,vmmppcr12(r5)
            lwz     r22,vmmppcr13(r5)

            stw     r7,vmmppcr6(r5)             ; Write context
            stw     r8,vmmppcr7(r5)
            stw     r9,vmmppcr8(r5)
            stw     r10,vmmppcr9(r5)
            stw     r11,vmmppcr10(r5)
            stw     r12,vmmppcr11(r5)
            stw     r13,vmmppcr12(r5)
            la      r6,vmmppcr22(r5)            ; Point to fifth line
            stw     r14,vmmppcr13(r5)

            dcbt    0,r6                        ; Touch fifth line

            stw     r15,saver6+4(r30)           ; Write vm context
            stw     r24,saver7+4(r30)
            stw     r17,saver8+4(r30)
            stw     r18,saver9+4(r30)
            stw     r19,saver10+4(r30)
            stw     r20,saver11+4(r30)
            stw     r21,saver12+4(r30)
            stw     r22,saver13+4(r30)
            lwz     r7,saver14+4(r30)           ; Read savearea
            lwz     r8,saver15+4(r30)
            lwz     r9,saver16+4(r30)
            lwz     r10,saver17+4(r30)
            lwz     r11,saver18+4(r30)
            lwz     r12,saver19+4(r30)
            lwz     r13,saver20+4(r30)
            lwz     r14,saver21+4(r30)

            lwz     r15,vmmppcr14(r5)           ; Read vm context
            lwz     r24,vmmppcr15(r5)
            lwz     r17,vmmppcr16(r5)
            lwz     r18,vmmppcr17(r5)
            lwz     r19,vmmppcr18(r5)
            lwz     r20,vmmppcr19(r5)
            lwz     r21,vmmppcr20(r5)
            lwz     r22,vmmppcr21(r5)

            stw     r7,vmmppcr14(r5)            ; Write context
            stw     r8,vmmppcr15(r5)
            stw     r9,vmmppcr16(r5)
            stw     r10,vmmppcr17(r5)
            stw     r11,vmmppcr18(r5)
            stw     r12,vmmppcr19(r5)
            stw     r13,vmmppcr20(r5)
            la      r6,vmmppcr30(r5)            ; Point to sixth line
            stw     r14,vmmppcr21(r5)

            dcbt    0,r6                        ; Touch sixth line

            stw     r15,saver14+4(r30)          ; Write vm context
            stw     r24,saver15+4(r30)
            stw     r17,saver16+4(r30)
            stw     r18,saver17+4(r30)
            stw     r19,saver18+4(r30)
            stw     r20,saver19+4(r30)
            stw     r21,saver20+4(r30)
            stw     r22,saver21+4(r30)
            lwz     r7,saver22+4(r30)           ; Read savearea
            lwz     r8,saver23+4(r30)
            lwz     r9,saver24+4(r30)
            lwz     r10,saver25+4(r30)
            lwz     r11,saver26+4(r30)
            lwz     r12,saver27+4(r30)
            lwz     r13,saver28+4(r30)
            lwz     r14,saver29+4(r30)

            lwz     r15,vmmppcr22(r5)           ; Read vm context
            lwz     r24,vmmppcr23(r5)
            lwz     r17,vmmppcr24(r5)
            lwz     r18,vmmppcr25(r5)
            lwz     r19,vmmppcr26(r5)
            lwz     r20,vmmppcr27(r5)
            lwz     r21,vmmppcr28(r5)
            lwz     r22,vmmppcr29(r5)

            stw     r7,vmmppcr22(r5)            ; Write context
            stw     r8,vmmppcr23(r5)
            stw     r9,vmmppcr24(r5)
            stw     r10,vmmppcr25(r5)
            stw     r11,vmmppcr26(r5)
            stw     r12,vmmppcr27(r5)
            stw     r13,vmmppcr28(r5)
            la      r6,vmmppcvscr(r5)           ; Point to seventh line
            stw     r14,vmmppcr29(r5)

            dcbt    0,r6                        ; Touch seventh line

            stw     r15,saver22+4(r30)          ; Write vm context
            stw     r24,saver23+4(r30)
            stw     r17,saver24+4(r30)
            stw     r18,saver25+4(r30)
            stw     r19,saver26+4(r30)
            stw     r20,saver27+4(r30)
            stw     r21,saver28+4(r30)
            stw     r22,saver29+4(r30)
            lwz     r7,saver30+4(r30)           ; Read savearea
            lwz     r8,saver31+4(r30)
            lwz     r9,savecr(r30)
            lwz     r10,savexer+4(r30)
            lwz     r11,savelr+4(r30)
            lwz     r12,savectr+4(r30)
            lwz     r14,savevrsave(r30)

            lwz     r15,vmmppcr30(r5)           ; Read vm context
            lwz     r24,vmmppcr31(r5)
            lwz     r17,vmmppccr(r5)
            lwz     r18,vmmppcxer(r5)
            lwz     r19,vmmppclr(r5)
            lwz     r20,vmmppcctr(r5)
            lwz     r22,vmmppcvrsave(r5)

            stw     r7,vmmppcr30(r5)            ; Write context
            stw     r8,vmmppcr31(r5)
            stw     r9,vmmppccr(r5)
            stw     r10,vmmppcxer(r5)
            stw     r11,vmmppclr(r5)
            stw     r12,vmmppcctr(r5)
            stw     r14,vmmppcvrsave(r5)

            stw     r15,saver30+4(r30)          ; Write vm context
            stw     r24,saver31+4(r30)
            stw     r17,savecr(r30)
            stw     r18,savexer+4(r30)
            stw     r19,savelr+4(r30)
            stw     r20,savectr+4(r30)
            stw     r22,savevrsave(r30)
            lwz     r7,savevscr+0(r30)          ; Read savearea
            lwz     r8,savevscr+4(r30)
            lwz     r9,savevscr+8(r30)
            lwz     r10,savevscr+12(r30)
            lwz     r11,savefpscrpad(r30)
            lwz     r12,savefpscr(r30)

            lwz     r15,vmmppcvscr+0(r5)        ; Read vm context
            lwz     r24,vmmppcvscr+4(r5)
            lwz     r17,vmmppcvscr+8(r5)
            lwz     r18,vmmppcvscr+12(r5)
            lwz     r19,vmmppcfpscrpad(r5)
            lwz     r20,vmmppcfpscr(r5)

            stw     r7,vmmppcvscr+0(r5)         ; Write context
            stw     r8,vmmppcvscr+4(r5)
            stw     r9,vmmppcvscr+8(r5)
            stw     r10,vmmppcvscr+12(r5)
            stw     r11,vmmppcfpscrpad(r5)
            stw     r12,vmmppcfpscr(r5)

            stw     r15,savevscr+0(r30)         ; Write vm context
            stw     r24,savevscr+4(r30)
            stw     r17,savevscr+8(r30)
            stw     r18,savevscr+12(r30)
            stw     r19,savefpscrpad(r30)
            stw     r20,savefpscr(r30)
;
;			Cobble up the exception return code and save any specific return values
;

            lwz     r7,saveexception(r30)       ; Pick up the exception code
            rlwinm  r8,r7,30,24,31              ; Convert exception to return code
            cmplwi  r7,T_DATA_ACCESS            ; Was this a DSI?
            stw     r8,return_code(r5)          ; Save the exit code
            cmplwi  cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI?
            beq+    swapDSI                     ; Yeah...
            cmplwi  r7,T_ALIGNMENT              ; Alignment exception?
            beq+    cr1,swapISI                 ; We had an ISI...
            cmplwi  cr1,r7,T_SYSTEM_CALL        ; Exiting because of a system call?
            beq+    swapDSI                     ; An alignment exception looks like a DSI...
            beq+    cr1,swapSC                  ; We had a system call...

            blr                                 ; Return for all other exceptions...
;
;			Set exit returns for a DSI or alignment exception
;

swapDSI:    lwz     r10,savedar+4(r30)          ; Get the DAR
            lwz     r7,savedsisr(r30)           ; and the DSISR
            stw     r10,return_params+0(r5)     ; Save DAR as first return parm
            stw     r7,return_params+4(r5)      ; Save DSISR as second return parm
            blr                                 ; Return...
;
;			Set exit returns for an ISI
;

swapISI:    lwz     r7,vmmppcmsr(r5)            ; Get the SRR1 value
            lwz     r10,vmmppcpc(r5)            ; Get the PC as failing address
            rlwinm  r7,r7,0,1,4                 ; Save the bits that match the DSISR
            stw     r10,return_params+0(r5)     ; Save PC as first return parm
            stw     r7,return_params+4(r5)      ; Save the pseudo-DSISR as second return parm
            blr                                 ; Return...
;
;			Set exit returns for a system call (note: we did the first 3 earlier)
;			Do we really need to pass parameters back here????
;

swapSC:     lwz     r10,vmmppcr6(r5)            ; Get the fourth parameter
            stw     r10,return_params+12(r5)    ; Save it
            blr                                 ; Return...
;
;			Here is the swap for 64-bit machines
;

swap64:     lwz     r22,vmmXAFlgs(r27)          ; Get the eXtended Architecture flags
            ld      r7,savesrr0(r30)            ; Start moving context
            ld      r8,savesrr1(r30)
            ld      r9,saver0(r30)
            cmplwi  cr1,r14,T_SYSTEM_CALL       ; Are we switching because of a system call?
            ld      r10,saver1(r30)
            ld      r11,saver2(r30)
            rlwinm. r22,r22,0,0,0               ; Are we doing a 64-bit virtual machine?
            ld      r12,saver3(r30)
            crnot   vmmDoing64,cr0_eq           ; Remember if this is a 64-bit VM
            ld      r13,saver4(r30)
            la      r6,vmmppcr6(r5)             ; Point to second line
            ld      r14,saver5(r30)

            dcbt    0,r6                        ; Touch second line of context area

            bt      vmmDoing64,sw64x1           ; Skip to 64-bit stuff

            lwz     r15,vmmppcpc(r5)            ; First line of context
            lis     r22,hi16(MSR_IMPORT_BITS)   ; Get the MSR bits that are controllable by user
            lwz     r23,vmmppcmsr(r5)
            ori     r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
            lwz     r17,vmmppcr0(r5)
            lwz     r18,vmmppcr1(r5)
            and     r23,r23,r22                 ; Keep only the controllable bits
            lwz     r19,vmmppcr2(r5)
            oris    r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
            lwz     r20,vmmppcr3(r5)
            ori     r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
            lwz     r21,vmmppcr4(r5)
            lwz     r22,vmmppcr5(r5)

            dcbt    0,r6                        ; Touch third line of context area

            stw     r7,vmmppcpc(r5)             ; Save emulator context into the context area
            stw     r8,vmmppcmsr(r5)
            stw     r9,vmmppcr0(r5)
            stw     r10,vmmppcr1(r5)
            stw     r11,vmmppcr2(r5)
            stw     r12,vmmppcr3(r5)
            stw     r13,vmmppcr4(r5)
            stw     r14,vmmppcr5(r5)

;
;			Save the first 3 parameters if we are an SC (we will take care of the last later)
;
            bne+    cr1,sw64x1done              ; Skip next if not an SC exception...
            stw     r12,return_params+0(r5)     ; Save the first return
            stw     r13,return_params+4(r5)     ; Save the second return
            stw     r14,return_params+8(r5)     ; Save the third return
            b       sw64x1done                  ; We are done with this section...
sw64x1:     ld      r15,vmmppcXpc(r5)           ; First line of context
            li      r0,1                        ; Get a 1 to turn on 64-bit
            lis     r22,hi16(MSR_IMPORT_BITS)   ; Get the MSR bits that are controllable by user (we will also allow 64-bit here)
            sldi    r0,r0,63                    ; Get 64-bit bit
            ld      r23,vmmppcXmsr(r5)
            ori     r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
            ld      r17,vmmppcXr0(r5)
            or      r22,r22,r0                  ; Add the 64-bit bit
            ld      r18,vmmppcXr1(r5)
            and     r23,r23,r22                 ; Keep only the controllable bits
            ld      r19,vmmppcXr2(r5)
            oris    r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
            ld      r20,vmmppcXr3(r5)
            ori     r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
            ld      r21,vmmppcXr4(r5)
            ld      r22,vmmppcXr5(r5)

            dcbt    0,r6                        ; Touch third line of context area

            std     r7,vmmppcXpc(r5)            ; Save emulator context into the context area
            std     r8,vmmppcXmsr(r5)
            std     r9,vmmppcXr0(r5)
            std     r10,vmmppcXr1(r5)
            std     r11,vmmppcXr2(r5)
            std     r12,vmmppcXr3(r5)
            std     r13,vmmppcXr4(r5)
            std     r14,vmmppcXr5(r5)

;
;			Save the first 3 parameters if we are an SC (we will take care of the last later)
;
            bne+    cr1,sw64x1done              ; Skip next if not an SC exception...
            std     r12,return_paramsX+0(r5)    ; Save the first return
            std     r13,return_paramsX+8(r5)    ; Save the second return
            std     r14,return_paramsX+16(r5)   ; Save the third return
sw64x1done:
            std     r15,savesrr0(r30)           ; Save vm context into the savearea
            std     r23,savesrr1(r30)
            std     r17,saver0(r30)
            std     r18,saver1(r30)
            std     r19,saver2(r30)
            std     r20,saver3(r30)
            std     r21,saver4(r30)
            std     r22,saver5(r30)

            la      r6,vmmppcr14(r5)            ; Point to fourth line

            dcbt    0,r6                        ; Touch fourth line
            ld      r7,saver6(r30)              ; Read savearea
            ld      r8,saver7(r30)
            ld      r9,saver8(r30)
            ld      r10,saver9(r30)
            ld      r11,saver10(r30)
            ld      r12,saver11(r30)
            ld      r13,saver12(r30)
            ld      r14,saver13(r30)

            bt      vmmDoing64,sw64x2           ; Skip to 64-bit stuff
            lwz     r15,vmmppcr6(r5)            ; Read vm context
            lwz     r24,vmmppcr7(r5)
            lwz     r17,vmmppcr8(r5)
            lwz     r18,vmmppcr9(r5)
            lwz     r19,vmmppcr10(r5)
            lwz     r20,vmmppcr11(r5)
            lwz     r21,vmmppcr12(r5)
            lwz     r22,vmmppcr13(r5)

            stw     r7,vmmppcr6(r5)             ; Write context
            stw     r8,vmmppcr7(r5)
            stw     r9,vmmppcr8(r5)
            stw     r10,vmmppcr9(r5)
            stw     r11,vmmppcr10(r5)
            stw     r12,vmmppcr11(r5)
            stw     r13,vmmppcr12(r5)
            la      r6,vmmppcr22(r5)            ; Point to fifth line
            stw     r14,vmmppcr13(r5)

            dcbt    0,r6                        ; Touch fifth line
            b       sw64x2done                  ; We are done with this section...
sw64x2:     ld      r15,vmmppcXr6(r5)           ; Read vm context
            ld      r24,vmmppcXr7(r5)
            ld      r17,vmmppcXr8(r5)
            ld      r18,vmmppcXr9(r5)
            ld      r19,vmmppcXr10(r5)
            ld      r20,vmmppcXr11(r5)
            ld      r21,vmmppcXr12(r5)
            ld      r22,vmmppcXr13(r5)

            std     r7,vmmppcXr6(r5)            ; Write context
            std     r8,vmmppcXr7(r5)
            std     r9,vmmppcXr8(r5)
            std     r10,vmmppcXr9(r5)
            std     r11,vmmppcXr10(r5)
            std     r12,vmmppcXr11(r5)
            std     r13,vmmppcXr12(r5)
            la      r6,vmmppcXr22(r5)           ; Point to fifth line
            std     r14,vmmppcXr13(r5)

            dcbt    0,r6                        ; Touch fifth line
sw64x2done: std     r15,saver6(r30)             ; Write vm context
            std     r24,saver7(r30)
            std     r17,saver8(r30)
            std     r18,saver9(r30)
            std     r19,saver10(r30)
            std     r20,saver11(r30)
            std     r21,saver12(r30)
            std     r22,saver13(r30)
            ld      r7,saver14(r30)             ; Read savearea
            ld      r8,saver15(r30)
            ld      r9,saver16(r30)
            ld      r10,saver17(r30)
            ld      r11,saver18(r30)
            ld      r12,saver19(r30)
            ld      r13,saver20(r30)
            ld      r14,saver21(r30)

            bt      vmmDoing64,sw64x3           ; Skip to 64-bit stuff
            lwz     r15,vmmppcr14(r5)           ; Read vm context
            lwz     r24,vmmppcr15(r5)
            lwz     r17,vmmppcr16(r5)
            lwz     r18,vmmppcr17(r5)
            lwz     r19,vmmppcr18(r5)
            lwz     r20,vmmppcr19(r5)
            lwz     r21,vmmppcr20(r5)
            lwz     r22,vmmppcr21(r5)

            stw     r7,vmmppcr14(r5)            ; Write context
            stw     r8,vmmppcr15(r5)
            stw     r9,vmmppcr16(r5)
            stw     r10,vmmppcr17(r5)
            stw     r11,vmmppcr18(r5)
            stw     r12,vmmppcr19(r5)
            stw     r13,vmmppcr20(r5)
            la      r6,vmmppcr30(r5)            ; Point to sixth line
            stw     r14,vmmppcr21(r5)

            dcbt    0,r6                        ; Touch sixth line
            b       sw64x3done                  ; Done with this section...
sw64x3:     ld      r15,vmmppcXr14(r5)          ; Read vm context
            ld      r24,vmmppcXr15(r5)
            ld      r17,vmmppcXr16(r5)
            ld      r18,vmmppcXr17(r5)
            ld      r19,vmmppcXr18(r5)
            ld      r20,vmmppcXr19(r5)
            ld      r21,vmmppcXr20(r5)
            ld      r22,vmmppcXr21(r5)

            std     r7,vmmppcXr14(r5)           ; Write context
            std     r8,vmmppcXr15(r5)
            std     r9,vmmppcXr16(r5)
            std     r10,vmmppcXr17(r5)
            std     r11,vmmppcXr18(r5)
            std     r12,vmmppcXr19(r5)
            std     r13,vmmppcXr20(r5)
            la      r6,vmmppcXr30(r5)           ; Point to sixth line
            std     r14,vmmppcXr21(r5)

            dcbt    0,r6                        ; Touch sixth line
sw64x3done: std     r15,saver14(r30)            ; Write vm context
            std     r24,saver15(r30)
            std     r17,saver16(r30)
            std     r18,saver17(r30)
            std     r19,saver18(r30)
            std     r20,saver19(r30)
            std     r21,saver20(r30)
            std     r22,saver21(r30)
            ld      r7,saver22(r30)             ; Read savearea
            ld      r8,saver23(r30)
            ld      r9,saver24(r30)
            ld      r10,saver25(r30)
            ld      r11,saver26(r30)
            ld      r12,saver27(r30)
            ld      r13,saver28(r30)
            ld      r14,saver29(r30)

            bt      vmmDoing64,sw64x4           ; Skip to 64-bit stuff
            lwz     r15,vmmppcr22(r5)           ; Read vm context
            lwz     r24,vmmppcr23(r5)
            lwz     r17,vmmppcr24(r5)
            lwz     r18,vmmppcr25(r5)
            lwz     r19,vmmppcr26(r5)
            lwz     r20,vmmppcr27(r5)
            lwz     r21,vmmppcr28(r5)
            lwz     r22,vmmppcr29(r5)

            stw     r7,vmmppcr22(r5)            ; Write context
            stw     r8,vmmppcr23(r5)
            stw     r9,vmmppcr24(r5)
            stw     r10,vmmppcr25(r5)
            stw     r11,vmmppcr26(r5)
            stw     r12,vmmppcr27(r5)
            stw     r13,vmmppcr28(r5)
            la      r6,vmmppcvscr(r5)           ; Point to seventh line
            stw     r14,vmmppcr29(r5)
            dcbt    0,r6                        ; Touch seventh line
            b       sw64x4done                  ; Done with this section...
sw64x4:     ld      r15,vmmppcXr22(r5)          ; Read vm context
            ld      r24,vmmppcXr23(r5)
            ld      r17,vmmppcXr24(r5)
            ld      r18,vmmppcXr25(r5)
            ld      r19,vmmppcXr26(r5)
            ld      r20,vmmppcXr27(r5)
            ld      r21,vmmppcXr28(r5)
            ld      r22,vmmppcXr29(r5)

            std     r7,vmmppcXr22(r5)           ; Write context
            std     r8,vmmppcXr23(r5)
            std     r9,vmmppcXr24(r5)
            std     r10,vmmppcXr25(r5)
            std     r11,vmmppcXr26(r5)
            std     r12,vmmppcXr27(r5)
            std     r13,vmmppcXr28(r5)
            la      r6,vmmppcvscr(r5)           ; Point to seventh line
            std     r14,vmmppcXr29(r5)

            dcbt    0,r6                        ; Touch seventh line
sw64x4done: std     r15,saver22(r30)            ; Write vm context
            std     r24,saver23(r30)
            std     r17,saver24(r30)
            std     r18,saver25(r30)
            std     r19,saver26(r30)
            std     r20,saver27(r30)
            std     r21,saver28(r30)
            std     r22,saver29(r30)
            ld      r7,saver30(r30)             ; Read savearea
            ld      r8,saver31(r30)
            lwz     r9,savecr(r30)
            ld      r10,savexer(r30)
            ld      r11,savelr(r30)
            ld      r12,savectr(r30)
            lwz     r14,savevrsave(r30)

            bt      vmmDoing64,sw64x5           ; Skip to 64-bit stuff
            lwz     r15,vmmppcr30(r5)           ; Read vm context
            lwz     r24,vmmppcr31(r5)
            lwz     r17,vmmppccr(r5)
            lwz     r18,vmmppcxer(r5)
            lwz     r19,vmmppclr(r5)
            lwz     r20,vmmppcctr(r5)
            lwz     r22,vmmppcvrsave(r5)

            stw     r7,vmmppcr30(r5)            ; Write context
            stw     r8,vmmppcr31(r5)
            stw     r9,vmmppccr(r5)
            stw     r10,vmmppcxer(r5)
            stw     r11,vmmppclr(r5)
            stw     r12,vmmppcctr(r5)
            stw     r14,vmmppcvrsave(r5)
            b       sw64x5done                  ; Done here...
sw64x5:     ld      r15,vmmppcXr30(r5)          ; Read vm context
            ld      r24,vmmppcXr31(r5)
            lwz     r17,vmmppcXcr(r5)
            ld      r18,vmmppcXxer(r5)
            ld      r19,vmmppcXlr(r5)
            ld      r20,vmmppcXctr(r5)
            lwz     r22,vmmppcXvrsave(r5)

            std     r7,vmmppcXr30(r5)           ; Write context
            std     r8,vmmppcXr31(r5)
            stw     r9,vmmppcXcr(r5)
            std     r10,vmmppcXxer(r5)
            std     r11,vmmppcXlr(r5)
            std     r12,vmmppcXctr(r5)
            stw     r14,vmmppcXvrsave(r5)
sw64x5done: std     r15,saver30(r30)            ; Write vm context
            std     r24,saver31(r30)
            stw     r17,savecr(r30)
            std     r18,savexer(r30)
            std     r19,savelr(r30)
            std     r20,savectr(r30)
            stw     r22,savevrsave(r30)
            lwz     r7,savevscr+0(r30)          ; Read savearea
            lwz     r8,savevscr+4(r30)
            lwz     r9,savevscr+8(r30)
            lwz     r10,savevscr+12(r30)
            lwz     r11,savefpscrpad(r30)
            lwz     r12,savefpscr(r30)

            lwz     r15,vmmppcvscr+0(r5)        ; Read vm context
            lwz     r24,vmmppcvscr+4(r5)
            lwz     r17,vmmppcvscr+8(r5)
            lwz     r18,vmmppcvscr+12(r5)
            lwz     r19,vmmppcfpscrpad(r5)
            lwz     r20,vmmppcfpscr(r5)

            stw     r7,vmmppcvscr+0(r5)         ; Write context
            stw     r8,vmmppcvscr+4(r5)
            stw     r9,vmmppcvscr+8(r5)
            stw     r10,vmmppcvscr+12(r5)
            stw     r11,vmmppcfpscrpad(r5)
            stw     r12,vmmppcfpscr(r5)

            stw     r15,savevscr+0(r30)         ; Write vm context
            stw     r24,savevscr+4(r30)
            stw     r17,savevscr+8(r30)
            stw     r18,savevscr+12(r30)
            stw     r19,savefpscrpad(r30)
            stw     r20,savefpscr(r30)
;
;			Cobble up the exception return code and save any specific return values
;

            lwz     r7,saveexception(r30)       ; Pick up the exception code
            rlwinm  r8,r7,30,24,31              ; Convert exception to return code
            cmplwi  r7,T_DATA_ACCESS            ; Was this a DSI?
            stw     r8,return_code(r5)          ; Save the exit code
            cmplwi  cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI?
            beq+    swapDSI64                   ; Yeah...
            cmplwi  r7,T_ALIGNMENT              ; Alignment exception?
            beq+    cr1,swapISI64               ; We had an ISI...
            cmplwi  cr1,r7,T_SYSTEM_CALL        ; Exiting because of a system call?
            beq+    swapDSI64                   ; An alignment exception looks like a DSI...
            beq+    cr1,swapSC64                ; We had a system call...

            blr                                 ; Return for all other exceptions...
;
;			Set exit returns for a DSI or alignment exception
;

swapDSI64:  ld      r10,savedar(r30)            ; Get the DAR
            lwz     r7,savedsisr(r30)           ; and the DSISR
            bt      vmmDoing64,sw64DSI          ; Skip to 64-bit stuff...

            stw     r10,return_params+0(r5)     ; Save DAR as first return parm
            stw     r7,return_params+4(r5)      ; Save DSISR as second return parm
            blr                                 ; Return...

sw64DSI:    std     r10,return_paramsX+0(r5)    ; Save DAR as first return parm
            std     r7,return_paramsX+8(r5)     ; Save DSISR as second return parm (note that this is expanded to 64 bits)
            blr                                 ; Return...
;
;			Set exit returns for an ISI
;

swapISI64:  bt      vmmDoing64,sw64ISI          ; Skip to 64-bit stuff...
            lwz     r7,vmmppcmsr(r5)            ; Get the SRR1 value
            lwz     r10,vmmppcpc(r5)            ; Get the PC as failing address
            rlwinm  r7,r7,0,1,4                 ; Save the bits that match the DSISR
            stw     r10,return_params+0(r5)     ; Save PC as first return parm
            stw     r7,return_params+4(r5)      ; Save the pseudo-DSISR as second return parm
            blr                                 ; Return...

sw64ISI:    ld      r7,vmmppcXmsr(r5)           ; Get the SRR1 value
            ld      r10,vmmppcXpc(r5)           ; Get the PC as failing address
            rlwinm  r7,r7,0,1,4                 ; Save the bits that match the DSISR
            std     r10,return_paramsX+0(r5)    ; Save PC as first return parm
            std     r7,return_paramsX+8(r5)     ; Save the pseudo-DSISR as second return parm
            blr                                 ; Return...
;
;			Set exit returns for a system call (note: we did the first 3 earlier)
;			Do we really need to pass parameters back here????
;

swapSC64:   bt      vmmDoing64,sw64SC           ; Skip to 64-bit stuff...
            lwz     r10,vmmppcr6(r5)            ; Get the fourth parameter
            stw     r10,return_params+12(r5)    ; Save it
            blr                                 ; Return...

sw64SC:     ld      r10,vmmppcXr6(r5)           ; Get the fourth parameter
            std     r10,return_paramsX+24(r5)   ; Save it
            blr                                 ; Return...
;
;			vmmFamGuestResume:
;			Restore guest context from FAM mode.
;
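;
;			Sketch of the resume (pseudo-C; names are descriptive): the FAM guest
;			registers kept in the comm page are copied into the savearea, with the
;			guest MSR scrubbed exactly like a normal launch and the FP/Vector enables
;			inherited from the current SRR1:
;
;				msr  = (famguest_msr & MSR_IMPORT_BITS) | MSR_EXPORT_MASK_SET;
;				msr |= srr1 & (MSR_FP | MSR_VEC);        /* propagate FP/Vector */
;				save->srr1 = msr;
;				save->srr0 = famguest_pc;                /* guest resumes here */
;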
vmmFamGuestResume:
            mfsprg  r10,0                       ; Get the per_proc
            lwz     r27,vmmCEntry(r3)           ; Get the context that is active
            lwz     r4,VMMXAFlgs(r10)           ; Get the eXtended Architecture flags
            rlwinm. r4,r4,0,0,0                 ; Are we doing a 64-bit virtual machine?
            lwz     r15,spcFlags(r10)           ; Get per_proc special flags
            mr      r26,r3                      ; Save the activation pointer
            lwz     r20,vmmContextKern(r27)     ; Get the comm area
            rlwinm  r15,r15,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
            stw     r15,spcFlags(r10)           ; Update the special flags
            bne     fgr64                       ; This is a 64-bit virtual machine...
            lwz     r7,famguestpc(r20)          ; Load famguest ctx pc
            bf++    vmmMapDone,fgrNoMap         ; No mapping was done for this space...
            lwz     r3,SAVflags(r30)            ; Pick up the savearea flags
            lwz     r2,vmmLastMap(r28)          ; Get the last mapped address
            lwz     r6,vmmLastMap+4(r28)        ; Get the last mapped address low half
            li      r4,T_DATA_ACCESS            ; Change to DSI fault
            oris    r3,r3,hi16(SAVredrive)      ; Set exception redrive
            stw     r2,savedar(r30)             ; Set the DAR to the last thing we mapped
            stw     r6,savedar+4(r30)           ; Set the DAR to the last thing we mapped
            stw     r3,SAVflags(r30)            ; Turn on the redrive request
            lis     r2,hi16(MASK(DSISR_HASH))   ; Set PTE/DBAT miss
            stw     r4,saveexception(r30)       ; Say we need to emulate a DSI
            li      r0,0                        ; Get a zero
            stw     r2,savedsisr(r30)           ; Pretend we have a PTE miss
            stb     r0,vmmGFlags+3(r28)         ; Show that the redrive has been taken care of
fgrNoMap:
            lwz     r4,savesrr1+4(r30)          ; Get the saved MSR value
            stw     r7,savesrr0+4(r30)          ; Set savearea pc
            lwz     r5,famguestmsr(r20)         ; Load famguest ctx msr
            lis     r6,hi16(MSR_IMPORT_BITS)    ; Get the MSR bits that are controllable by user
            ori     r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
            and     r5,r5,r6                    ; Keep only the controllable bits
            oris    r5,r5,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
            ori     r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
            rlwimi  r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
            rlwimi  r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
            stw     r5,savesrr1+4(r30)          ; Set savearea srr1
            lwz     r4,famguestr0(r20)          ; Load famguest ctx r0
            lwz     r5,famguestr1(r20)          ; Load famguest ctx r1
            lwz     r6,famguestr2(r20)          ; Load famguest ctx r2
            lwz     r7,famguestr3(r20)          ; Load famguest ctx r3
            stw     r4,saver0+4(r30)            ; Set savearea r0
            stw     r5,saver1+4(r30)            ; Set savearea r1
            stw     r6,saver2+4(r30)            ; Set savearea r2
            stw     r7,saver3+4(r30)            ; Set savearea r3
            lwz     r4,famguestr4(r20)          ; Load famguest ctx r4
            lwz     r5,famguestr5(r20)          ; Load famguest ctx r5
            lwz     r6,famguestr6(r20)          ; Load famguest ctx r6
            lwz     r7,famguestr7(r20)          ; Load famguest ctx r7
            stw     r4,saver4+4(r30)            ; Set savearea r4
            stw     r5,saver5+4(r30)            ; Set savearea r5
            stw     r6,saver6+4(r30)            ; Set savearea r6
            stw     r7,saver7+4(r30)            ; Set savearea r7
            b       fgrret                      ; Join the common exit...
fgr64:      ld      r7,famguestXpc(r20)         ; Load famguest ctx pc
            bf++    vmmMapDone,fgrXNoMap        ; No mapping was done for this space...
            lwz     r3,SAVflags(r30)            ; Pick up the savearea flags
            ld      r2,vmmLastMap(r28)          ; Get the last mapped address
            li      r4,T_DATA_ACCESS            ; Change to DSI fault
            oris    r3,r3,hi16(SAVredrive)      ; Set exception redrive
            std     r2,savedar(r30)             ; Set the DAR to the last thing we mapped
            stw     r3,SAVflags(r30)            ; Turn on the redrive request
            lis     r2,hi16(MASK(DSISR_HASH))   ; Set PTE/DBAT miss
            stw     r4,saveexception(r30)       ; Say we need to emulate a DSI
            li      r0,0                        ; Get a zero
            stw     r2,savedsisr(r30)           ; Pretend we have a PTE miss
            stb     r0,vmmGFlags+3(r28)         ; Show that the redrive has been taken care of
fgrXNoMap: ld r4,savesrr1(r30) ; Get the saved MSR value
std r7,savesrr0(r30) ; Set savearea pc
ld r5,famguestXmsr(r20) ; Load famguest ctx msr
lis r6,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
ori r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
and r5,r5,r6 ; Keep only the controllable bits
oris r5,r5,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
ori r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
rlwimi r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
rlwimi r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
std r5,savesrr1(r30) ; Set savearea srr1
ld r4,famguestXr0(r20) ; Load famguest ctx r0
ld r5,famguestXr1(r20) ; Load famguest ctx r1
ld r6,famguestXr2(r20) ; Load famguest ctx r2
ld r7,famguestXr3(r20) ; Load famguest ctx r3
std r4,saver0(r30) ; Set savearea r0
std r5,saver1(r30) ; Set savearea r1
std r6,saver2(r30) ; Set savearea r2
std r7,saver3(r30) ; Set savearea r3
ld r4,famguestXr4(r20) ; Load famguest ctx r4
ld r5,famguestXr5(r20) ; Load famguest ctx r5
ld r6,famguestXr6(r20) ; Load famguest ctx r6
ld r7,famguestXr7(r20) ; Load famguest ctx r7
std r4,saver4(r30) ; Set savearea r4
std r5,saver5(r30) ; Set savearea r5
std r6,saver6(r30) ; Set savearea r6
std r7,saver7(r30) ; Set savearea r7
fgrret: li r3,1 ; Show normal exit with check for AST
lwz r16,ACT_THREAD(r26) ; Restore the thread pointer
b EXT(ppcscret) ; Go back to handler...
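;
; Both paths above scrub the guest MSR the same way before it goes into
; savesrr1. As a C-style sketch (illustrative only; the names are the
; constants used in this file, and MSR_FP/MSR_VEC stand for the masks
; selected by MSR_FP_BIT/MSR_VEC_BIT):
;
;	srr1 = (famguest_msr & MSR_IMPORT_BITS)   /* keep user-controllable bits */
;	     |  MSR_EXPORT_MASK_SET;              /* force the required bits on  */
;	srr1 = (srr1 & ~(MSR_FP | MSR_VEC))       /* FP and VEC enables come     */
;	     | (old_srr1 & (MSR_FP | MSR_VEC));   /* from the current saved MSR  */
;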
;
; FAM Intercept exception handler
;
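;
; As used below: r2 is the per_proc, r11 the exception code, r13 the savearea,
; and r0 the saved CR; r5 appears to carry the saved XER, since it is moved to
; the XER before r5 is ever reloaded. r13 and r11 are recovered from sprg2 and
; sprg3 again on the way out.
;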
.globl EXT(vmm_fam_exc)

LEXT(vmm_fam_exc)
lwz r4,VMMXAFlgs(r2) ; Get the eXtended Architecture flags
lwz r1,pfAvailable(r2) ; Get the CPU features flags
rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
bne fexcX ; Go handle a 64-bit virtual machine...
lwz r4,saver4+4(r13) ; Load savearea r4
cmplwi r11,T_ALIGNMENT ; Alignment exception?
lwz r3,VMMareaPhys(r2) ; Load phys state page addr
mtcrf 0x02,r1 ; Move pf64Bit to its normal place in CR6
cmplwi cr1,r11,T_PROGRAM ; Exiting because of a PRG?
bt++ pf64Bitb,fexcVMareaPhys64 ; Go do this on a 64-bit machine...
slwi r3,r3,12 ; Change ppnum to physical address
b fexcVMareaPhysret ; Join up...

fexcVMareaPhys64: sldi r3,r3,12 ; Change ppnum to physical address

fexcVMareaPhysret: mtxer r5 ; Restore xer
lwz r5,saver5+4(r13) ; Load savearea r5
lwz r6,saver6+4(r13) ; Load savearea r6
stw r4,famguestr4(r3) ; Save r4 in famguest ctx
stw r5,famguestr5(r3) ; Save r5 in famguest ctx
stw r6,famguestr6(r3) ; Save r6 in famguest ctx
stw r7,famguestr7(r3) ; Save r7 in famguest ctx
lwz r4,saver0+4(r13) ; Load savearea r0
lwz r5,saver1+4(r13) ; Load savearea r1
lwz r6,saver2+4(r13) ; Load savearea r2
lwz r7,saver3+4(r13) ; Load savearea r3
stw r4,famguestr0(r3) ; Save r0 in famguest ctx
stw r5,famguestr1(r3) ; Save r1 in famguest ctx
stw r6,famguestr2(r3) ; Save r2 in famguest ctx
stw r7,famguestr3(r3) ; Save r3 in famguest ctx
lwz r4,spcFlags(r2) ; Load per_proc spcFlags
oris r4,r4,hi16(FamVMmode) ; Set FAM mode
stw r4,spcFlags(r2) ; Update per_proc spcFlags
mfsrr0 r2 ; Get the interrupt srr0
mfsrr1 r4 ; Get the interrupt srr1
stw r2,famguestpc(r3) ; Save srr0 in famguest ctx
stw r4,famguestmsr(r3) ; Save srr1 in famguest ctx
li r6,lo16(MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)|MASK(MSR_FE1))
andc r6,r4,r6 ; Clear SE BE FE0 FE1
mtsrr1 r6 ; Set srr1
mr r6,r3 ; Set r6 with phys state page addr
rlwinm r7,r11,30,24,31 ; Convert exception to return code
beq+ cr1,fexcPRG ; We had a program exception...
; We had an Alignment...

mfdar r3 ; Load dar
mfdsisr r4 ; Load dsisr
stw r3,famparam+0x4(r6) ; Set famparam 1 with dar
stw r4,famparam+0x8(r6) ; Set famparam 2 with dsisr
b fexcret ; Join the common exit...
fexcPRG: stw r4,famparam+0x4(r6) ; Set famparam 1 with srr1
mr r3,r4 ; Set r3 with srr1
lwz r4,famguestr4(r6) ; Load r4 from famguest context

fexcret: lwz r5,famguestr5(r6) ; Load r5 from famguest context
lwz r13,famhandler(r6) ; Load user address to resume
stw r2,famparam(r6) ; Set famparam 0 with srr0
stw r7,famdispcode(r6) ; Save the exit code
lwz r1,famrefcon(r6) ; Load refcon
bt++ pf64Bitb,fexcrfi64 ; Go do this on a 64-bit machine...
mtcr r0 ; Restore cr
mtsrr0 r13 ; Load srr0
mr r0,r7 ; Set dispatch code
lwz r7,famguestr7(r6) ; Load r7 from famguest context
lwz r6,famguestr6(r6) ; Load r6 from famguest context
mfsprg r13,2 ; Restore r13
mfsprg r11,3 ; Restore r11
rfi ; All done, go back...
fexcrfi64: mtcr r0 ; Restore cr
mtsrr0 r13 ; Load srr0
mr r0,r7 ; Set dispatch code
lwz r7,famguestr7(r6) ; Load r7 from famguest context
lwz r6,famguestr6(r6) ; Load r6 from famguest context
mfsprg r13,2 ; Restore r13
mfsprg r11,3 ; Restore r11
rfid ; All done, go back...
fexcX: mtxer r5 ; Restore xer
ld r4,saver4(r13) ; Load savearea r4
ld r5,saver5(r13) ; Load savearea r5
ld r6,saver6(r13) ; Load savearea r6
cmplwi r11,T_ALIGNMENT ; Alignment exception?
lwz r3,VMMareaPhys(r2) ; Load phys state page addr
mtcrf 0x02,r1 ; Move pf64Bit to its normal place in CR6
cmplwi cr1,r11,T_PROGRAM ; Exiting because of a PRG?
sldi r3,r3,12 ; Change ppnum to physical address
std r4,famguestXr4(r3) ; Save r4 in famguest ctx
std r5,famguestXr5(r3) ; Save r5 in famguest ctx
std r6,famguestXr6(r3) ; Save r6 in famguest ctx
std r7,famguestXr7(r3) ; Save r7 in famguest ctx
ld r4,saver0(r13) ; Load savearea r0
ld r5,saver1(r13) ; Load savearea r1
ld r6,saver2(r13) ; Load savearea r2
ld r7,saver3(r13) ; Load savearea r3
std r4,famguestXr0(r3) ; Save r0 in famguest ctx
std r5,famguestXr1(r3) ; Save r1 in famguest ctx
std r6,famguestXr2(r3) ; Save r2 in famguest ctx
std r7,famguestXr3(r3) ; Save r3 in famguest ctx
lwz r4,spcFlags(r2) ; Load per_proc spcFlags
oris r4,r4,hi16(FamVMmode) ; Set FAM mode
stw r4,spcFlags(r2) ; Update per_proc spcFlags
mfsrr0 r2 ; Get the interrupt srr0
mfsrr1 r4 ; Get the interrupt srr1
std r2,famguestXpc(r3) ; Save srr0 in famguest ctx
std r4,famguestXmsr(r3) ; Save srr1 in famguest ctx
li r6,lo16(MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)|MASK(MSR_FE1))
andc r6,r4,r6 ; Clear SE BE FE0 FE1
mtsrr1 r6 ; Set srr1
mr r6,r3 ; Set r6 with phys state page addr
rlwinm r7,r11,30,24,31 ; Convert exception to return code
beq+ cr1,fexcXPRG ; We had a program exception...
; We had an Alignment...

mfdar r3 ; Load dar
mfdsisr r4 ; Load dsisr
std r3,famparamX+0x8(r6) ; Set famparam 1 with dar
std r4,famparamX+0x10(r6) ; Set famparam 2 with dsisr
b fexcXret ; Join the common exit...
fexcXPRG: std r4,famparamX+0x8(r6) ; Set famparam 1 with srr1
mr r3,r4 ; Set r3 with srr1
ld r4,famguestXr4(r6) ; Load r4 from famguest context

fexcXret: ld r5,famguestXr5(r6) ; Load r5 from famguest context
ld r13,famhandlerX(r6) ; Load user address to resume
std r2,famparamX(r6) ; Set famparam 0 with srr0
std r7,famdispcodeX(r6) ; Save the exit code
ld r1,famrefconX(r6) ; Load refcon
mtcr r0 ; Restore cr
mtsrr0 r13 ; Load srr0
mr r0,r7 ; Set dispatch code
ld r7,famguestXr7(r6) ; Load r7 from famguest context
ld r6,famguestXr6(r6) ; Load r6 from famguest context
mfsprg r13,2 ; Restore r13
mfsprg r11,3 ; Restore r11
rfid ; All done, go back...
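;
; Note the two parameter layouts used above: the 32-bit comm area packs
; famparam as word slots (famparam+0x0/0x4/0x8), while the 64-bit comm area
; packs famparamX as doubleword slots (famparamX+0x0/0x8/0x10). The exit code
; is delivered through famdispcode or famdispcodeX in the same way.
;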
;
; FAM Intercept DSI ISI fault handler
;
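;
; Unlike the intercept handler above, this path does not rfi straight to the
; fam handler. It rewrites the savearea instead: the dispatch code, refcon,
; and fault information are parked in saver0-saver4, and savesrr0 is pointed
; at the fam handler, so the normal exception exit resumes the user handler.
;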
.globl EXT(vmm_fam_pf)

LEXT(vmm_fam_pf)
lwz r4,VMMXAFlgs(r2) ; Get the eXtended Architecture flags
lwz r3,VMMareaPhys(r2) ; Load phys state page addr
rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
bne fpfX ; Go handle a 64-bit virtual machine...
lwz r4,saver0+4(r13) ; Load savearea r0
lwz r5,saver1+4(r13) ; Load savearea r1
lwz r6,saver2+4(r13) ; Load savearea r2
lwz r7,saver3+4(r13) ; Load savearea r3
bt++ pf64Bitb,fpfVMareaPhys64 ; Go do this on a 64-bit machine...
slwi r3,r3,12 ; Change ppnum to physical address
b fpfVMareaPhysret ; Join up...

fpfVMareaPhys64: sldi r3,r3,12 ; Change ppnum to physical address

fpfVMareaPhysret:
stw r4,famguestr0(r3) ; Save r0 in famguest
stw r5,famguestr1(r3) ; Save r1 in famguest
stw r6,famguestr2(r3) ; Save r2 in famguest
stw r7,famguestr3(r3) ; Save r3 in famguest
lwz r4,saver4+4(r13) ; Load savearea r4
lwz r5,saver5+4(r13) ; Load savearea r5
lwz r6,saver6+4(r13) ; Load savearea r6
lwz r7,saver7+4(r13) ; Load savearea r7
stw r4,famguestr4(r3) ; Save r4 in famguest
lwz r4,spcFlags(r2) ; Load spcFlags
stw r5,famguestr5(r3) ; Save r5 in famguest
lwz r5,savesrr0+4(r13) ; Get the interrupt srr0
stw r6,famguestr6(r3) ; Save r6 in famguest
lwz r6,savesrr1+4(r13) ; Load srr1
oris r4,r4,hi16(FamVMmode) ; Set FAM mode
stw r7,famguestr7(r3) ; Save r7 in famguest
stw r4,spcFlags(r2) ; Update spcFlags
lwz r1,famrefcon(r3) ; Load refcon
lwz r2,famhandler(r3) ; Load famhandler to resume
stw r5,famguestpc(r3) ; Save srr0
stw r5,saver2+4(r13) ; Store srr0 in savearea r2
stw r5,famparam(r3) ; Store srr0 in fam param 0
stw r6,famguestmsr(r3) ; Save srr1 in famguestmsr
cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this an ISI?
rlwinm r7,r11,30,24,31 ; Convert exception to return code
beq+ cr1,fpfISI ; We had an ISI...
lwz r6,savedar+4(r13) ; Load dar from savearea
lwz r4,savedsisr(r13) ; Load dsisr from savearea
stw r6,famparam+0x4(r3) ; Store dar in fam param 1
stw r6,saver3+4(r13) ; Store dar in savearea r3
stw r4,famparam+0x8(r3) ; Store dsisr in fam param 2
stw r4,saver4+4(r13) ; Store dsisr in savearea r4
b fpfret ; Join the common exit...
fpfISI: rlwinm r6,r6,0,1,4 ; Save the bits that match the DSISR
stw r6,famparam+0x4(r3) ; Store srr1 in fam param 1
stw r6,saver3+4(r13) ; Store srr1 in savearea r3

fpfret: stw r7,saver0+4(r13) ; Set dispatch code
stw r7,famdispcode(r3) ; Set dispatch code
stw r1,saver1+4(r13) ; Store refcon in savearea r1
stw r2,savesrr0+4(r13) ; Store famhandler in srr0
fpfX: ld r4,saver0(r13) ; Load savearea r0
ld r5,saver1(r13) ; Load savearea r1
ld r6,saver2(r13) ; Load savearea r2
ld r7,saver3(r13) ; Load savearea r3
sldi r3,r3,12 ; Change ppnum to physical address
std r4,famguestXr0(r3) ; Save r0 in famguest
std r5,famguestXr1(r3) ; Save r1 in famguest
std r6,famguestXr2(r3) ; Save r2 in famguest
std r7,famguestXr3(r3) ; Save r3 in famguest
ld r4,saver4(r13) ; Load savearea r4
ld r5,saver5(r13) ; Load savearea r5
ld r6,saver6(r13) ; Load savearea r6
ld r7,saver7(r13) ; Load savearea r7
std r4,famguestXr4(r3) ; Save r4 in famguest
lwz r4,spcFlags(r2) ; Load spcFlags
std r5,famguestXr5(r3) ; Save r5 in famguest
ld r5,savesrr0(r13) ; Get the interrupt srr0
std r6,famguestXr6(r3) ; Save r6 in famguest
ld r6,savesrr1(r13) ; Load srr1
oris r4,r4,hi16(FamVMmode) ; Set FAM mode
std r7,famguestXr7(r3) ; Save r7 in famguest
stw r4,spcFlags(r2) ; Update spcFlags
ld r1,famrefconX(r3) ; Load refcon
ld r2,famhandlerX(r3) ; Load famhandler to resume
std r5,famguestXpc(r3) ; Save srr0
std r5,saver2(r13) ; Store srr0 in savearea r2
std r5,famparamX(r3) ; Store srr0 in fam param 0
std r6,famguestXmsr(r3) ; Save srr1 in famguestmsr
cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this an ISI?
rlwinm r7,r11,30,24,31 ; Convert exception to return code
beq+ cr1,fpfXISI ; We had an ISI...
ld r6,savedar(r13) ; Load dar from savearea
lwz r4,savedsisr(r13) ; Load dsisr from savearea
std r6,famparamX+0x8(r3) ; Store dar in fam param 1
std r6,saver3(r13) ; Store dar in savearea r3
std r4,famparamX+0x10(r3) ; Store dsisr in fam param 2
std r4,saver4(r13) ; Store dsisr in savearea r4
b fpfXret ; Join the common exit...
fpfXISI: rlwinm r6,r6,0,1,4 ; Save the bits that match the DSISR
std r6,famparamX+0x8(r3) ; Store srr1 in fam param 1
std r6,saver3(r13) ; Store srr1 in savearea r3

fpfXret: std r7,saver0(r13) ; Set dispatch code
std r7,famdispcodeX(r3) ; Set dispatch code
std r1,saver1(r13) ; Store refcon in savearea r1
std r2,savesrr0(r13) ; Store famhandler in srr0
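;
; The dispatch code handed back by both handlers comes straight from the
; exception type in r11: rlwinm rD,r11,30,24,31 rotates right by 2 and keeps
; the low byte. In C-style terms (illustrative only):
;
;	dispatch_code = (exception_type >> 2) & 0xff;
;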
;
; Ultra Fast Path FAM syscalls
;
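;
; These calls run entirely at exception level: no savearea is claimed and no
; kernel stack is touched unless the protection key has to change. On entry
; the CR image arrives in r13 (parked in r11), cr5_eq selects the resume-guest
; call, and cr2_eq picks set vs. get for the guest-register calls; r4 carries
; the register index (only r0-r7 live in the famguest area) and r5 the value
; to store.
;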
.align 5
.globl EXT(vmm_ufp)

LEXT(vmm_ufp)
mfsprg r3,0 ; Get the per_proc area
mr r11,r13 ; Save the CR image (passed in r13) in r11
lwz r13,VMMXAFlgs(r3) ; Get the eXtended Architecture flags
rlwinm. r13,r13,0,0,0 ; Are we doing a 64-bit virtual machine?
lwz r13,pfAvailable(r3) ; Get feature flags
mtcrf 0x02,r13 ; Put pf64Bitb etc in cr6
lwz r13,VMMareaPhys(r3) ; Load fast assist area
bt++ pf64Bitb,ufpVMareaPhys64 ; Go do this on a 64-bit machine...
slwi r13,r13,12 ; Change ppnum to physical address
b ufpVMareaPhysret ; Join up...

ufpVMareaPhys64: sldi r13,r13,12 ; Change ppnum to physical address

ufpVMareaPhysret: bne ufpX ; Go handle a 64-bit virtual machine...
bt cr5_eq,ufpResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest
cmpwi cr7,r4,0 ; Compare first arg with 0
cmpwi cr5,r4,7 ; Compare first arg with 7
cror cr1_eq,cr7_lt,cr5_gt ; Set cr1_eq if outside the 0 to 7 range
beq cr1,ufpVMret ; Return if not in the range
slwi r4,r4,2 ; Multiply index by 4
la r3,famguestr0(r13) ; Load the base address
bt cr2_eq,ufpSetGuestReg ; Set/get selector

lwzx r3,r4,r3 ; Load the guest register
b ufpVMret ; Return

ufpSetGuestReg: stwx r5,r4,r3 ; Update the guest register
li r3,0 ; Set return value
b ufpVMret ; Return
ufpResumeGuest: lwz r7,spcFlags(r3) ; Pick up the special flags
mtsrr0 r4 ; Set srr0
rlwinm. r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl
rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
stw r7,spcFlags(r3) ; Update the special flags
mfsrr1 r6 ; Get the current MSR value
lwz r4,famguestmsr(r13) ; Load guest srr1
lis r1,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
ori r1,r1,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
and r4,r4,r1 ; Keep only the controllable bits
oris r4,r4,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
ori r4,r4,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
rlwimi r4,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
rlwimi r4,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
beq ufpnokey ; Branch if not key switch
rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
cmpw cr0,r7,r2 ; Is userProtKeybit changed?
beq ufpnokey ; No, go to ResumeGuest_nokey
mr r5,r3 ; Get the per_proc area
stw r7,spcFlags(r3) ; Update the special flags
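;
; Changing userProtKeybit means the segment registers (or the STB on 64-bit
; machines) have to be rebuilt through switchSegs. That call clobbers
; registers, so everything live is parked in the next savearea around it and
; pulled straight back afterwards.
;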
bt++ pf64Bitb,ufpsave64 ; Go do this on a 64-bit machine...

lwz r3,next_savearea+4(r5) ; Get the exception save area
stw r8,saver8+4(r3) ; Save r8
stw r9,saver9+4(r3) ; Save r9
stw r10,saver10+4(r3) ; Save r10
stw r11,saver11+4(r3) ; Save r11
stw r12,saver12+4(r3) ; Save r12
stw r13,saver13+4(r3) ; Save r13
stw r14,saver14+4(r3) ; Save r14
stw r15,saver15+4(r3) ; Save r15
stw r16,saver16+4(r3) ; Save r16
stw r17,saver17+4(r3) ; Save r17
stw r18,saver18+4(r3) ; Save r18
stw r19,saver19+4(r3) ; Save r19
stw r20,saver20+4(r3) ; Save r20
stw r21,saver21+4(r3) ; Save r21
stw r22,saver22+4(r3) ; Save r22
stw r23,saver23+4(r3) ; Save r23
stw r24,saver24+4(r3) ; Save r24
stw r25,saver25+4(r3) ; Save r25
stw r26,saver26+4(r3) ; Save r26
stw r27,saver27+4(r3) ; Save r27
stw r28,saver28+4(r3) ; Save r28
stw r29,saver29+4(r3) ; Save r29
stw r30,saver30+4(r3) ; Save r30
stw r31,saver31+4(r3) ; Save r31
b ufpsaveres ; Continue
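;
; On 32-bit hardware only the low word of each 64-bit savearea slot is
; written, hence the +4 (big-endian low word) offsets above; the 64-bit
; variant below stores full doublewords at offset 0.
;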
ufpsave64: ld r3,next_savearea(r5) ; Get the exception save area
std r8,saver8(r3) ; Save r8
std r9,saver9(r3) ; Save r9
std r10,saver10(r3) ; Save r10
std r11,saver11(r3) ; Save r11
std r12,saver12(r3) ; Save r12
std r13,saver13(r3) ; Save r13
std r14,saver14(r3) ; Save r14
std r15,saver15(r3) ; Save r15
std r16,saver16(r3) ; Save r16
std r17,saver17(r3) ; Save r17
std r18,saver18(r3) ; Save r18
std r19,saver19(r3) ; Save r19
std r20,saver20(r3) ; Save r20
std r21,saver21(r3) ; Save r21
std r22,saver22(r3) ; Save r22
std r23,saver23(r3) ; Save r23
std r24,saver24(r3) ; Save r24
std r25,saver25(r3) ; Save r25
std r26,saver26(r3) ; Save r26
std r27,saver27(r3) ; Save r27
std r28,saver28(r3) ; Save r28
std r29,saver29(r3) ; Save r29
std r30,saver30(r3) ; Save r30
std r31,saver31(r3) ; Save r31
std r2,savexer(r3) ; Save xer
ufpsaveres: stw r7,spcFlags(r5) ; Update the special flags
mr r13,r3 ; Set current savearea
li r2,1 ; Get a nonzero value for the invalid-segment flag
sth r2,ppInvSeg(r5) ; Force a reload of the SRs
mr r29,r5 ; Get the per_proc area
mr r3,r4 ; Set the MSR value we are going to
bl EXT(switchSegs) ; Go handle the segment registers/STB
mr r3,r13 ; Set current savearea
mr r4,r21 ; Restore r4
bt++ pf64Bitb,ufprestore64 ; Go do this on a 64-bit machine...
lwz r8,saver8+4(r3) ; Load r8
lwz r9,saver9+4(r3) ; Load r9
lwz r10,saver10+4(r3) ; Load r10
lwz r11,saver11+4(r3) ; Load r11
lwz r12,saver12+4(r3) ; Load r12
lwz r13,saver13+4(r3) ; Load r13
lwz r14,saver14+4(r3) ; Load r14
lwz r15,saver15+4(r3) ; Load r15
lwz r16,saver16+4(r3) ; Load r16
lwz r17,saver17+4(r3) ; Load r17
lwz r18,saver18+4(r3) ; Load r18
lwz r19,saver19+4(r3) ; Load r19
lwz r20,saver20+4(r3) ; Load r20
lwz r21,saver21+4(r3) ; Load r21
lwz r22,saver22+4(r3) ; Load r22
lwz r23,saver23+4(r3) ; Load r23
lwz r24,saver24+4(r3) ; Load r24
lwz r25,saver25+4(r3) ; Load r25
lwz r26,saver26+4(r3) ; Load r26
lwz r27,saver27+4(r3) ; Load r27
lwz r28,saver28+4(r3) ; Load r28
lwz r29,saver29+4(r3) ; Load r29
lwz r30,saver30+4(r3) ; Load r30
lwz r31,saver31+4(r3) ; Load r31
b ufpnokey ; Continue
ufprestore64: ld r2,savexer(r3) ; Load xer
ld r8,saver8(r3) ; Load r8
ld r9,saver9(r3) ; Load r9
ld r10,saver10(r3) ; Load r10
mtxer r2 ; Restore xer
ld r11,saver11(r3) ; Load r11
ld r12,saver12(r3) ; Load r12
ld r13,saver13(r3) ; Load r13
ld r14,saver14(r3) ; Load r14
ld r15,saver15(r3) ; Load r15
ld r16,saver16(r3) ; Load r16
ld r17,saver17(r3) ; Load r17
ld r18,saver18(r3) ; Load r18
ld r19,saver19(r3) ; Load r19
ld r20,saver20(r3) ; Load r20
ld r21,saver21(r3) ; Load r21
ld r22,saver22(r3) ; Load r22
ld r23,saver23(r3) ; Load r23
ld r24,saver24(r3) ; Load r24
ld r25,saver25(r3) ; Load r25
ld r26,saver26(r3) ; Load r26
ld r27,saver27(r3) ; Load r27
ld r28,saver28(r3) ; Load r28
ld r29,saver29(r3) ; Load r29
ld r30,saver30(r3) ; Load r30
ld r31,saver31(r3) ; Load r31
ufpnokey: mfsprg r3,0 ; Get the per_proc area
mtsrr1 r4 ; Set srr1
lwz r0,famguestr0(r13) ; Load r0
lwz r1,famguestr1(r13) ; Load r1
lwz r2,famguestr2(r13) ; Load r2
lwz r3,famguestr3(r13) ; Load r3
lwz r4,famguestr4(r13) ; Load r4
lwz r5,famguestr5(r13) ; Load r5
lwz r6,famguestr6(r13) ; Load r6
lwz r7,famguestr7(r13) ; Load r7
ufpVMret: mfsprg r13,2 ; Restore r13
bt++ pf64Bitb,ufpVMrfi64 ; Go do this on a 64-bit machine...
mtcrf 0xFF,r11 ; Restore CR
mfsprg r11,3 ; Restore r11
rfi ; All done, go back...

ufpVMrfi64: mtcrf 0xFF,r11 ; Restore CR
mfsprg r11,3 ; Restore r11
rfid ; All done, go back...
ufpX: bt cr5_eq,ufpXResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest
cmpwi cr7,r4,0 ; Compare first arg with 0
cmpwi cr5,r4,7 ; Compare first arg with 7
cror cr1_eq,cr7_lt,cr5_gt ; Set cr1_eq if outside the 0 to 7 range
beq cr1,ufpXVMret ; Return if not in the range
slwi r4,r4,3 ; Multiply index by 8
la r3,famguestXr0(r13) ; Load the base address
bt cr2_eq,ufpXSetGuestReg ; Set/get selector

ldx r3,r4,r3 ; Load the guest register
b ufpXVMret ; Return

ufpXSetGuestReg: stdx r5,r4,r3 ; Update the guest register
li r3,0 ; Set return value
b ufpXVMret ; Return
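;
; The 64-bit virtual machine calls mirror the 32-bit ones above: the index is
; scaled by 8 instead of 4, and the famguestXr0 doubleword array is accessed
; with ldx/stdx rather than lwzx/stwx.
;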
ufpXResumeGuest: lwz r7,spcFlags(r3) ; Pick up the special flags
mtsrr0 r4 ; Set srr0
rlwinm. r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl
rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
stw r7,spcFlags(r3) ; Update the special flags
mfsrr1 r6 ; Get the current MSR value
ld r4,famguestXmsr(r13) ; Load guest srr1
lis r1,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
ori r1,r1,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
and r4,r4,r1 ; Keep only the controllable bits
oris r4,r4,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
ori r4,r4,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
rlwimi r4,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
rlwimi r4,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
beq ufpXnokey ; Branch if not key switch
rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
cmpw cr0,r7,r2 ; Is userProtKeybit changed?
beq ufpXnokey ; No, go to ResumeGuest_nokey
mr r5,r3 ; Get the per_proc area
stw r7,spcFlags(r3) ; Update the special flags
ld r3,next_savearea(r5) ; Get the exception save area
std r8,saver8(r3) ; Save r8
std r9,saver9(r3) ; Save r9
std r10,saver10(r3) ; Save r10
std r11,saver11(r3) ; Save r11
std r12,saver12(r3) ; Save r12
std r13,saver13(r3) ; Save r13
std r14,saver14(r3) ; Save r14
std r15,saver15(r3) ; Save r15
std r16,saver16(r3) ; Save r16
std r17,saver17(r3) ; Save r17
std r18,saver18(r3) ; Save r18
std r19,saver19(r3) ; Save r19
std r20,saver20(r3) ; Save r20
std r21,saver21(r3) ; Save r21
std r22,saver22(r3) ; Save r22
std r23,saver23(r3) ; Save r23
std r24,saver24(r3) ; Save r24
std r25,saver25(r3) ; Save r25
std r26,saver26(r3) ; Save r26
std r27,saver27(r3) ; Save r27
std r28,saver28(r3) ; Save r28
std r29,saver29(r3) ; Save r29
std r30,saver30(r3) ; Save r30
std r31,saver31(r3) ; Save r31
std r2,savexer(r3) ; Save xer
stw r7,spcFlags(r5) ; Update the special flags
mr r13,r3 ; Set current savearea
li r2,1 ; Get a nonzero value for the invalid-segment flag
sth r2,ppInvSeg(r5) ; Force a reload of the SRs
mr r29,r5 ; Get the per_proc area
mr r3,r4 ; Set the MSR value we are going to
bl EXT(switchSegs) ; Go handle the segment registers/STB
mr r3,r13 ; Set current savearea
mr r4,r21 ; Restore r4
ld r2,savexer(r3) ; Load xer
ld r8,saver8(r3) ; Load r8
ld r9,saver9(r3) ; Load r9
ld r10,saver10(r3) ; Load r10
mtxer r2 ; Restore xer
ld r11,saver11(r3) ; Load r11
ld r12,saver12(r3) ; Load r12
ld r13,saver13(r3) ; Load r13
ld r14,saver14(r3) ; Load r14
ld r15,saver15(r3) ; Load r15
ld r16,saver16(r3) ; Load r16
ld r17,saver17(r3) ; Load r17
ld r18,saver18(r3) ; Load r18
ld r19,saver19(r3) ; Load r19
ld r20,saver20(r3) ; Load r20
ld r21,saver21(r3) ; Load r21
ld r22,saver22(r3) ; Load r22
ld r23,saver23(r3) ; Load r23
ld r24,saver24(r3) ; Load r24
ld r25,saver25(r3) ; Load r25
ld r26,saver26(r3) ; Load r26
ld r27,saver27(r3) ; Load r27
ld r28,saver28(r3) ; Load r28
ld r29,saver29(r3) ; Load r29
ld r30,saver30(r3) ; Load r30
ld r31,saver31(r3) ; Load r31
ufpXnokey: mtsrr1 r4 ; Set srr1
ld r0,famguestXr0(r13) ; Load r0
ld r1,famguestXr1(r13) ; Load r1
ld r2,famguestXr2(r13) ; Load r2
ld r3,famguestXr3(r13) ; Load r3
ld r4,famguestXr4(r13) ; Load r4
ld r5,famguestXr5(r13) ; Load r5
ld r6,famguestXr6(r13) ; Load r6
ld r7,famguestXr7(r13) ; Load r7
ufpXVMret: mfsprg r13,2 ; Restore r13
mtcrf 0xFF,r11 ; Restore CR
mfsprg r11,3 ; Restore r11