/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <assym.s>
#include <debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>

/*
 * This file contains implementations for the Virtual Machine Monitor
 * facility.
 */

#define vmmMapDone 31
#define vmmDoing64 30


/*
 * int vmm_dispatch(savearea, act);
 *
 * vmm_dispatch is a PPC only system call. It is used with a selector (first
 * parameter) to determine what function to enter. This is treated as an extension
 * of hw_exceptions.
 *
 * Inputs:
 * R4 = current activation
 * R16 = current thread
 * R30 = current savearea
 */
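
;
; A rough sketch of the dispatch below, in C-style pseudocode kept in
; comments (names here are illustrative, not definitions from this file):
;
;   sel = save->save_r3;                          // selector arrives in R3
;   if (sel == kVmmExecuteVM) switchIntoVM(...);  // fast path to the VM
;   if (sel >= vmm_count) goto vmmBogus;          // invalid selector
;   entry = &vmm_dispatch_table[sel];             // 8 bytes per entry
;   if (in_FAM && !entry->valid_in_fam) goto vmmBogus;
;   rc = entry->routine(act, p1, ..., p6);        // call through LR
;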

    .align 5 ; Line up on cache line
    .globl EXT(vmm_dispatch_table)

LEXT(vmm_dispatch_table)

/* Don't change the order of these routines in the table. It's */
/* OK to add new routines, but they must be added at the bottom. */

    .long EXT(vmm_get_version_sel) ; Get the version of the VMM interface
    .long 0 ; Not valid in Fam
    .long EXT(vmm_get_features_sel) ; Get the features of the VMM interface
    .long 0 ; Not valid in Fam
    .long EXT(vmm_init_context_sel) ; Initializes a new VMM context
    .long 0 ; Not valid in Fam
    .long EXT(vmm_tear_down_context) ; Tears down a previously-allocated VMM context
    .long 0 ; Not valid in Fam
    .long EXT(vmm_tear_down_all) ; Tears down all VMMs
    .long 0 ; Not valid in Fam
    .long EXT(vmm_map_page32) ; Maps a page from the main address space into the VM space - supports 32-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_get_page_mapping32) ; Returns client va associated with VM va - supports 32-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_unmap_page32) ; Unmaps a page from the VM space - supports 32-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_unmap_all_pages) ; Unmaps all pages from the VM space
    .long 1 ; Valid in Fam
    .long EXT(vmm_get_page_dirty_flag32) ; Gets the change bit for a page and optionally clears it - supports 32-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_get_float_state) ; Gets current floating point state
    .long 0 ; Not valid in Fam
    .long EXT(vmm_get_vector_state) ; Gets current vector state
    .long 0 ; Not valid in Fam
    .long EXT(vmm_set_timer) ; Sets a timer value
    .long 1 ; Valid in Fam
    .long EXT(vmm_get_timer) ; Gets a timer value
    .long 1 ; Valid in Fam
    .long EXT(switchIntoVM) ; Switches to the VM context
    .long 1 ; Valid in Fam
    .long EXT(vmm_protect_page32) ; Sets protection values for a page - supports 32-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_map_execute32) ; Maps a page and launches VM - supports 32-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_protect_execute32) ; Sets protection values for a page and launches VM - supports 32-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_map_list32) ; Maps a list of pages - supports 32-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_unmap_list32) ; Unmaps a list of pages - supports 32-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_fam_reserved) ; exit from Fam to host
    .long 1 ; Valid in Fam
    .long EXT(vmm_fam_reserved) ; resume guest from Fam
    .long 1 ; Valid in Fam
    .long EXT(vmm_fam_reserved) ; get guest register from Fam
    .long 1 ; Valid in Fam
    .long EXT(vmm_fam_reserved) ; set guest register from Fam
    .long 1 ; Valid in Fam
    .long EXT(vmm_activate_XA) ; Activate extended architecture features for a VM
    .long 0 ; Not valid in Fam
    .long EXT(vmm_deactivate_XA) ; Deactivate extended architecture features for a VM
    .long 0 ; Not valid in Fam
    .long EXT(vmm_get_XA) ; Get extended architecture features from a VM
    .long 1 ; Valid in Fam
    .long EXT(vmm_map_page) ; Map a host to guest address space - supports 64-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_get_page_mapping) ; Get host address of a guest page - supports 64-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_unmap_page) ; Unmap a guest page - supports 64-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_get_page_dirty_flag) ; Check if guest page modified - supports 64-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_protect_page) ; Sets protection values for a page - supports 64-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_map_execute) ; Map guest page and launch - supports 64-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_protect_execute) ; Set prot attributes and launch - supports 64-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_map_list64) ; Map a list of pages into guest address spaces - supports 64-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_unmap_list64) ; Unmap a list of pages from guest address spaces - supports 64-bit
    .long 1 ; Valid in Fam
    .long EXT(vmm_max_addr) ; Returns the maximum virtual address
    .long 1 ; Valid in Fam
#if 0
    .long EXT(vmm_set_guest_memory) ; Set guest memory extent
    .long 0 ; Not valid in Fam
    .long EXT(vmm_purge_local) ; Purge all local guest mappings
    .long 1 ; Valid in Fam
#endif
    .set vmm_count,(.-EXT(vmm_dispatch_table))/8 ; Get the top number
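;
; Each entry above is a pair of words: the routine address followed by a
; FAM-validity flag, so an entry is 8 bytes and the division by 8 yields
; the number of selectors. A hypothetical C equivalent of the layout:
;
;   struct vmm_dispatch_entry { void (*routine)(void); long valid_in_fam; };
;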


    .align 5
    .globl EXT(vmm_dispatch)

LEXT(vmm_dispatch)

    lwz r11,saver3+4(r30) ; Get the selector
    mr r3,r4 ; All of our functions want the activation as the first parm
    lis r10,hi16(EXT(vmm_dispatch_table)) ; Get top half of table
    cmplwi r11,kVmmExecuteVM ; Should we switch to the VM now?
    cmplwi cr1,r11,vmm_count ; See if we have a valid selector
    ori r10,r10,lo16(EXT(vmm_dispatch_table)) ; Get low half of table
    lwz r4,saver4+4(r30) ; Get 1st parameter after selector
    beq+ EXT(switchIntoVM) ; Yes, go switch to it....
    rlwinm r11,r11,3,0,28 ; Index into table
    bge- cr1,vmmBogus ; It is a bogus entry
    add r12,r10,r11 ; Get the vmm dispatch syscall entry
    mfsprg r10,1 ; Get the current activation
    lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
    lwz r13,0(r12) ; Get address of routine
    lwz r12,4(r12) ; Get validity flag
    lwz r5,spcFlags(r10) ; Get per_proc special flags
    cmpwi cr1,r12,0 ; Check Fam valid
    rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
    crand cr0_eq,cr1_eq,cr0_gt ; In Fam and invalid syscall
    beq vmmBogus ; Intercept to host
    lwz r5,saver5+4(r30) ; Get 2nd parameter after selector - note that some of these parameters may actually be long longs
    lwz r6,saver6+4(r30) ; Get 3rd parameter after selector
    mtlr r13 ; Set the routine address
    lwz r7,saver7+4(r30) ; Get 4th parameter after selector
    lwz r8,saver8+4(r30) ; Get 5th parameter after selector
    lwz r9,saver9+4(r30) ; Get 6th parameter after selector
;
; NOTE: some of the above parameters are actually long longs. We have glue code that transforms
; all needed parameters and/or adds 32-/64-bit flavors to the needed functions.
;
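;
; As a sketch of that glue, the 32-bit thunks further down simply widen
; their word-sized addresses to long longs before tail-branching to the
; 64-bit implementation; roughly (illustrative C, not a declaration from
; this file):
;
;   return vmm_map_page(act, index, (addr64_t)paddr32, (addr64_t)vaddr32, prot);
;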

    blrl ; Call function

vmmRetPt: li r0,0 ; Clear this out
    stw r0,saver3(r30) ; Make sure top of RC is clear
    stw r3,saver3+4(r30) ; Pass back the return code
    stw r0,saver4(r30) ; Make sure bottom of RC is clear (just in case)
    stw r4,saver4+4(r30) ; Pass back the bottom return code (just in case)
    li r3,1 ; Set normal return with check for AST
    b EXT(ppcscret) ; Go back to handler...

vmmBogus:
    mfsprg r3,1 ; Get the current activation
    lwz r10,ACT_PER_PROC(r3) ; Get the per_proc block
    lwz r5,spcFlags(r10) ; Get per_proc special flags
    rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
    bne vmmexitcall ; Do it to it
    li r3,0 ; Bogus selector, treat like a bogus system call
    b EXT(ppcscret) ; Go back to handler...


    .align 5
    .globl EXT(vmm_get_version_sel)

LEXT(vmm_get_version_sel) ; Selector based version of get version

    lis r3,hi16(EXT(vmm_get_version))
    ori r3,r3,lo16(EXT(vmm_get_version))
    b selcomm


    .align 5
    .globl EXT(vmm_get_features_sel)

LEXT(vmm_get_features_sel) ; Selector based version of get features

    lis r3,hi16(EXT(vmm_get_features))
    ori r3,r3,lo16(EXT(vmm_get_features))
    b selcomm


    .align 5
    .globl EXT(vmm_init_context_sel)

LEXT(vmm_init_context_sel) ; Selector based version of init context

    lwz r4,saver4+4(r30) ; Get the passed in version
    lwz r5,saver5+4(r30) ; Get the passed in comm area
    lis r3,hi16(EXT(vmm_init_context))
    stw r4,saver3+4(r30) ; Cheat and move this parameter over
    ori r3,r3,lo16(EXT(vmm_init_context))
    stw r5,saver4+4(r30) ; Cheat and move this parameter over

selcomm: mtlr r3 ; Set the real routine address
    mr r3,r30 ; Pass in the savearea
    blrl ; Call the function
    b EXT(ppcscret) ; Go back to handler...

    .align 5
    .globl EXT(vmm_map_page32)

LEXT(vmm_map_page32)
    mr r9,r7 ; Move prot to correct parm
    mr r8,r6 ; Move guest address to low half of long long
    li r7,0 ; Clear high half of guest address
    mr r6,r5 ; Move host address to low half of long long
    li r5,0 ; Clear high half of host address
    b EXT(vmm_map_page) ; Transition to real function...

    .align 5
    .globl EXT(vmm_get_page_mapping32)

LEXT(vmm_get_page_mapping32)
    mr r6,r5 ; Move guest address to low half of long long
    li r5,0 ; Clear high half of guest address
    bl EXT(vmm_get_page_mapping) ; Transition to real function...
    mr r3,r4 ; Convert addr64_t to vm_offset_t, dropping top half
    b vmmRetPt ; Join normal return...

    .align 5
    .globl EXT(vmm_unmap_page32)

LEXT(vmm_unmap_page32)
    mr r6,r5 ; Move guest address to low half of long long
    li r5,0 ; Clear high half of guest address
    b EXT(vmm_unmap_page) ; Transition to real function...

    .align 5
    .globl EXT(vmm_get_page_dirty_flag32)

LEXT(vmm_get_page_dirty_flag32)
    mr r7,r6 ; Move reset flag
    mr r6,r5 ; Move guest address to low half of long long
    li r5,0 ; Clear high half of guest address
    b EXT(vmm_get_page_dirty_flag) ; Transition to real function...

    .align 5
    .globl EXT(vmm_protect_page32)

LEXT(vmm_protect_page32)
    mr r7,r6 ; Move protection bits
    mr r6,r5 ; Move guest address to low half of long long
    li r5,0 ; Clear high half of guest address
    b EXT(vmm_protect_page) ; Transition to real function...

    .align 5
    .globl EXT(vmm_map_execute32)

LEXT(vmm_map_execute32)
    mr r9,r7 ; Move prot to correct parm
    mr r8,r6 ; Move guest address to low half of long long
    li r7,0 ; Clear high half of guest address
    mr r6,r5 ; Move host address to low half of long long
    li r5,0 ; Clear high half of host address
    b EXT(vmm_map_execute) ; Transition to real function...

    .align 5
    .globl EXT(vmm_protect_execute32)

LEXT(vmm_protect_execute32)
    mr r7,r6 ; Move protection bits
    mr r6,r5 ; Move guest address to low half of long long
    li r5,0 ; Clear high half of guest address
    b EXT(vmm_protect_execute) ; Transition to real function...

    .align 5
    .globl EXT(vmm_map_list32)

LEXT(vmm_map_list32)
    li r6,0 ; Set 32-bit flavor
    b EXT(vmm_map_list) ; Go to common routine...

    .align 5
    .globl EXT(vmm_map_list64)

LEXT(vmm_map_list64)
    li r6,1 ; Set 64-bit flavor
    b EXT(vmm_map_list) ; Go to common routine...

    .align 5
    .globl EXT(vmm_unmap_list32)

LEXT(vmm_unmap_list32)
    li r6,0 ; Set 32-bit flavor
    b EXT(vmm_unmap_list) ; Go to common routine...

    .align 5
    .globl EXT(vmm_unmap_list64)

LEXT(vmm_unmap_list64)
    li r6,1 ; Set 64-bit flavor
    b EXT(vmm_unmap_list) ; Go to common routine...

/*
 * Here is where we transition to the virtual machine.
 *
 * We will swap the register context in the savearea with that which is saved in our shared
 * context area. We will do some validity checking, clear any nasty bits in the MSR, and force
 * the mandatory ones on.
 *
 * Then we will set up the new address space to run with, and anything else that is normally part
 * of a context switch.
 *
 * The vmm_execute_vm entry point is for the fused vmm_map_execute and vmm_protect_execute
 * calls. This is called, but never returned from. We always go directly back to the
 * user from here.
 */
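
;
; Rough outline of the launch path that follows (a sketch, not a spec):
;   1. Validate the context index and address space ID; take the bogus
;      returns if either is out of range.
;   2. Check immediate intercepts (external stop, timer pop) and return
;      to the emulator instead of launching if one is pending.
;   3. swapCtxt: exchange guest and emulator register state in place.
;   4. Switch to the guest pmap and exit through the normal exception
;      handler, which unstacks saveareas and checks for ASTs.
;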


    .align 5
    .globl EXT(vmm_execute_vm)

LEXT(vmm_execute_vm)
    lwz r30,ACT_MACT_PCB(r3) ; Restore the savearea pointer because it could be trash here
    b EXT(switchIntoVM) ; Join common...


    .align 5
    .globl EXT(switchIntoVM)

LEXT(switchIntoVM)
    mfsprg r10,1 ; Get the current activation
    lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
    rlwinm r31,r4,24,24,31 ; Get the address space
    rlwinm r4,r4,0,24,31 ; Isolate the context id
    lwz r28,vmmControl(r3) ; Pick up the control table address
    subi r4,r4,1 ; Switch to zero offset
    rlwinm. r2,r28,0,0,30 ; Is there a context there? (Note: we will ignore bit 31 so that we
                          ; do not try this while we are transitioning off to on)
    cmplwi cr1,r4,kVmmMaxContexts ; Is the index valid?
    beq- vmmBogus ; Not started, treat like a bogus system call
    subic. r31,r31,1 ; Make address space 0 based and test if we use default
    mulli r2,r4,vmmCEntrySize ; Get displacement from index
    bge- cr1,swvmmBogus ; Index is bogus...
    add r2,r2,r28 ; Point to the entry
    bge-- swvmmDAdsp ; There was an explicit address space request
    mr r31,r4 ; Default the address space to the context ID

swvmmDAdsp: la r2,vmmc(r2) ; Get the offset to the context array
    lwz r8,vmmGFlags(r28) ; Get the general flags
    lwz r4,vmmFlags(r2) ; Get the flags for the selected entry
    crset vmmMapDone ; Assume we will be mapping something
    lwz r5,vmmContextKern(r2) ; Get the context area address
    rlwinm. r26,r4,0,vmmInUseb,vmmInUseb ; See if the slot is in use
    cmplwi cr1,r31,kVmmMaxContexts ; See if we have a valid address space ID
    rlwinm r8,r8,0,24,31 ; Clean up address space
    beq-- swvmmBogus ; This context is no good...

    la r26,vmmAdsp(r28) ; Point to the pmaps
    sub r8,r8,r31 ; Get diff between launching address space - 1 and last mapped into (should be 1 if the same)
    rlwinm r31,r31,2,0,29 ; Index to the pmap
    cmplwi r8,1 ; See if we have the same address space
    bge-- cr1,swvmmBogAdsp ; Address space is no good...
    lwzx r31,r26,r31 ; Get the requested address space pmap
    li r0,0 ; Get a 0 in case we need to trash redrive
    lwz r15,spcFlags(r10) ; Get per_proc special flags
    beq swvmmAdspOk ; Do not invalidate address space if we are launching the same
    crclr vmmMapDone ; Clear map done flag
    stb r0,vmmGFlags+3(r28) ; Clear the last mapped address space ID so we will not redrive later
;
; Here we check for any immediate intercepts. So far, the only
; two of these are a timer pop and an external stop. We will not dispatch if
; either is true. The emulator needs to either reset the timer (i.e., set it
; to 0) or set a future time, or, for an external stop, set the vmmXStopRst flag.
;
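;
; In rough C terms (a sketch only), the intercept checks below amount to:
;
;   if (flags & vmmXStop)    return kVmmStopped;     // external stop posted
;   if (flags & vmmTimerPop) return kVmmReturnNull;  // timer has popped
;   // otherwise fall through and launch the guest
;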

swvmmAdspOk:
    rlwinm. r0,r15,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
    stw r31,vmmPmap(r2) ; Save the last dispatched address space
    bne vmmFamGuestResume
    lwz r6,vmmCntrl(r5) ; Get the control field
    rlwinm. r7,r6,0,vmmXStartb,vmmXStartb ; Clear all but start bit
    beq+ swvmChkStop ; Do not reset stop
    andc r6,r6,r7 ; Clear it
    li r8,vmmFlags ; Point to the flags
    stw r6,vmmCntrl(r5) ; Set the control field

swvmtryx: lwarx r4,r8,r2 ; Pick up the flags
    rlwinm r4,r4,0,vmmXStopb+1,vmmXStopb-1 ; Clear the stop bit
    stwcx. r4,r8,r2 ; Save the updated field
    bne- swvmtryx ; Try again...

swvmChkStop:
    rlwinm. r26,r4,0,vmmXStopb,vmmXStopb ; Is this VM stopped?
    bne-- swvmSetStop ; Yes...

    rlwinm. r26,r4,0,vmmTimerPopb,vmmTimerPopb ; Did the timer go pop?
    cmplwi cr1,r31,0 ; Is there actually an address space defined?
    bne-- swvmTimerPop ; Yes...

;
; Special note: we need to intercept any attempt to launch a guest into a non-existent address space.
; We will just go emulate an ISI if there is not one.
;
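;
; That is (sketch): if the selected vmmPmap slot is zero, we fabricate a
; kVmmReturnInstrPageFault return, handing back the guest PC and a
; PTE-miss DSISR, rather than launching into nothing.
;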

    beq-- cr1,swvmEmulateISI ; We are trying to launch into an undefined address space. This is not so good...

;
; Here is where we actually swap into the VM (alternate) context.
; We will bulk do a wholesale swap of the registers in the context area (the VM's)
; with the ones in the savearea (our main code). During the copy, we will fix up the
; MSR, forcing on a few bits and turning off a few others. Then we will deal with the
; PMAP and other per_proc stuff. Finally, we will exit back through the main exception
; handler to deal with unstacking saveareas and ASTs, etc.
;
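
;
; The MSR fix-up done during that copy is roughly (C-style sketch):
;
;   guest_msr = (guest_msr & MSR_IMPORT_BITS)  // user-controllable bits only
;             | MSR_EXPORT_MASK_SET;           // mandatory bits forced on
;
; so a guest can never be launched with supervisor-only MSR bits set.
;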

swvmDoSwitch:

;
; First, we save the volatile registers we care about. Remember, all register
; handling here is pretty funky anyway, so we just pick the ones that are ok.
;
    mr r26,r3 ; Save the activation pointer

    la r11,vmmFacCtx(r2) ; Point to the virtual machine facility context
    mr r27,r2 ; Save the context entry
    stw r11,deferctx(r3) ; Start using the virtual machine facility context when we exit

    lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
    mr r3,r31 ; Get the pointer to the PMAP
    oris r15,r11,hi16(runningVM) ; Show that we are swapped to the VM right now
    bl EXT(hw_set_user_space_dis) ; Swap the address spaces
    lwz r17,vmmFlags(r27) ; Get the status flags
    lwz r20,vmmContextKern(r27) ; Get the state page kernel addr
    lwz r21,vmmCntrl(r20) ; Get vmmCntrl
    rlwinm. r22,r21,0,vmmFamEnab,vmmFamEnab ; Is vmmFamEnab set?
    lwz r22,vmmXAFlgs(r27) ; Get the eXtended Architecture flags
    stw r22,VMMXAFlgs(r10) ; Store vmmXAFlgs in per_proc VMMXAFlgs
    beq swvmNoFam ; No Fam intercept
    rlwinm. r22,r22,0,0,0 ; Are we doing a 64-bit virtual machine?
    rlwimi r15,r21,32+vmmFamSetb-FamVMmodebit,FamVMmodebit,FamVMmodebit ; Set FamVMmode bit
    rlwinm r21,r21,0,vmmFamSetb+1,vmmFamSetb-1 ; Clear FamSet bit
    bne swvmXfamintercpt
    lwz r22,famintercepts(r20) ; Load intercept bit field
    b swvmfamintercptres
swvmXfamintercpt:
    lwz r22,faminterceptsX(r20) ; Load intercept bit field
swvmfamintercptres:
    stw r21,vmmCntrl(r20) ; Update vmmCntrl
    lwz r19,vmmContextPhys(r27) ; Get vmmFAMarea address
    stw r22,vmmFAMintercept(r27) ; Set vmmFAMintercept
    stw r22,FAMintercept(r10) ; Store vmmFAMintercept in per_proc FAMintercept
    stw r19,VMMareaPhys(r10) ; Store VMMareaPhys
    oris r15,r15,hi16(FamVMena) ; Set FamVMenabit
swvmNoFam:
    stw r27,vmmCEntry(r26) ; Remember what context we are running
    bf++ vmmMapDone,swvmNoMap ; We have not mapped anything or it was not for this address space

;
; This little bit of hoopla here (triggered by vmmMapDone) is
; a performance enhancement. This will change the returning savearea
; to look like we had a DSI rather than a system call. Then, setting
; the redrive bit, the exception handler will redrive the exception as
; a DSI, entering the last mapped address into the hash table. This keeps
; double faults from happening. Note that there is only a gain if the VM
; takes a fault, the emulator merely resolves it, and then begins
; the VM execution again. It seems like this should be the normal case.
;
; Note that we need to revisit this when we move the virtual machines to the task because
; then it will be possible for more than one thread to access this stuff at the same time.
;
    lwz r3,SAVflags(r30) ; Pick up the savearea flags
    lwz r2,vmmLastMap(r28) ; Get the last mapped address
    lwz r14,vmmLastMap+4(r28) ; Get the last mapped address low half
    li r20,T_DATA_ACCESS ; Change to DSI fault
    oris r3,r3,hi16(SAVredrive) ; Set exception redrive
    stw r2,savedar(r30) ; Set the DAR to the last thing we mapped
    stw r14,savedar+4(r30) ; Set the DAR to the last thing we mapped
    stw r3,SAVflags(r30) ; Turn on the redrive request
    lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
    li r0,0 ; Clear
    stw r20,saveexception(r30) ; Say we need to emulate a DSI
    stw r2,savedsisr(r30) ; Pretend we have a PTE miss
    stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of

swvmNoMap: lwz r20,vmmContextKern(r27) ; Get the comm area
    rlwimi r15,r17,32-(floatCngbit-vmmFloatCngdb),floatCngbit,vectorCngbit ; Shift and insert changed bits
    lwz r20,vmmCntrl(r20) ; Get the control flags
    rlwimi r17,r11,8,24,31 ; Save the old spf flags
    rlwimi r15,r20,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
    stw r15,spcFlags(r10) ; Set per_proc copy of the special flags
    stw r15,ACT_MACT_SPF(r26) ; Set the special flags

    stw r17,vmmFlags(r27) ; Set the status flags

    bl swapCtxt ; First, swap the general register state

    lwz r17,vmmContextKern(r27) ; Get the comm area back
    la r25,vmmFacCtx(r27) ; Point to the facility context
    lwz r15,vmmCntrl(r17) ; Get the control flags again
    mfsprg r29,1 ; Get the current activation
    lwz r29,ACT_PER_PROC(r29) ; Get the per_proc block

;
; Check if there is new floating point context to load
;

    rlwinm. r0,r15,0,vmmFloatLoadb,vmmFloatLoadb ; Are there new floating point values?
    lhz r29,PP_CPU_NUMBER(r29) ; Get our cpu number
    li r14,vmmppcFPRs ; Get displacement to the new values
    andc r15,r15,r0 ; Clear the bit
    beq+ swvmNoNewFloats ; Nope, good...

    lwz r19,FPUcpu(r25) ; Get the last CPU we ran on

    stw r29,FPUcpu(r25) ; Claim the context for ourselves

    eieio ; Make sure this stays in order

    lis r18,hi16(EXT(PerProcTable)) ; Set base PerProcTable
    mulli r19,r19,ppeSize ; Find offset to the owner per_proc_entry
    ori r18,r18,lo16(EXT(PerProcTable)) ; Set base PerProcTable
    li r16,FPUowner ; Displacement to float owner
    add r19,r18,r19 ; Point to the owner per_proc_entry
    lwz r19,ppe_vaddr(r19) ; Point to the owner per_proc

swvminvfpu: lwarx r18,r16,r19 ; Get the owner

    sub r0,r18,r25 ; Subtract one from the other
    sub r3,r25,r18 ; Subtract the other from the one
    or r3,r3,r0 ; Combine them
    srawi r3,r3,31 ; Get a 0 if equal or -1 if not
    and r18,r18,r3 ; Make 0 if same, unchanged if not
    stwcx. r18,r16,r19 ; Try to invalidate it
    bne-- swvminvfpu ; Try again if there was a collision...
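
; The subtract/or/srawi/and sequence above is a branch-free compare:
; (a-b)|(b-a) has its sign bit set exactly when a != b, so the srawi
; yields 0 if the owner is our context and -1 otherwise; the and then
; stores back 0 (releasing ownership) only when we were the owner, and
; rewrites the unchanged owner word otherwise.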

    lwz r3,FPUsave(r25) ; Get the FPU savearea
    dcbt r14,r17 ; Touch in first line of new stuff
    mr. r3,r3 ; Is there one?
    bne+ swvmGotFloat ; Yes...

    bl EXT(save_get) ; Get a savearea

    li r7,SAVfloat ; Get floating point flag
    stw r26,SAVact(r3) ; Save our activation
    li r0,0 ; Get a zero
    stb r7,SAVflags+2(r3) ; Set that this is floating point
    stw r0,SAVprev+4(r3) ; Clear the back chain
    stw r0,SAVlevel(r3) ; We are always at level 0 (user state)

    stw r3,FPUsave(r25) ; Chain us to context

swvmGotFloat:
    la r4,savefp0(r3) ; Point to the destination
    mr r21,r3 ; Save the save area
    la r3,vmmppcFPRs(r17) ; Point to the source
    li r5,32*8 ; Get the size (32 FPRs at 8 bytes each)

    bl EXT(bcopy) ; Copy the new values

    lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
    stw r15,vmmCntrl(r17) ; Save the control flags sans vmmFloatLoad
    rlwinm r11,r11,0,floatCngbit+1,floatCngbit-1 ; Clear the changed bit here
    lwz r14,vmmStat(r17) ; Get the status flags
    mfsprg r10,1 ; Get the current activation
    lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
    stw r11,ACT_MACT_SPF(r26) ; Set the special flags
    rlwinm r14,r14,0,vmmFloatCngdb+1,vmmFloatCngdb-1 ; Clear the changed flag
    stw r11,spcFlags(r10) ; Set per_proc copy of the special flags
    stw r14,vmmStat(r17) ; Set the status flags sans vmmFloatCngd

;
; Check if there is new vector context to load
;

swvmNoNewFloats:
    rlwinm. r0,r15,0,vmmVectLoadb,vmmVectLoadb ; Are there new vector values?
    li r14,vmmppcVRs ; Get displacement to the new values
    andc r15,r15,r0 ; Clear the bit
    beq+ swvmNoNewVects ; Nope, good...

    lwz r19,VMXcpu(r25) ; Get the last CPU we ran on

    stw r29,VMXcpu(r25) ; Claim the context for ourselves

    eieio ; Make sure this stays in order

    lis r18,hi16(EXT(PerProcTable)) ; Set base PerProcTable
    mulli r19,r19,ppeSize ; Find offset to the owner per_proc_entry
    ori r18,r18,lo16(EXT(PerProcTable)) ; Set base PerProcTable
    li r16,VMXowner ; Displacement to vector owner
    add r19,r18,r19 ; Point to the owner per_proc_entry
    lwz r19,ppe_vaddr(r19) ; Point to the owner per_proc

swvminvvec: lwarx r18,r16,r19 ; Get the owner

    sub r0,r18,r25 ; Subtract one from the other
    sub r3,r25,r18 ; Subtract the other from the one
    or r3,r3,r0 ; Combine them
    srawi r3,r3,31 ; Get a 0 if equal or -1 if not
    and r18,r18,r3 ; Make 0 if same, unchanged if not
    stwcx. r18,r16,r19 ; Try to invalidate it
    bne-- swvminvvec ; Try again if there was a collision...

swvminvved: lwz r3,VMXsave(r25) ; Get the vector savearea
    dcbt r14,r17 ; Touch in first line of new stuff
    mr. r3,r3 ; Is there one?
    bne+ swvmGotVect ; Yes...

    bl EXT(save_get) ; Get a savearea

    li r7,SAVvector ; Get the vector type flag
    stw r26,SAVact(r3) ; Save our activation
    li r0,0 ; Get a zero
    stb r7,SAVflags+2(r3) ; Set that this is vector
    stw r0,SAVprev+4(r3) ; Clear the back chain
    stw r0,SAVlevel(r3) ; We are always at level 0 (user state)

    stw r3,VMXsave(r25) ; Chain us to context

swvmGotVect:
    mr r21,r3 ; Save the pointer to the savearea
    la r4,savevr0(r3) ; Point to the destination
    la r3,vmmppcVRs(r17) ; Point to the source
    li r5,32*16 ; Get the size (32 vectors at 16 bytes each)

    bl EXT(bcopy) ; Copy the new values

    lwz r8,savevrsave(r30) ; Get the current VRSave

    lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
    stw r15,vmmCntrl(r17) ; Save the control flags sans vmmVectLoad
    rlwinm r11,r11,0,vectorCngbit+1,vectorCngbit-1 ; Clear the changed bit here
    stw r8,savevrvalid(r21) ; Set the current VRSave as valid saved
    lwz r14,vmmStat(r17) ; Get the status flags
    mfsprg r10,1 ; Get the current activation
    lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
    stw r11,ACT_MACT_SPF(r26) ; Set the special flags
    rlwinm r14,r14,0,vmmVectCngdb+1,vmmVectCngdb-1 ; Clear the changed flag
    stw r11,spcFlags(r10) ; Set per_proc copy of the special flags
    stw r14,vmmStat(r17) ; Set the status flags sans vmmVectCngd

swvmNoNewVects:
    li r3,1 ; Show normal exit with check for AST
    mr r16,r26 ; Restore the thread pointer
    b EXT(ppcscret) ; Go back to handler...

    .align 5

swvmmBogus: li r2,kVmmBogusContext ; Set bogus index return
    li r0,0 ; Clear
    li r3,1 ; Set normal return with check for AST
    stw r0,saver3(r30) ; Clear upper half
    stw r2,saver3+4(r30) ; Pass back the return code
    b EXT(ppcscret) ; Go back to handler...

swvmmBogAdsp:
    li r2,kVmmInvalidAdSpace ; Set bogus address space return
    li r0,0 ; Clear
    li r3,1 ; Set normal return with check for AST
    stw r0,saver3(r30) ; Clear upper half
    stw r2,saver3+4(r30) ; Pass back the return code
    b EXT(ppcscret) ; Go back to handler...

swvmSetStop:
    li r2,kVmmStopped ; Set stopped return
    li r0,0 ; Clear
    li r3,1 ; Set normal return with check for AST
    stw r0,saver3(r30) ; Clear upper half
    stw r2,saver3+4(r30) ; Pass back the return code
    stw r2,return_code(r5) ; Save the exit code
    b EXT(ppcscret) ; Go back to handler...

swvmTimerPop:
    li r2,kVmmReturnNull ; Set null return
    li r0,0 ; Clear
    li r3,1 ; Set normal return with check for AST
    stw r0,saver3(r30) ; Clear upper half
    stw r2,saver3+4(r30) ; Pass back the return code
    stw r2,return_code(r5) ; Save the exit code
    b EXT(ppcscret) ; Go back to handler...

swvmEmulateISI:
    mfsprg r10,2 ; Get feature flags
    lwz r11,vmmXAFlgs(r28) ; Get the eXtended Architecture flags
    mtcrf 0x02,r10 ; Move pf64Bit to its normal place in CR6
    rlwinm. r11,r11,0,0,0 ; Are we doing a 64-bit virtual machine?
    li r2,kVmmReturnInstrPageFault ; Set ISI
    crnot vmmDoing64,cr0_eq ; Remember if this is a 64-bit VM
    li r0,0 ; Clear
    li r3,1 ; Set normal return with check for AST
    stw r0,saver3(r30) ; Clear upper half
    stw r2,saver3+4(r30) ; Pass back the return code
    stw r2,return_code(r5) ; Save the exit code
    lis r7,hi16(MASK(DSISR_HASH)) ; Pretend like we got a PTE miss
    bt vmmDoing64,vmISI64 ; Go do this for a 64-bit VM...

    lwz r10,vmmppcpc(r5) ; Get the PC as failing address
    stw r10,return_params+0(r5) ; Save PC as first return parm
    stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm
    b EXT(ppcscret) ; Go back to handler...

vmISI64: ld r10,vmmppcXpc(r5) ; Get the PC as failing address
    std r10,return_paramsX+0(r5) ; Save PC as first return parm
    std r7,return_paramsX+8(r5) ; Save the pseudo-DSISR as second return parm
    b EXT(ppcscret) ; Go back to handler...

;
; These syscalls are invalid; they are handled on the FAM syscall fast path
;

    .align 5
    .globl EXT(vmm_fam_reserved)

LEXT(vmm_fam_reserved)
    li r3,0 ; Force exception
    b EXT(ppcscret) ; Go back to handler...

;
; Here is where we exit from vmm mode. We do this on any kind of exception.
; Interruptions (decrementer, external, etc.) are another story, though;
; those we just pass through. We also switch back explicitly when requested.
; This will happen in response to a timer pop and some kinds of ASTs.
;
; Inputs:
; R3 = activation
; R4 = savearea
;
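;
; Sketch of the exit sequence below: undo what switchIntoVM set up --
; clear vmmCEntry, fold the float/vector "changed" bits back into the
; vmm status flags, drop the runningVM/FAM special-flag bits, switch
; back to the emulator's address space, and swapCtxt the registers so
; the emulator resumes with the guest state captured in the context area.
;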

    .align 5
    .globl EXT(vmm_exit)

LEXT(vmm_exit)

vmmexitcall:
    lwz r2,vmmCEntry(r3) ; Get the context that is active
    lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy
    lwz r11,ACT_MACT_SPF(r3) ; Get the special flags
    lwz r19,vmmFlags(r2) ; Get the status flags
    mr r16,r3 ; R16 is safe to use for the activation address

    rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits
    li r0,0 ; Get a zero
    rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf
    lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation
    rlwinm r11,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag
    stw r0,vmmCEntry(r16) ; Clear pointer to active context
    stw r19,vmmFlags(r2) ; Set the status flags
    rlwinm r11,r11,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key
    mfsprg r10,1 ; Get the current activation
    lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
    rlwinm r11,r11,0,FamVMenabit+1,FamVMenabit-1 ; Clear FamVMEnable
    lwz r18,spcFlags(r10) ; Get per_proc copy of the special flags
    lwz r5,vmmContextKern(r2) ; Get the state page kernel addr
    rlwinm r11,r11,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMMode
    lwz r6,vmmCntrl(r5) ; Get the control field
    rlwimi r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits
    rlwimi r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits
    rlwimi r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits
    stw r11,ACT_MACT_SPF(r16) ; Set the special flags
    stw r6,vmmCntrl(r5) ; Store the control field
    stw r11,spcFlags(r10) ; Set per_proc copy of the special flags

    mr r26,r16 ; Save the activation pointer
    mr r27,r2 ; Save the context entry

    bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator

    la r5,facctx(r16) ; Point to the main facility context
    mr r2,r27 ; Restore
    stw r5,deferctx(r16) ; Start using the main facility context on the way out
    lwz r5,vmmContextKern(r27) ; Get the context area address
    mr r3,r16 ; Restore activation address
    stw r19,vmmStat(r5) ; Save the changed and popped flags
    bl swapCtxt ; Exchange the VM context for the emulator one
    stw r8,saver3+4(r30) ; Set the return code as the return value also
    b EXT(retFromVM) ; Go back to handler...


;
; Here is where we force an exit from vmm mode. We do this as part of
; termination to ensure that we are not executing in an alternate context.
; Because this is called from C we need to save all non-volatile registers.
;
; Inputs:
; R3 = activation
; R4 = user savearea
; Interruptions disabled
;
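;
; Since this is reached from C, the body below follows the usual PowerPC
; ABI pattern (sketched here for orientation): carve a stack frame, save
; r13-r31 with a single stmw, stash the LR in the frame, do the work,
; then lmw/mtlr/blr to return to the caller.
;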

    .align 5
    .globl EXT(vmm_force_exit)

LEXT(vmm_force_exit)

    stwu r1,-(FM_ALIGN(20*4)+FM_SIZE)(r1) ; Get enough space for the registers
    mflr r0 ; Save the return
    stmw r13,FM_ARG0(r1) ; Save all non-volatile registers
    stw r0,(FM_ALIGN(20*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

    lwz r2,vmmCEntry(r3) ; Get the context that is active
    lwz r11,ACT_MACT_SPF(r3) ; Get the special flags
    lwz r19,vmmFlags(r2) ; Get the status flags
    lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy

    rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits
    mr r26,r3 ; Save the activation pointer
    rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf
    li r0,0 ; Get a zero
    rlwinm r9,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag
    cmplw r9,r11 ; Check if we were in a vm
    lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation
    beq- vfeNotRun ; We were not in a vm....
    rlwinm r9,r9,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key
    stw r0,vmmCEntry(r26) ; Clear pointer to active context
    mfsprg r10,1 ; Get the current activation
    lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
    lwz r18,spcFlags(r10) ; Get per_proc copy of the special flags
    rlwinm r9,r9,0,FamVMenabit+1,FamVMenabit-1 ; Clear Fam Enable
    rlwinm r9,r9,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear Fam Mode
    lwz r5,vmmContextKern(r2) ; Get the context area address
    lwz r6,vmmCntrl(r5) ; Get the control field
    rlwimi r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits
    rlwimi r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits
    rlwimi r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits
    stw r6,vmmCntrl(r5) ; Store the control field
    stw r9,ACT_MACT_SPF(r26) ; Set the special flags
    stw r9,spcFlags(r10) ; Set per_proc copy of the special flags

    mr r27,r2 ; Save the context entry
    mr r30,r4 ; Save the savearea

    bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator

    la r7,facctx(r26) ; Point to the main facility context

    lwz r5,vmmContextKern(r27) ; Get the context area address
    stw r19,vmmStat(r5) ; Save the changed and popped flags
    stw r7,deferctx(r26) ; Tell context launcher to switch facility context

    bl swapCtxt ; Exchange the VM context for the emulator one

    lwz r8,saveexception(r30) ; Pick up the exception code
    lwz r7,SAVflags(r30) ; Pick up the savearea flags
    lis r9,hi16(SAVredrive) ; Get exception redrive bit
    rlwinm r8,r8,30,24,31 ; Convert exception to return code
    andc r7,r7,r9 ; Make sure redrive is off because we are intercepting
    stw r8,saver3+4(r30) ; Set the return code as the return value also
    stw r7,SAVflags(r30) ; Set the savearea flags


vfeNotRun: lmw r13,FM_ARG0(r1) ; Restore all non-volatile registers
    lwz r1,0(r1) ; Pop the stack
    lwz r0,FM_LR_SAVE(r1) ; Get the return address
    mtlr r0 ; Set return
    blr

;
; Note: we will not do any DCBTs to the savearea. It was just stored to a few cycles ago and should
; still be in the cache.
;
; NOTE NOTE: R16 is important to save!!!!
;
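
;
; Conceptually, swapCtxt is an in-place exchange; a C-style sketch
; (illustrative only):
;
;   for (each register slot r) {
;       tmp = savearea[r]; savearea[r] = context[r]; context[r] = tmp;
;   }
;
; with the guest MSR filtered as described earlier, and the first three
; system-call return parameters captured when exiting for an SC.
;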
    .align 5

swapCtxt:
    mfsprg r10,2 ; Get feature flags
    la r6,vmmppcpc(r5) ; Point to the first line
    mtcrf 0x02,r10 ; Move pf64Bit to its normal place in CR6

    lwz r14,saveexception(r30) ; Get the exception code
    dcbt 0,r6 ; Touch in the first line of the context area
    bt++ pf64Bitb,swap64 ; Go do this swap on a 64-bit machine...

    lwz r7,savesrr0+4(r30) ; Start moving context
    lwz r8,savesrr1+4(r30)
    lwz r9,saver0+4(r30)
    cmplwi cr1,r14,T_SYSTEM_CALL ; Are we switching because of a system call?
    lwz r10,saver1+4(r30)
    lwz r11,saver2+4(r30)
    lwz r12,saver3+4(r30)
    lwz r13,saver4+4(r30)
    la r6,vmmppcr6(r5) ; Point to second line
    lwz r14,saver5+4(r30)

    dcbt 0,r6 ; Touch second line of context area

    lwz r15,vmmppcpc(r5) ; First line of context
    lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
    lwz r23,vmmppcmsr(r5)
    ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
    lwz r17,vmmppcr0(r5)
    lwz r18,vmmppcr1(r5)
    and r23,r23,r22 ; Keep only the controllable bits
    lwz r19,vmmppcr2(r5)
    oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
    lwz r20,vmmppcr3(r5)
    ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
    lwz r21,vmmppcr4(r5)
    lwz r22,vmmppcr5(r5)

    dcbt 0,r6 ; Touch third line of context area

    stw r7,vmmppcpc(r5) ; Save emulator context into the context area
    stw r8,vmmppcmsr(r5)
    stw r9,vmmppcr0(r5)
    stw r10,vmmppcr1(r5)
    stw r11,vmmppcr2(r5)
    stw r12,vmmppcr3(r5)
    stw r13,vmmppcr4(r5)
    stw r14,vmmppcr5(r5)

;
; Save the first 3 parameters if we are an SC (we will take care of the last later)
;
    bne+ cr1,swapnotsc ; Skip next if not an SC exception...
    stw r12,return_params+0(r5) ; Save the first return
    stw r13,return_params+4(r5) ; Save the second return
    stw r14,return_params+8(r5) ; Save the third return

swapnotsc: li r6,0 ; Clear this out
    stw r6,savesrr0(r30) ; Ensure that high order is clear
    stw r15,savesrr0+4(r30) ; Save vm context into the savearea
    stw r6,savesrr1(r30) ; Ensure that high order is clear
    stw r23,savesrr1+4(r30)
    stw r17,saver0+4(r30)
    stw r18,saver1+4(r30)
    stw r19,saver2+4(r30)
    stw r20,saver3+4(r30)
    stw r21,saver4+4(r30)
    la r6,vmmppcr14(r5) ; Point to fourth line
    stw r22,saver5+4(r30)

    dcbt 0,r6 ; Touch fourth line

; Swap 8 registers

    lwz r7,saver6+4(r30) ; Read savearea
    lwz r8,saver7+4(r30)
    lwz r9,saver8+4(r30)
    lwz r10,saver9+4(r30)
    lwz r11,saver10+4(r30)
    lwz r12,saver11+4(r30)
    lwz r13,saver12+4(r30)
    lwz r14,saver13+4(r30)

    lwz r15,vmmppcr6(r5) ; Read vm context
    lwz r24,vmmppcr7(r5)
    lwz r17,vmmppcr8(r5)
    lwz r18,vmmppcr9(r5)
    lwz r19,vmmppcr10(r5)
    lwz r20,vmmppcr11(r5)
    lwz r21,vmmppcr12(r5)
    lwz r22,vmmppcr13(r5)

    stw r7,vmmppcr6(r5) ; Write context
    stw r8,vmmppcr7(r5)
    stw r9,vmmppcr8(r5)
    stw r10,vmmppcr9(r5)
    stw r11,vmmppcr10(r5)
    stw r12,vmmppcr11(r5)
    stw r13,vmmppcr12(r5)
    la r6,vmmppcr22(r5) ; Point to fifth line
    stw r14,vmmppcr13(r5)

    dcbt 0,r6 ; Touch fifth line

    stw r15,saver6+4(r30) ; Write vm context
    stw r24,saver7+4(r30)
    stw r17,saver8+4(r30)
    stw r18,saver9+4(r30)
    stw r19,saver10+4(r30)
    stw r20,saver11+4(r30)
    stw r21,saver12+4(r30)
    stw r22,saver13+4(r30)

; Swap 8 registers

    lwz r7,saver14+4(r30) ; Read savearea
    lwz r8,saver15+4(r30)
    lwz r9,saver16+4(r30)
    lwz r10,saver17+4(r30)
    lwz r11,saver18+4(r30)
    lwz r12,saver19+4(r30)
    lwz r13,saver20+4(r30)
    lwz r14,saver21+4(r30)

    lwz r15,vmmppcr14(r5) ; Read vm context
    lwz r24,vmmppcr15(r5)
    lwz r17,vmmppcr16(r5)
    lwz r18,vmmppcr17(r5)
    lwz r19,vmmppcr18(r5)
    lwz r20,vmmppcr19(r5)
    lwz r21,vmmppcr20(r5)
    lwz r22,vmmppcr21(r5)

    stw r7,vmmppcr14(r5) ; Write context
    stw r8,vmmppcr15(r5)
    stw r9,vmmppcr16(r5)
    stw r10,vmmppcr17(r5)
    stw r11,vmmppcr18(r5)
    stw r12,vmmppcr19(r5)
    stw r13,vmmppcr20(r5)
    la r6,vmmppcr30(r5) ; Point to sixth line
    stw r14,vmmppcr21(r5)

    dcbt 0,r6 ; Touch sixth line

    stw r15,saver14+4(r30) ; Write vm context
    stw r24,saver15+4(r30)
    stw r17,saver16+4(r30)
    stw r18,saver17+4(r30)
    stw r19,saver18+4(r30)
    stw r20,saver19+4(r30)
    stw r21,saver20+4(r30)
    stw r22,saver21+4(r30)

; Swap 8 registers

    lwz r7,saver22+4(r30) ; Read savearea
    lwz r8,saver23+4(r30)
    lwz r9,saver24+4(r30)
    lwz r10,saver25+4(r30)
    lwz r11,saver26+4(r30)
    lwz r12,saver27+4(r30)
    lwz r13,saver28+4(r30)
    lwz r14,saver29+4(r30)

    lwz r15,vmmppcr22(r5) ; Read vm context
    lwz r24,vmmppcr23(r5)
    lwz r17,vmmppcr24(r5)
    lwz r18,vmmppcr25(r5)
    lwz r19,vmmppcr26(r5)
    lwz r20,vmmppcr27(r5)
    lwz r21,vmmppcr28(r5)
    lwz r22,vmmppcr29(r5)

    stw r7,vmmppcr22(r5) ; Write context
    stw r8,vmmppcr23(r5)
    stw r9,vmmppcr24(r5)
    stw r10,vmmppcr25(r5)
    stw r11,vmmppcr26(r5)
    stw r12,vmmppcr27(r5)
    stw r13,vmmppcr28(r5)
    la r6,vmmppcvscr(r5) ; Point to seventh line
    stw r14,vmmppcr29(r5)

    dcbt 0,r6 ; Touch seventh line

    stw r15,saver22+4(r30) ; Write vm context
    stw r24,saver23+4(r30)
    stw r17,saver24+4(r30)
    stw r18,saver25+4(r30)
    stw r19,saver26+4(r30)
    stw r20,saver27+4(r30)
    stw r21,saver28+4(r30)
    stw r22,saver29+4(r30)

; Swap the remaining registers

    lwz r7,saver30+4(r30) ; Read savearea
    lwz r8,saver31+4(r30)
    lwz r9,savecr(r30)
    lwz r10,savexer+4(r30)
    lwz r11,savelr+4(r30)
    lwz r12,savectr+4(r30)
    lwz r14,savevrsave(r30)

    lwz r15,vmmppcr30(r5) ; Read vm context
    lwz r24,vmmppcr31(r5)
    lwz r17,vmmppccr(r5)
    lwz r18,vmmppcxer(r5)
    lwz r19,vmmppclr(r5)
    lwz r20,vmmppcctr(r5)
    lwz r22,vmmppcvrsave(r5)

    stw r7,vmmppcr30(r5) ; Write context
    stw r8,vmmppcr31(r5)
    stw r9,vmmppccr(r5)
    stw r10,vmmppcxer(r5)
    stw r11,vmmppclr(r5)
    stw r12,vmmppcctr(r5)
    stw r14,vmmppcvrsave(r5)

    stw r15,saver30+4(r30) ; Write vm context
    stw r24,saver31+4(r30)
    stw r17,savecr(r30)
    stw r18,savexer+4(r30)
    stw r19,savelr+4(r30)
    stw r20,savectr+4(r30)
    stw r22,savevrsave(r30)

; Swap the VSCR and FPSCR

    lwz r7,savevscr+0(r30) ; Read savearea
    lwz r8,savevscr+4(r30)
    lwz r9,savevscr+8(r30)
    lwz r10,savevscr+12(r30)
    lwz r11,savefpscrpad(r30)
    lwz r12,savefpscr(r30)

    lwz r15,vmmppcvscr+0(r5) ; Read vm context
    lwz r24,vmmppcvscr+4(r5)
    lwz r17,vmmppcvscr+8(r5)
    lwz r18,vmmppcvscr+12(r5)
    lwz r19,vmmppcfpscrpad(r5)
    lwz r20,vmmppcfpscr(r5)

    stw r7,vmmppcvscr+0(r5) ; Write context
    stw r8,vmmppcvscr+4(r5)
    stw r9,vmmppcvscr+8(r5)
    stw r10,vmmppcvscr+12(r5)
    stw r11,vmmppcfpscrpad(r5)
    stw r12,vmmppcfpscr(r5)

    stw r15,savevscr+0(r30) ; Write vm context
    stw r24,savevscr+4(r30)
    stw r17,savevscr+8(r30)
    stw r18,savevscr+12(r30)
    stw r19,savefpscrpad(r30)
    stw r20,savefpscr(r30)


;
; Cobble up the exception return code and save any specific return values
;
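;
; The rlwinm below is effectively return_code = (exception_vector >> 2) & 0xff,
; mapping, e.g., T_DATA_ACCESS onto the DSI return code; DSI/alignment,
; ISI, and SC exits then get their specific return parameters filled in.
;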
1162
1163 lwz r7,saveexception(r30) ; Pick up the exception code
1164 rlwinm r8,r7,30,24,31 ; Convert exception to return code
1165 cmplwi r7,T_DATA_ACCESS ; Was this a DSI?
1166 stw r8,return_code(r5) ; Save the exit code
1167 cmplwi cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI?
1168 beq+ swapDSI ; Yeah...
1169 cmplwi r7,T_ALIGNMENT ; Alignment exception?
1170 beq+ cr1,swapISI ; We had an ISI...
1171 cmplwi cr1,r7,T_SYSTEM_CALL ; Exiting because of an system call?
1172 beq+ swapDSI ; An alignment exception looks like a DSI...
1173 beq+ cr1,swapSC ; We had a system call...
1174
1175 blr ; Return...
1176
1177 ;
1178 ; Set exit returns for a DSI or alignment exception
1179 ;
1180
1181 swapDSI: lwz r10,savedar+4(r30) ; Get the DAR
1182 lwz r7,savedsisr(r30) ; and the DSISR
1183 stw r10,return_params+0(r5) ; Save DAR as first return parm
1184 stw r7,return_params+4(r5) ; Save DSISR as second return parm
1185 blr ; Return...
1186
1187 ;
1188 ; Set exit returns for a ISI
1189 ;
1190
1191 swapISI: lwz r7,vmmppcmsr(r5) ; Get the SRR1 value
1192 lwz r10,vmmppcpc(r5) ; Get the PC as failing address
1193 rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR
1194 stw r10,return_params+0(r5) ; Save PC as first return parm
1195 stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm
1196 blr ; Return...
1197
1198 ;
1199 ; Set exit returns for a system call (note: we did the first 3 earlier)
1200 ; Do we really need to pass parameters back here????
1201 ;
1202
1203 swapSC: lwz r10,vmmppcr6(r5) ; Get the fourth paramter
1204 stw r10,return_params+12(r5) ; Save it
1205 blr ; Return...
1206
1207 ;
1208 ; Here is the swap for 64-bit machines
1209 ;
1210
1211 swap64: lwz r22,vmmXAFlgs(r27) ; Get the eXtended Architecture flags
1212 ld r7,savesrr0(r30) ; Start moving context
1213 ld r8,savesrr1(r30)
1214 ld r9,saver0(r30)
1215 cmplwi cr1,r14,T_SYSTEM_CALL ; Are we switching because of a system call?
1216 ld r10,saver1(r30)
1217 ld r11,saver2(r30)
1218 rlwinm. r22,r22,0,0,0 ; Are we doing a 64-bit virtual machine?
1219 ld r12,saver3(r30)
1220 crnot vmmDoing64,cr0_eq ; Remember if this is a 64-bit VM
1221 ld r13,saver4(r30)
1222 la r6,vmmppcr6(r5) ; Point to second line
1223 ld r14,saver5(r30)
1224
1225 dcbt 0,r6 ; Touch second line of context area
1226
1227 bt vmmDoing64,sw64x1 ; Skip to 64-bit stuff
1228
1229 lwz r15,vmmppcpc(r5) ; First line of context
1230 lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
1231 lwz r23,vmmppcmsr(r5)
1232 ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
1233 lwz r17,vmmppcr0(r5)
1234 lwz r18,vmmppcr1(r5)
1235 and r23,r23,r22 ; Keep only the controllable bits
1236 lwz r19,vmmppcr2(r5)
1237 oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1238 lwz r20,vmmppcr3(r5)
1239 ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1240 lwz r21,vmmppcr4(r5)
1241 lwz r22,vmmppcr5(r5)
1242
1243 dcbt 0,r6 ; Touch third line of context area
1244
1245 stw r7,vmmppcpc(r5) ; Save emulator context into the context area
1246 stw r8,vmmppcmsr(r5)
1247 stw r9,vmmppcr0(r5)
1248 stw r10,vmmppcr1(r5)
1249 stw r11,vmmppcr2(r5)
1250 stw r12,vmmppcr3(r5)
1251 stw r13,vmmppcr4(r5)
1252 stw r14,vmmppcr5(r5)
1253
1254 ;
1255 ; Save the first 3 parameters if we are an SC (we will take care of the last later)
1256 ;
1257 bne+ cr1,sw64x1done ; Skip next if not an SC exception...
1258 stw r12,return_params+0(r5) ; Save the first return
1259 stw r13,return_params+4(r5) ; Save the second return
1260 stw r14,return_params+8(r5) ; Save the third return
1261 b sw64x1done ; We are done with this section...
1262
1263 sw64x1: ld r15,vmmppcXpc(r5) ; First line of context
1264 li r0,1 ; Get a 1 to turn on 64-bit
1265 lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user (we will also allow 64-bit here)
1266 sldi r0,r0,63 ; Get 64-bit bit
1267 ld r23,vmmppcXmsr(r5)
1268 ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
1269 ld r17,vmmppcXr0(r5)
1270 or r22,r22,r0 ; Add the 64-bit bit
1271 ld r18,vmmppcXr1(r5)
1272 and r23,r23,r22 ; Keep only the controllable bits
1273 ld r19,vmmppcXr2(r5)
1274 oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1275 ld r20,vmmppcXr3(r5)
1276 ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1277 ld r21,vmmppcXr4(r5)
1278 ld r22,vmmppcXr5(r5)
1279
1280 dcbt 0,r6 ; Touch third line of context area
1281
1282 std r7,vmmppcXpc(r5) ; Save emulator context into the context area
1283 std r8,vmmppcXmsr(r5)
1284 std r9,vmmppcXr0(r5)
1285 std r10,vmmppcXr1(r5)
1286 std r11,vmmppcXr2(r5)
1287 std r12,vmmppcXr3(r5)
1288 std r13,vmmppcXr4(r5)
1289 std r14,vmmppcXr5(r5)
1290
1291 ;
1292 ; Save the first 3 parameters if we are an SC (we will take care of the last later)
1293 ;
1294 bne+ cr1,sw64x1done ; Skip next if not an SC exception...
1295 std r12,return_paramsX+0(r5) ; Save the first return
1296 std r13,return_paramsX+8(r5) ; Save the second return
1297 std r14,return_paramsX+16(r5) ; Save the third return
1298
1299 sw64x1done:
1300 std r15,savesrr0(r30) ; Save vm context into the savearea
1301 std r23,savesrr1(r30)
1302 std r17,saver0(r30)
1303 std r18,saver1(r30)
1304 std r19,saver2(r30)
1305 std r20,saver3(r30)
1306 std r21,saver4(r30)
1307 la r6,vmmppcr14(r5) ; Point to fourth line
1308 std r22,saver5(r30)
1309
1310 dcbt 0,r6 ; Touch fourth line
1311
1312 ; Swap 8 registers
1313
1314 ld r7,saver6(r30) ; Read savearea
1315 ld r8,saver7(r30)
1316 ld r9,saver8(r30)
1317 ld r10,saver9(r30)
1318 ld r11,saver10(r30)
1319 ld r12,saver11(r30)
1320 ld r13,saver12(r30)
1321 ld r14,saver13(r30)
1322
1323 bt vmmDoing64,sw64x2 ; Skip to 64-bit stuff
1324
1325 lwz r15,vmmppcr6(r5) ; Read vm context
1326 lwz r24,vmmppcr7(r5)
1327 lwz r17,vmmppcr8(r5)
1328 lwz r18,vmmppcr9(r5)
1329 lwz r19,vmmppcr10(r5)
1330 lwz r20,vmmppcr11(r5)
1331 lwz r21,vmmppcr12(r5)
1332 lwz r22,vmmppcr13(r5)
1333
1334 stw r7,vmmppcr6(r5) ; Write context
1335 stw r8,vmmppcr7(r5)
1336 stw r9,vmmppcr8(r5)
1337 stw r10,vmmppcr9(r5)
1338 stw r11,vmmppcr10(r5)
1339 stw r12,vmmppcr11(r5)
1340 stw r13,vmmppcr12(r5)
1341 la r6,vmmppcr22(r5) ; Point to fifth line
1342 stw r14,vmmppcr13(r5)
1343
1344 dcbt 0,r6 ; Touch fifth line
1345 b sw64x2done ; We are done with this section...
1346
1347 sw64x2: ld r15,vmmppcXr6(r5) ; Read vm context
1348 ld r24,vmmppcXr7(r5)
1349 ld r17,vmmppcXr8(r5)
1350 ld r18,vmmppcXr9(r5)
1351 ld r19,vmmppcXr10(r5)
1352 ld r20,vmmppcXr11(r5)
1353 ld r21,vmmppcXr12(r5)
1354 ld r22,vmmppcXr13(r5)
1355
1356 std r7,vmmppcXr6(r5) ; Write context
1357 std r8,vmmppcXr7(r5)
1358 std r9,vmmppcXr8(r5)
1359 std r10,vmmppcXr9(r5)
1360 std r11,vmmppcXr10(r5)
1361 std r12,vmmppcXr11(r5)
1362 std r13,vmmppcXr12(r5)
1363 la r6,vmmppcXr22(r5) ; Point to fifth line
1364 std r14,vmmppcXr13(r5)
1365
1366 dcbt 0,r6 ; Touch fifth line
1367
1368 sw64x2done: std r15,saver6(r30) ; Write vm context
1369 std r24,saver7(r30)
1370 std r17,saver8(r30)
1371 std r18,saver9(r30)
1372 std r19,saver10(r30)
1373 std r20,saver11(r30)
1374 std r21,saver12(r30)
1375 std r22,saver13(r30)
1376
1377 ; Swap 8 registers
1378
1379 ld r7,saver14(r30) ; Read savearea
1380 ld r8,saver15(r30)
1381 ld r9,saver16(r30)
1382 ld r10,saver17(r30)
1383 ld r11,saver18(r30)
1384 ld r12,saver19(r30)
1385 ld r13,saver20(r30)
1386 ld r14,saver21(r30)
1387
1388 bt vmmDoing64,sw64x3 ; Skip to 64-bit stuff
1389
1390 lwz r15,vmmppcr14(r5) ; Read vm context
1391 lwz r24,vmmppcr15(r5)
1392 lwz r17,vmmppcr16(r5)
1393 lwz r18,vmmppcr17(r5)
1394 lwz r19,vmmppcr18(r5)
1395 lwz r20,vmmppcr19(r5)
1396 lwz r21,vmmppcr20(r5)
1397 lwz r22,vmmppcr21(r5)
1398
1399 stw r7,vmmppcr14(r5) ; Write context
1400 stw r8,vmmppcr15(r5)
1401 stw r9,vmmppcr16(r5)
1402 stw r10,vmmppcr17(r5)
1403 stw r11,vmmppcr18(r5)
1404 stw r12,vmmppcr19(r5)
1405 stw r13,vmmppcr20(r5)
1406 la r6,vmmppcr30(r5) ; Point to sixth line
1407 stw r14,vmmppcr21(r5)
1408
1409 dcbt 0,r6 ; Touch sixth line
1410 b sw64x3done ; Done with this section...
1411
1412 sw64x3: ld r15,vmmppcXr14(r5) ; Read vm context
1413 ld r24,vmmppcXr15(r5)
1414 ld r17,vmmppcXr16(r5)
1415 ld r18,vmmppcXr17(r5)
1416 ld r19,vmmppcXr18(r5)
1417 ld r20,vmmppcXr19(r5)
1418 ld r21,vmmppcXr20(r5)
1419 ld r22,vmmppcXr21(r5)
1420
1421 std r7,vmmppcXr14(r5) ; Write context
1422 std r8,vmmppcXr15(r5)
1423 std r9,vmmppcXr16(r5)
1424 std r10,vmmppcXr17(r5)
1425 std r11,vmmppcXr18(r5)
1426 std r12,vmmppcXr19(r5)
1427 std r13,vmmppcXr20(r5)
1428 la r6,vmmppcXr30(r5) ; Point to sixth line
1429 std r14,vmmppcXr21(r5)
1430
1431 dcbt 0,r6 ; Touch sixth line
1432
1433 sw64x3done: std r15,saver14(r30) ; Write vm context
1434 std r24,saver15(r30)
1435 std r17,saver16(r30)
1436 std r18,saver17(r30)
1437 std r19,saver18(r30)
1438 std r20,saver19(r30)
1439 std r21,saver20(r30)
1440 std r22,saver21(r30)
1441
1442 ; Swap 8 registers
1443
1444 ld r7,saver22(r30) ; Read savearea
1445 ld r8,saver23(r30)
1446 ld r9,saver24(r30)
1447 ld r10,saver25(r30)
1448 ld r11,saver26(r30)
1449 ld r12,saver27(r30)
1450 ld r13,saver28(r30)
1451 ld r14,saver29(r30)
1452
1453 bt vmmDoing64,sw64x4 ; Skip to 64-bit stuff
1454
1455 lwz r15,vmmppcr22(r5) ; Read vm context
1456 lwz r24,vmmppcr23(r5)
1457 lwz r17,vmmppcr24(r5)
1458 lwz r18,vmmppcr25(r5)
1459 lwz r19,vmmppcr26(r5)
1460 lwz r20,vmmppcr27(r5)
1461 lwz r21,vmmppcr28(r5)
1462 lwz r22,vmmppcr29(r5)
1463
1464 stw r7,vmmppcr22(r5) ; Write context
1465 stw r8,vmmppcr23(r5)
1466 stw r9,vmmppcr24(r5)
1467 stw r10,vmmppcr25(r5)
1468 stw r11,vmmppcr26(r5)
1469 stw r12,vmmppcr27(r5)
1470 stw r13,vmmppcr28(r5)
1471 la r6,vmmppcvscr(r5) ; Point to seventh line
1472 stw r14,vmmppcr29(r5)
1473 dcbt 0,r6 ; Touch seventh line
1474 b sw64x4done ; Done with this section...
1475
1476 sw64x4: ld r15,vmmppcXr22(r5) ; Read vm context
1477 ld r24,vmmppcXr23(r5)
1478 ld r17,vmmppcXr24(r5)
1479 ld r18,vmmppcXr25(r5)
1480 ld r19,vmmppcXr26(r5)
1481 ld r20,vmmppcXr27(r5)
1482 ld r21,vmmppcXr28(r5)
1483 ld r22,vmmppcXr29(r5)
1484
1485 std r7,vmmppcXr22(r5) ; Write context
1486 std r8,vmmppcXr23(r5)
1487 std r9,vmmppcXr24(r5)
1488 std r10,vmmppcXr25(r5)
1489 std r11,vmmppcXr26(r5)
1490 std r12,vmmppcXr27(r5)
1491 std r13,vmmppcXr28(r5)
1492 la r6,vmmppcvscr(r5) ; Point to seventh line
1493 std r14,vmmppcXr29(r5)
1494
1495 dcbt 0,r6 ; Touch seventh line
1496
1497 sw64x4done: std r15,saver22(r30) ; Write vm context
1498 std r24,saver23(r30)
1499 std r17,saver24(r30)
1500 std r18,saver25(r30)
1501 std r19,saver26(r30)
1502 std r20,saver27(r30)
1503 std r21,saver28(r30)
1504 std r22,saver29(r30)
1505
1506 ; Swap 8 registers
1507
1508 ld r7,saver30(r30) ; Read savearea
1509 ld r8,saver31(r30)
1510 lwz r9,savecr(r30)
1511 ld r10,savexer(r30)
1512 ld r11,savelr(r30)
1513 ld r12,savectr(r30)
1514 lwz r14,savevrsave(r30)
1515
1516 bt vmmDoing64,sw64x5 ; Skip to 64-bit stuff
1517
1518 lwz r15,vmmppcr30(r5) ; Read vm context
1519 lwz r24,vmmppcr31(r5)
1520 lwz r17,vmmppccr(r5)
1521 lwz r18,vmmppcxer(r5)
1522 lwz r19,vmmppclr(r5)
1523 lwz r20,vmmppcctr(r5)
1524 lwz r22,vmmppcvrsave(r5)
1525
1526 stw r7,vmmppcr30(r5) ; Write context
1527 stw r8,vmmppcr31(r5)
1528 stw r9,vmmppccr(r5)
1529 stw r10,vmmppcxer(r5)
1530 stw r11,vmmppclr(r5)
1531 stw r12,vmmppcctr(r5)
1532 stw r14,vmmppcvrsave(r5)
1533 b sw64x5done ; Done here...
1534
1535 sw64x5: ld r15,vmmppcXr30(r5) ; Read vm context
1536 ld r24,vmmppcXr31(r5)
1537 lwz r17,vmmppcXcr(r5)
1538 ld r18,vmmppcXxer(r5)
1539 ld r19,vmmppcXlr(r5)
1540 ld r20,vmmppcXctr(r5)
1541 lwz r22,vmmppcXvrsave(r5)
1542
1543 std r7,vmmppcXr30(r5) ; Write context
1544 std r8,vmmppcXr31(r5)
1545 stw r9,vmmppcXcr(r5)
1546 std r10,vmmppcXxer(r5)
1547 std r11,vmmppcXlr(r5)
1548 std r12,vmmppcXctr(r5)
1549 stw r14,vmmppcXvrsave(r5)
1550
1551 sw64x5done: std r15,saver30(r30) ; Write vm context
1552 std r24,saver31(r30)
1553 stw r17,savecr(r30)
1554 std r18,savexer(r30)
1555 std r19,savelr(r30)
1556 std r20,savectr(r30)
1557 stw r22,savevrsave(r30)
1558
1559 ; Swap the VSCR and FPSCR images
1560
1561 lwz r7,savevscr+0(r30) ; Read savearea
1562 lwz r8,savevscr+4(r30)
1563 lwz r9,savevscr+8(r30)
1564 lwz r10,savevscr+12(r30)
1565 lwz r11,savefpscrpad(r30)
1566 lwz r12,savefpscr(r30)
1567
1568 lwz r15,vmmppcvscr+0(r5) ; Read vm context
1569 lwz r24,vmmppcvscr+4(r5)
1570 lwz r17,vmmppcvscr+8(r5)
1571 lwz r18,vmmppcvscr+12(r5)
1572 lwz r19,vmmppcfpscrpad(r5)
1573 lwz r20,vmmppcfpscr(r5)
1574
1575 stw r7,vmmppcvscr+0(r5) ; Write context
1576 stw r8,vmmppcvscr+4(r5)
1577 stw r9,vmmppcvscr+8(r5)
1578 stw r10,vmmppcvscr+12(r5)
1579 stw r11,vmmppcfpscrpad(r5)
1580 stw r12,vmmppcfpscr(r5)
1581
1582 stw r15,savevscr+0(r30) ; Write vm context
1583 stw r24,savevscr+4(r30)
1584 stw r17,savevscr+8(r30)
1585 stw r18,savevscr+12(r30)
1586 stw r19,savefpscrpad(r30)
1587 stw r20,savefpscr(r30)
1588
1589
1590 ;
1591 ; Cobble up the exception return code and save any specific return values
1592 ;
1593
1594 lwz r7,saveexception(r30) ; Pick up the exception code
1595 rlwinm r8,r7,30,24,31 ; Convert exception to return code
1596 cmplwi r7,T_DATA_ACCESS ; Was this a DSI?
1597 stw r8,return_code(r5) ; Save the exit code
1598 cmplwi cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI?
1599 beq+ swapDSI64 ; Yeah...
1600 cmplwi r7,T_ALIGNMENT ; Alignment exception?
1601 beq+ cr1,swapISI64 ; We had an ISI...
1602 cmplwi cr1,r7,T_SYSTEM_CALL ; Exiting because of a system call?
1603 beq+ swapDSI64 ; An alignment exception looks like a DSI...
1604 beq+ cr1,swapSC64 ; We had a system call...
1605
1606 blr ; Return...
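;
; The conversion above divides the trap code by four and keeps the low
; byte, which appears to line up with the kVmmReturn* codes. A minimal
; C sketch (helper name is illustrative only):
;
; static unsigned int vmexit_return_code(unsigned int excp) {
; return (excp >> 2) & 0xff; /* rlwinm r8,r7,30,24,31 */
; }
;
; DSIs and alignment faults then share swapDSI64, ISIs take swapISI64,
; system calls take swapSC64, and anything else returns with just the code.
;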
1607
1608 ;
1609 ; Set exit returns for a DSI or alignment exception
1610 ;
1611
1612 swapDSI64: ld r10,savedar(r30) ; Get the DAR
1613 lwz r7,savedsisr(r30) ; and the DSISR
1614 bt vmmDoing64,sw64DSI ; Skip to 64-bit stuff...
1615
1616
1617 stw r10,return_params+0(r5) ; Save DAR as first return parm
1618 stw r7,return_params+4(r5) ; Save DSISR as second return parm
1619 blr ; Return...
1620
1621 sw64DSI: std r10,return_paramsX+0(r5) ; Save DAR as first return parm
1622 std r7,return_paramsX+8(r5) ; Save DSISR as second return parm (note that this is expanded to 64 bits)
1623 blr ; Return...
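;
; The two paths differ only in slot width: a 32-bit VM gets 4-byte return
; parameter slots, a 64-bit VM gets 8-byte ones, so the 32-bit DSISR is
; zero-extended when stored with std. A sketch of the assumed layouts
; (type names are illustrative; see vmachmon.h for the real definitions):
;
; typedef struct { unsigned int p[4]; } ret_parms32; /* return_params */
; typedef struct { unsigned long long p[4]; } ret_parms64; /* return_paramsX */
;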
1624
1625 ;
1626 ; Set exit returns for an ISI
1627 ;
1628
1629 swapISI64: bt vmmDoing64,sw64ISI ; Skip to 64-bit stuff...
1630 lwz r7,vmmppcmsr(r5) ; Get the SRR1 value
1631 lwz r10,vmmppcpc(r5) ; Get the PC as failing address
1632 rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR
1633 stw r10,return_params+0(r5) ; Save PC as first return parm
1634 stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm
1635 blr ; Return...
1636
1637 sw64ISI: ld r7,vmmppcXmsr(r5) ; Get the SRR1 value
1638 ld r10,vmmppcXpc(r5) ; Get the PC as failing address
1639 rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR
1640 std r10,return_paramsX+0(r5) ; Save PC as first return parm
1641 std r7,return_paramsX+8(r5) ; Save the pseudo-DSISR as second return parm
1642 blr ; Return...
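;
; The pseudo-DSISR is built by keeping SRR1 bits 1 through 4 (IBM bit
; numbering), i.e. mask 0x78000000. A minimal C sketch (function name
; is illustrative only):
;
; static unsigned int pseudo_dsisr(unsigned int srr1) {
; return srr1 & 0x78000000; /* rlwinm r7,r7,0,1,4 */
; }
;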
1643
1644 ;
1645 ; Set exit returns for a system call (note: we did the first 3 earlier)
1646 ; Do we really need to pass parameters back here????
1647 ;
1648
1649 swapSC64: bt vmmDoing64,sw64SC ; Skip to 64-bit stuff...
1650 lwz r10,vmmppcr6(r5) ; Get the fourth parameter
1651 stw r10,return_params+12(r5) ; Save it
1652 blr ; Return...
1653
1654 sw64SC: ld r10,vmmppcXr6(r5) ; Get the fourth paramter
1655 std r10,return_paramsX+24(r5) ; Save it
1656 blr ; Return...
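;
; Slot arithmetic sketch: return parameter n lives at n*4 bytes in
; return_params and at n*8 bytes in return_paramsX, so the fourth
; parameter (n == 3) lands at +12 and +24 respectively (macro names
; are illustrative only):
;
; #define RET_SLOT32(n) ((n) * 4) /* stw ...,return_params+12(r5) */
; #define RET_SLOT64(n) ((n) * 8) /* std ...,return_paramsX+24(r5) */
;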
1657
1658 ;
1659 ; vmmFamGuestResume:
1660 ; Restore Guest context from Fam mode.
1661 ;
1662
1663 vmmFamGuestResume:
1664 mfsprg r10,1 ; Get the current activation
1665 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
1666 lwz r27,vmmCEntry(r3) ; Get the context that is active
1667 lwz r4,VMMXAFlgs(r10) ; Get the eXtended Architecture flags
1668 rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
1669 lwz r15,spcFlags(r10) ; Get per_proc special flags
1670 mr r26,r3 ; Save the activation pointer
1671 lwz r20,vmmContextKern(r27) ; Get the comm area
1672 rlwinm r15,r15,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
1673 stw r15,spcFlags(r10) ; Update the special flags
1674 bne fgrX
1675 lwz r7,famguestpc(r20) ; Load famguest ctx pc
1676 bf++ vmmMapDone,fgrNoMap ; No mapping done for this space.
1677 lwz r3,SAVflags(r30) ; Pick up the savearea flags
1678 lwz r2,vmmLastMap(r28) ; Get the high half of the last mapped address
1679 lwz r6,vmmLastMap+4(r28) ; Get the low half of the last mapped address
1680 li r4,T_DATA_ACCESS ; Change to DSI fault
1681 oris r3,r3,hi16(SAVredrive) ; Set exception redrive
1682 stw r2,savedar(r30) ; Set the DAR high half to the last thing we mapped
1683 stw r6,savedar+4(r30) ; Set the DAR low half to the last thing we mapped
1684 stw r3,SAVflags(r30) ; Turn on the redrive request
1685 lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
1686 stw r4,saveexception(r30) ; Say we need to emulate a DSI
1687 li r0,0 ; Clear
1688 stw r2,savedsisr(r30) ; Pretend we have a PTE miss
1689 stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of
1690 fgrNoMap:
1691 lwz r4,savesrr1+4(r30) ; Get the saved MSR value
1692 stw r7,savesrr0+4(r30) ; Set savearea pc
1693 lwz r5,famguestmsr(r20) ; Load famguest ctx msr
1694 lis r6,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
1695 ori r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
1696 and r5,r5,r6 ; Keep only the controllable bits
1697 oris r5,r5,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1698 ori r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1699 rlwimi r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
1700 rlwimi r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
1701 stw r5,savesrr1+4(r30) ; Set savearea srr1
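;
; The same MSR scrub recurs in fgrXNoMap and in both ufpResumeGuest paths
; below. A self-contained C sketch with the masks passed in (helper name
; is illustrative; the real masks are MSR_IMPORT_BITS and
; MSR_EXPORT_MASK_SET from the ppc headers):
;
; static unsigned int scrub_guest_msr(unsigned int guest_msr,
; unsigned int host_msr,
; unsigned int import, /* user-controllable bits */
; unsigned int force_on, /* mandatory bits */
; unsigned int fp_vec) /* FP and VEC bit masks */
; {
; unsigned int msr = (guest_msr & import) | force_on;
; return (msr & ~fp_vec) | (host_msr & fp_vec); /* rlwimi FP/VEC */
; }
;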
1702 lwz r4,famguestr0(r20) ; Load famguest ctx r0
1703 lwz r5,famguestr1(r20) ; Load famguest ctx r1
1704 lwz r6,famguestr2(r20) ; Load famguest ctx r2
1705 lwz r7,famguestr3(r20) ; Load famguest ctx r3
1706 stw r4,saver0+4(r30) ; Set savearea r0
1707 stw r5,saver1+4(r30) ; Set savearea r1
1708 stw r6,saver2+4(r30) ; Set savearea r2
1709 stw r7,saver3+4(r30) ; Set savearea r3
1710 lwz r4,famguestr4(r20) ; Load famguest ctx r4
1711 lwz r5,famguestr5(r20) ; Load famguest ctx r5
1712 lwz r6,famguestr6(r20) ; Load famguest ctx r6
1713 lwz r7,famguestr7(r20) ; Load famguest ctx r7
1714 stw r4,saver4+4(r30) ; Set savearea r4
1715 stw r5,saver5+4(r30) ; Set savearea r5
1716 stw r6,saver6+4(r30) ; Set savearea r6
1717 stw r7,saver7+4(r30) ; Set savearea r7
1718 b fgrret
1719 fgrX:
1720 ld r7,famguestXpc(r20) ; Load famguest ctx pc
1721 bf++ vmmMapDone,fgrXNoMap ; No mapping done for this space.
1722 lwz r3,SAVflags(r30) ; Pick up the savearea flags
1723 ld r2,vmmLastMap(r28) ; Get the last mapped address
1724 li r4,T_DATA_ACCESS ; Change to DSI fault
1725 oris r3,r3,hi16(SAVredrive) ; Set exception redrive
1726 std r2,savedar(r30) ; Set the DAR to the last thing we mapped
1727 stw r3,SAVflags(r30) ; Turn on the redrive request
1728 lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
1729 stw r4,saveexception(r30) ; Say we need to emulate a DSI
1730 li r0,0 ; Clear
1731 stw r2,savedsisr(r30) ; Pretend we have a PTE miss
1732 stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of
1733 fgrXNoMap:
1734 ld r4,savesrr1(r30) ; Get the saved MSR value
1735 std r7,savesrr0(r30) ; Set savearea pc
1736 ld r5,famguestXmsr(r20) ; Load famguest ctx msr
1737 lis r6,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
1738 ori r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
1739 and r5,r5,r6 ; Keep only the controllable bits
1740 oris r5,r5,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1741 ori r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1742 rlwimi r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
1743 rlwimi r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
1744 std r5,savesrr1(r30) ; Set savearea srr1
1745 ld r4,famguestXr0(r20) ; Load famguest ctx r0
1746 ld r5,famguestXr1(r20) ; Load famguest ctx r1
1747 ld r6,famguestXr2(r20) ; Load famguest ctx r2
1748 ld r7,famguestXr3(r20) ; Load famguest ctx r3
1749 std r4,saver0(r30) ; Set savearea r0
1750 std r5,saver1(r30) ; Set savearea r1
1751 std r6,saver2(r30) ; Set savearea r2
1752 std r7,saver3(r30) ; Set savearea r3
1753 ld r4,famguestXr4(r20) ; Load famguest ctx r4
1754 ld r5,famguestXr5(r20) ; Load famguest ctx r5
1755 ld r6,famguestXr6(r20) ; Load famguest ctx r6
1756 ld r7,famguestXr7(r20) ; Load famguest ctx r7
1757 std r4,saver4(r30) ; Set savearea r4
1758 std r5,saver5(r30) ; Set savearea r5
1759 std r6,saver6(r30) ; Set savearea r6
1760 std r7,saver7(r30) ; Set savearea r7
1761 fgrret:
1762 li r3,1 ; Show normal exit with check for AST
1763 mr r16,r26 ; Restore the thread pointer
1764 b EXT(ppcscret) ; Go back to handler...
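;
; When vmmMapDone is set, resume does not enter the guest directly; it
; fabricates a DSI on the last mapped address and redrives the exception.
; A field-level sketch (structure shapes are assumed, not the real
; savearea/context layouts):
;
; save->saveexception = T_DATA_ACCESS; /* emulate a DSI */
; save->savedar = vmm->vmmLastMap; /* fault on the last mapping */
; save->savedsisr = MASK(DSISR_HASH); /* pretend a PTE/DBAT miss */
; save->SAVflags |= SAVredrive; /* ask for exception redrive */
; vmm->vmmGFlags &= ~0xff; /* low byte: redrive handled */
;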
1765
1766 ;
1767 ; FAM Intercept exception handler
1768 ;
1769
1770 .align 5
1771 .globl EXT(vmm_fam_exc)
1772
1773 LEXT(vmm_fam_exc)
1774 lwz r4,VMMXAFlgs(r2) ; Get the eXtended Architecture flags
1775 lwz r1,pfAvailable(r2) ; Get the CPU features flags
1776 rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
1777 bne fexcX
1778 lwz r4,saver4+4(r13) ; Load savearea r4
1779 cmplwi r11,T_ALIGNMENT ; Alignment exception?
1780 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
1781 mtcrf 0x02,r1 ; Move pf64Bit to its normal place in CR6
1782 cmplwi cr1,r11,T_PROGRAM ; Exiting because of a program exception?
1783 bt++ pf64Bitb,fexcVMareaPhys64 ; Go do this on a 64-bit machine...
1784 slwi r3,r3,12 ; Change ppnum to physical address
1785 b fexcVMareaPhysres
1786 fexcVMareaPhys64:
1787 mtxer r5 ; Restore xer
1788 lwz r5,saver5+4(r13) ; Load savearea r5
1789 lwz r6,saver6+4(r13) ; Load savearea r6
1790 sldi r3,r3,12 ; Change ppnum to physical address
1791 fexcVMareaPhysres:
1792 stw r4,famguestr4(r3) ; Save r4 in famguest ctx
1793 stw r5,famguestr5(r3) ; Save r5 in famguest ctx
1794 stw r6,famguestr6(r3) ; Save r6 in famguest ctx
1795 stw r7,famguestr7(r3) ; Save r7 in famguest ctx
1796 lwz r4,saver0+4(r13) ; Load savearea r0
1797 lwz r5,saver1+4(r13) ; Load savearea r1
1798 lwz r6,saver2+4(r13) ; Load savearea r2
1799 lwz r7,saver3+4(r13) ; Load savearea r3
1800 stw r4,famguestr0(r3) ; Save r0 in famguest ctx
1801 stw r5,famguestr1(r3) ; Save r1 in famguest ctx
1802 stw r6,famguestr2(r3) ; Save r2 in famguest ctx
1803 stw r7,famguestr3(r3) ; Save r3 in famguest ctx
1804 lwz r4,spcFlags(r2) ; Load per_proc spcFlags
1805 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1806 stw r4,spcFlags(r2) ; Update per_proc spcFlags
1807 mfsrr0 r2 ; Get the interrupt srr0
1808 mfsrr1 r4 ; Get the interrupt srr1
1809 stw r2,famguestpc(r3) ; Save srr0 in famguest ctx
1810 stw r4,famguestmsr(r3) ; Save srr1 in famguest ctx
1811 li r6,lo16(MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)|MASK(MSR_FE1))
1812 andc r6,r4,r6 ; Clear SE BE FE0 FE1
1813 mtsrr1 r6 ; Set srr1
1814 mr r6,r3 ; Set r6 with phys state page addr
1815 rlwinm r7,r11,30,24,31 ; Convert exception to return code
1816 beq+ cr1,fexcPRG ; We had a program exception...
1817 bne+ fexcret
1818 ; We had an Alignment...
1819 mfdar r3 ; Load dar
1820 mfdsisr r4 ; Load dsisr
1821 stw r3,famparam+0x4(r6) ; Set famparam 1 with dar
1822 stw r4,famparam+0x8(r6) ; Set famparam 2 with dsisr
1823 b fexcret ; Join the common exit path...
1824 fexcPRG:
1825 stw r4,famparam+0x4(r6) ; Set famparam 1 with srr1
1826 mr r3,r4 ; Set r3 with srr1
1827 lwz r4,famguestr4(r6) ; Load r4 from famguest context
1828 fexcret:
1829 lwz r5,famguestr5(r6) ; Load r5 from famguest context
1830 lwz r13,famhandler(r6) ; Load user address to resume
1831 stw r2,famparam(r6) ; Set famparam 0 with srr0
1832 stw r7,famdispcode(r6) ; Save the exit code
1833 lwz r1,famrefcon(r6) ; Load refcon
1834 bt++ pf64Bitb,fexcrfi64 ; Go do this on a 64-bit machine...
1835 mtcr r0 ; Restore cr
1836 mtsrr0 r13 ; Load srr0
1837 mr r0,r7 ; Set dispatch code
1838 lwz r7,famguestr7(r6) ; Load r7 from famguest context
1839 lwz r6,famguestr6(r6) ; Load r6 from famguest context
1840 mfsprg r13,2 ; Restore r13
1841 mfsprg r11,3 ; Restore r11
1842 rfi
1843 fexcrfi64:
1844 mtcr r0 ; Restore cr
1845 mtsrr0 r13 ; Load srr0
1846 mr r0,r7 ; Set dispatch code
1847 lwz r7,famguestr7(r6) ; Load r7 from famguest context
1848 lwz r6,famguestr6(r6) ; Load r6 from famguest context
1849 mfsprg r13,2 ; Restore r13
1850 mfsprg r11,3 ; Restore r11
1851 rfid
1852 fexcX:
1853 mtxer r5 ; Restore xer
1854 ld r4,saver4(r13) ; Load savearea r4
1855 ld r5,saver5(r13) ; Load savearea r5
1856 ld r6,saver6(r13) ; Load savearea r6
1857 cmplwi r11,T_ALIGNMENT ; Alignment exception?
1858 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
1859 mtcrf 0x02,r1 ; Move pf64Bit to its normal place in CR6
1860 cmplwi cr1,r11,T_PROGRAM ; Exiting because of a program exception?
1861 sldi r3,r3,12 ; Change ppnum to physical address
1862 std r4,famguestXr4(r3) ; Save r4 in famguest ctx
1863 std r5,famguestXr5(r3) ; Save r5 in famguest ctx
1864 std r6,famguestXr6(r3) ; Save r6 in famguest ctx
1865 std r7,famguestXr7(r3) ; Save r7 in famguest ctx
1866 ld r4,saver0(r13) ; Load savearea r0
1867 ld r5,saver1(r13) ; Load savearea r1
1868 ld r6,saver2(r13) ; Load savearea r2
1869 ld r7,saver3(r13) ; Load savearea r3
1870 std r4,famguestXr0(r3) ; Save r0 in famguest ctx
1871 std r5,famguestXr1(r3) ; Save r1 in famguest ctx
1872 std r6,famguestXr2(r3) ; Save r2 in famguest ctx
1873 std r7,famguestXr3(r3) ; Save r3 in famguest ctx
1874 lwz r4,spcFlags(r2) ; Load per_proc spcFlags
1875 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1876 stw r4,spcFlags(r2) ; Update per_proc spcFlags
1877 mfsrr0 r2 ; Get the interrupt srr0
1878 mfsrr1 r4 ; Get the interrupt srr1
1879 std r2,famguestXpc(r3) ; Save srr0 in famguest ctx
1880 std r4,famguestXmsr(r3) ; Save srr1 in famguest ctx
1881 li r6,lo16(MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)|MASK(MSR_FE1))
1882 andc r6,r4,r6 ; Clear SE BE FE0 FE1
1883 mtsrr1 r6 ; Set srr1
1884 mr r6,r3 ; Set r6 with phys state page addr
1885 rlwinm r7,r11,30,24,31 ; Convert exception to return code
1886 beq+ cr1,fexcXPRG ; We had a program exception...
1887 bne+ fexcXret
1888 ; We had an Alignment...
1889 mfdar r3 ; Load dar
1890 mfdsisr r4 ; Load dsisr
1891 std r3,famparamX+0x8(r6) ; Set famparam 1 with dar
1892 std r4,famparamX+0x10(r6) ; Set famparam 2 with dsisr
1893 b fexcXret
1894 fexcXPRG:
1895 std r4,famparamX+0x8(r6) ; Set famparam 1 with srr1
1896 mr r3,r4 ; Set r3 with srr1
1897 ld r4,famguestXr4(r6) ; Load r4 from famguest context
1898 fexcXret:
1899 ld r5,famguestXr5(r6) ; Load r5 from famguest context
1900 ld r13,famhandlerX(r6) ; Load user address to resume
1901 std r2,famparamX(r6) ; Set famparam 0 with srr0
1902 std r7,famdispcodeX(r6) ; Save the exit code
1903 ld r1,famrefconX(r6) ; Load refcon
1904 mtcr r0 ; Restore cr
1905 mtsrr0 r13 ; Load srr0
1906 mr r0,r7 ; Set dispatch code
1907 ld r7,famguestXr7(r6) ; Load r7 from famguest context
1908 ld r6,famguestXr6(r6) ; Load r6 from famguest context
1909 mfsprg r13,2 ; Restore r13
1910 mfsprg r11,3 ; Restore r11
1911 rfid
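;
; In both widths the guest resumes at famhandler with the same delivery.
; A C sketch (field names follow this file; the struct shape is assumed):
;
; fam->famparam[0] = srr0; /* interrupted guest PC */
; fam->famparam[1] = is_alignment ? dar : srr1; /* DAR, or SRR1 for PRG */
; if (is_alignment) fam->famparam[2] = dsisr;
; fam->famdispcode = (excp >> 2) & 0xff; /* same conversion as above */
; /* on entry to the handler, r0 = dispatch code and r1 = famrefcon */
;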
1912
1913 ;
1914 ; FAM Intercept DSI/ISI fault handler
1915 ;
1916
1917 .align 5
1918 .globl EXT(vmm_fam_pf)
1919
1920 LEXT(vmm_fam_pf)
1921 lwz r4,VMMXAFlgs(r2) ; Get the eXtended Architecture flags
1922 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
1923 rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
1924 bne fpfX
1925 lwz r4,saver0+4(r13) ; Load savearea r0
1926 lwz r5,saver1+4(r13) ; Load savearea r1
1927 lwz r6,saver2+4(r13) ; Load savearea r2
1928 lwz r7,saver3+4(r13) ; Load savearea r3
1929 bt++ pf64Bitb,fpfVMareaPhys64 ; Go do this on a 64-bit machine...
1930 slwi r3,r3,12 ; Change ppnum to physical address
1931 b fpfVMareaPhysret
1932 fpfVMareaPhys64:
1933 sldi r3,r3,12 ; Change ppnum to physical address
1934 fpfVMareaPhysret:
1935 stw r4,famguestr0(r3) ; Save r0 in famguest
1936 stw r5,famguestr1(r3) ; Save r1 in famguest
1937 stw r6,famguestr2(r3) ; Save r2 in famguest
1938 stw r7,famguestr3(r3) ; Save r3 in famguest
1939 lwz r4,saver4+4(r13) ; Load savearea r4
1940 lwz r5,saver5+4(r13) ; Load savearea r5
1941 lwz r6,saver6+4(r13) ; Load savearea r6
1942 lwz r7,saver7+4(r13) ; Load savearea r7
1943 stw r4,famguestr4(r3) ; Save r4 in famguest
1944 lwz r4,spcFlags(r2) ; Load spcFlags
1945 stw r5,famguestr5(r3) ; Save r5 in famguest
1946 lwz r5,savesrr0+4(r13) ; Get the interrupt srr0
1947 stw r6,famguestr6(r3) ; Save r6 in famguest
1948 lwz r6,savesrr1+4(r13) ; Load srr1
1949 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1950 stw r7,famguestr7(r3) ; Save r7 in famguest
1951 stw r4,spcFlags(r2) ; Update spcFlags
1952 lwz r1,famrefcon(r3) ; Load refcon
1953 lwz r2,famhandler(r3) ; Load famhandler to resume
1954 stw r5,famguestpc(r3) ; Save srr0
1955 stw r5,saver2+4(r13) ; Store srr0 in savearea r2
1956 stw r5,famparam(r3) ; Store srr0 in fam param 0
1957 stw r6,famguestmsr(r3) ; Save srr1 in famguestmsr
1958 cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this an ISI?
1959 rlwinm r7,r11,30,24,31 ; Convert exception to return code
1960 beq+ cr1,fpfISI ; We had an ISI...
1961 ; fpfDSI
1962 lwz r6,savedar+4(r13) ; Load dar from savearea
1963 lwz r4,savedsisr(r13) ; Load dsisr from savearea
1964 stw r6,famparam+0x4(r3) ; Store dar in fam param 1
1965 stw r6,saver3+4(r13) ; Store dar in savearea r3
1966 stw r4,famparam+0x8(r3) ; Store dsisr in fam param 2
1967 stw r4,saver4+4(r13) ; Store dsisr in savearea r4
1968 b fpfret
1969 fpfISI:
1970 rlwinm r6,r6,0,1,4 ; Save the bits that match the DSISR
1971 stw r6,famparam+0x4(r3) ; Store srr1 in fam param 1
1972 stw r6,saver3+4(r13) ; Store srr1 in savearea r3
1973 fpfret:
1974 stw r7,saver0+4(r13) ; Set dispatch code
1975 stw r7,famdispcode(r3) ; Set dispatch code
1976 stw r1,saver1+4(r13) ; Store refcon in savearea r1
1977 stw r2,savesrr0+4(r13) ; Store famhandler in srr0
1978 blr
1979 fpfX:
1980 ld r4,saver0(r13) ; Load savearea r0
1981 ld r5,saver1(r13) ; Load savearea r1
1982 ld r6,saver2(r13) ; Load savearea r2
1983 ld r7,saver3(r13) ; Load savearea r3
1984 sldi r3,r3,12 ; Change ppnum to physical address
1985 std r4,famguestXr0(r3) ; Save r0 in famguest
1986 std r5,famguestXr1(r3) ; Save r1 in famguest
1987 std r6,famguestXr2(r3) ; Save r2 in famguest
1988 std r7,famguestXr3(r3) ; Save r3 in famguest
1989 ld r4,saver4(r13) ; Load savearea r4
1990 ld r5,saver5(r13) ; Load savearea r5
1991 ld r6,saver6(r13) ; Load savearea r6
1992 ld r7,saver7(r13) ; Load savearea r7
1993 std r4,famguestXr4(r3) ; Save r4 in famguest
1994 lwz r4,spcFlags(r2) ; Load spcFlags
1995 std r5,famguestXr5(r3) ; Save r5 in famguest
1996 ld r5,savesrr0(r13) ; Get the interrupt srr0
1997 std r6,famguestXr6(r3) ; Save r6 in famguest
1998 ld r6,savesrr1(r13) ; Load srr1
1999 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
2000 std r7,famguestXr7(r3) ; Save r7 in famguest
2001 stw r4,spcFlags(r2) ; Update spcFlags
2002 ld r1,famrefconX(r3) ; Load refcon
2003 ld r2,famhandlerX(r3) ; Load famhandler to resume
2004 std r5,famguestXpc(r3) ; Save srr0
2005 std r5,saver2(r13) ; Store srr0 in savearea r2
2006 std r5,famparamX(r3) ; Store srr0 in fam param 0
2007 std r6,famguestXmsr(r3) ; Save srr1 in famguestmsr
2008 cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this an ISI?
2009 rlwinm r7,r11,30,24,31 ; Convert exception to return code
2010 beq+ cr1,fpfXISI ; We had an ISI...
2011 ; fpfXDSI
2012 ld r6,savedar(r13) ; Load dar from savearea
2013 lwz r4,savedsisr(r13) ; Load dsisr from savearea
2014 std r6,famparamX+0x8(r3) ; Store dar in fam param 1
2015 std r6,saver3(r13) ; Store dar in savearea r3
2016 std r4,famparamX+0x10(r3) ; Store dsisr in fam param 2
2017 std r4,saver4(r13) ; Store dsisr in savearea r4
2018 b fpfXret
2019 fpfXISI:
2020 rlwinm r6,r6,0,1,4 ; Save the bits that match the DSISR
2021 std r6,famparamX+0x8(r3) ; Store srr1 in fam param 1
2022 std r6,saver3(r13) ; Store srr1 in savearea r3
2023 fpfXret:
2024 std r7,saver0(r13) ; Set dispatch code
2025 std r7,famdispcodeX(r3) ; Set dispatch code
2026 std r1,saver1(r13) ; Store refcon in savearea r1
2027 std r2,savesrr0(r13) ; Store famhandler in srr0
2028 blr
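;
; Sketch of the fault delivery above (argument shapes assumed; is_isi
; stands for the cmplwi against T_INSTRUCTION_ACCESS):
;
; static void fam_pf_params(int is_isi, unsigned int pc, unsigned int dar,
; unsigned int dsisr, unsigned int srr1,
; unsigned int out[3])
; {
; out[0] = pc; /* famparam 0: srr0 */
; if (is_isi) {
; out[1] = srr1 & 0x78000000; /* pseudo-DSISR */
; } else {
; out[1] = dar; /* famparam 1 */
; out[2] = dsisr; /* famparam 2 */
; }
; }
;
; The same values are mirrored into the savearea, so the handler also
; sees them in r2-r4, with r0 = dispatch code and r1 = refcon.
;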
2029
2030 /*
2031 * Ultra Fast Path FAM syscalls
2032 *
2033 * The UFT FAMs are those from kvmmResumeGuest to kvmmSetGuestRegister, inclusive.
2034 * We get here directly from the syscall vector, with interrupts and translation off,
2035 * 64-bit mode on if supported, and all registers live except:
2036 *
2037 * r13 = holds caller's cr
2038 * sprg2 = holds caller's r13
2039 * sprg3 = holds caller's r11
2040 * cr2 = set on (r3==kvmmSetGuestRegister)
2041 * cr5 = set on (r3==kvmmResumeGuest)
2042 */
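;
; The get/set fast path below is plain index math over the first eight
; guest GPRs. A C sketch (names illustrative; famguest stands for the
; famguestr0 or famguestXr0 array base, and the out-of-range case simply
; falls through to the return path rather than producing -1):
;
; static long long uft_guest_reg(unsigned long long famguest[8],
; unsigned int idx, unsigned long long val, int set)
; {
; if (idx > 7) return -1; /* bgt cr5,ufpVMret */
; if (set) { famguest[idx] = val; return 0; } /* stwx/stdx, li r3,0 */
; return (long long)famguest[idx]; /* lwzx/ldx into r3 */
; }
;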
2043
2044 .align 5
2045 .globl EXT(vmm_ufp)
2046
2047 LEXT(vmm_ufp)
2048 mfsprg r3,0 ; Get the per_proc area
2049 mr r11,r13 ; Move saved cr to r11
2050 lwz r13,VMMXAFlgs(r3) ; Get the eXtended Architecture flags
2051 rlwinm. r13,r13,0,0,0 ; Are we doing a 64-bit virtual machine?
2052
2053 lwz r13,pfAvailable(r3) ; Get feature flags
2054 mtcrf 0x02,r13 ; Put pf64Bitb etc in cr6
2055 lwz r13,VMMareaPhys(r3) ; Load fast assist area
2056 bt++ pf64Bitb,ufpVMareaPhys64 ; Go do this on a 64-bit machine...
2057 slwi r13,r13,12 ; Change ppnum to physical address
2058 b ufpVMareaPhysret
2059 ufpVMareaPhys64:
2060 sldi r13,r13,12 ; Change ppnum to physical address
2061 ufpVMareaPhysret:
2062 bne ufpX ; go handle a 64-bit virtual machine
2063
2064 bt cr5_eq,ufpResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest
2065 cmplwi cr5,r4,7 ; First argument in range? (i.e., 0-7)
2066 bgt cr5,ufpVMret ; Return if not in the range
2067 slwi r4,r4,2 ; multiply index by 4
2068 la r3,famguestr0(r13) ; Load the base address
2069 bt cr2_eq,ufpSetGuestReg ; Set/get selector
2070 ; ufpGetGuestReg
2071 lwzx r3,r4,r3 ; Load the guest register
2072 b ufpVMret ; Return
2073 ufpSetGuestReg:
2074 stwx r5,r4,r3 ; Update the guest register
2075 li r3,0 ; Set return value
2076 b ufpVMret ; Return
2077 ufpResumeGuest:
2078 lwz r7,spcFlags(r3) ; Pick up the special flags
2079 mtsrr0 r4 ; Set srr0
2080 rlwinm. r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl
2081 rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
2082 stw r7,spcFlags(r3) ; Update the special flags
2083 mfsrr1 r6 ; Get the current MSR value
2084
2085 lwz r4,famguestmsr(r13) ; Load guest srr1
2086 lis r1,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
2087 ori r1,r1,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
2088 and r4,r4,r1 ; Keep only the controllable bits
2089 oris r4,r4,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
2090 ori r4,r4,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
2091 rlwimi r4,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
2092 rlwimi r4,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
2093 beq ufpnokey ; Branch if not key switch
2094 mr r2,r7 ; Save r7
2095 rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
2096 cmpw cr0,r7,r2 ; Is userProtKeybit changed?
2097 beq ufpnokey ; No, go to ufpnokey
2098 mr r5,r3 ; Get the per_proc area
2099 stw r7,spcFlags(r3) ; Update the special flags
2100
2101 bt++ pf64Bitb,ufpsave64 ; Go do this on a 64-bit machine...
2102
2103 lwz r3,next_savearea+4(r5) ; Get the exception save area
2104 stw r8,saver8+4(r3) ; Save r8
2105 stw r9,saver9+4(r3) ; Save r9
2106 stw r10,saver10+4(r3) ; Save r10
2107 stw r11,saver11+4(r3) ; Save r11
2108 stw r12,saver12+4(r3) ; Save r12
2109 stw r13,saver13+4(r3) ; Save r13
2110 stw r14,saver14+4(r3) ; Save r14
2111 stw r15,saver15+4(r3) ; Save r15
2112 stw r16,saver16+4(r3) ; Save r16
2113 stw r17,saver17+4(r3) ; Save r17
2114 stw r18,saver18+4(r3) ; Save r18
2115 stw r19,saver19+4(r3) ; Save r19
2116 stw r20,saver20+4(r3) ; Save r20
2117 stw r21,saver21+4(r3) ; Save r21
2118 stw r22,saver22+4(r3) ; Save r22
2119 stw r23,saver23+4(r3) ; Save r23
2120 stw r24,saver24+4(r3) ; Save r24
2121 stw r25,saver25+4(r3) ; Save r25
2122 stw r26,saver26+4(r3) ; Save r26
2123 stw r27,saver27+4(r3) ; Save r27
2124 stw r28,saver28+4(r3) ; Save r28
2125 stw r29,saver29+4(r3) ; Save r29
2126 stw r30,saver30+4(r3) ; Save r30
2127 stw r31,saver31+4(r3) ; Save r31
2128 b ufpsaveres ; Continue
2129
2130 ufpsave64:
2131 ld r3,next_savearea(r5) ; Get the exception save area
2132 std r8,saver8(r3) ; Save r8
2133 std r9,saver9(r3) ; Save r9
2134 std r10,saver10(r3) ; Save r10
2135 std r11,saver11(r3) ; Save r11
2136 std r12,saver12(r3) ; Save r12
2137 std r13,saver13(r3) ; Save r13
2138 std r14,saver14(r3) ; Save r14
2139 std r15,saver15(r3) ; Save r15
2140 std r16,saver16(r3) ; Save r16
2141 std r17,saver17(r3) ; Save r17
2142 std r18,saver18(r3) ; Save r18
2143 std r19,saver19(r3) ; Save r19
2144 std r20,saver20(r3) ; Save r20
2145 std r21,saver21(r3) ; Save r21
2146 std r22,saver22(r3) ; Save r22
2147 std r23,saver23(r3) ; Save r23
2148 std r24,saver24(r3) ; Save r24
2149 std r25,saver25(r3) ; Save r25
2150 std r26,saver26(r3) ; Save r26
2151 std r27,saver27(r3) ; Save r27
2152 std r28,saver28(r3) ; Save r28
2153 std r29,saver29(r3) ; Save r29
2154 mfxer r2 ; Get xer
2155 std r30,saver30(r3) ; Save r30
2156 std r31,saver31(r3) ; Save r31
2157 std r2,savexer(r3) ; Save xer
2158
2159 ufpsaveres:
2160 mflr r20 ; Get lr
2161 li r2,1 ; Set to 1
2162 stw r7,spcFlags(r5) ; Update the special flags
2163 mr r13,r3 ; Set current savearea
2164 mr r21,r4 ; Save r4
2165 sth r2,ppInvSeg(r5) ; Force a reload of the SRs
2166 mr r29,r5 ; Get the per_proc area
2167 mr r3,r4 ; Set the MSR value we are going to use
2168 bl EXT(switchSegs) ; Go handle the segment registers/STB
2169 mr r3,r13 ; Set current savearea
2170 mr r4,r21 ; Restore r4
2171 mtlr r20 ; Set lr
2172
2173 bt++ pf64Bitb,ufprestore64 ; Go do this on a 64-bit machine...
2174 lwz r8,saver8+4(r3) ; Load r8
2175 lwz r9,saver9+4(r3) ; Load r9
2176 lwz r10,saver10+4(r3) ; Load r10
2177 lwz r11,saver11+4(r3) ; Load r11
2178 lwz r12,saver12+4(r3) ; Load r12
2179 lwz r13,saver13+4(r3) ; Load r13
2180 lwz r14,saver14+4(r3) ; Load r14
2181 lwz r15,saver15+4(r3) ; Load r15
2182 lwz r16,saver16+4(r3) ; Load r16
2183 lwz r17,saver17+4(r3) ; Load r17
2184 lwz r18,saver18+4(r3) ; Load r18
2185 lwz r19,saver19+4(r3) ; Load r19
2186 lwz r20,saver20+4(r3) ; Load r20
2187 lwz r21,saver21+4(r3) ; Load r21
2188 lwz r22,saver22+4(r3) ; Load r22
2189 lwz r23,saver23+4(r3) ; Load r23
2190 lwz r24,saver24+4(r3) ; Load r24
2191 lwz r25,saver25+4(r3) ; Load r25
2192 lwz r26,saver26+4(r3) ; Load r26
2193 lwz r27,saver27+4(r3) ; Load r27
2194 lwz r28,saver28+4(r3) ; Load r28
2195 lwz r29,saver29+4(r3) ; Load r29
2196 lwz r30,saver30+4(r3) ; Load r30
2197 lwz r31,saver31+4(r3) ; Load r31
2198 b ufpnokey ; Continue
2199 ufprestore64:
2200 ld r2,savexer(r3) ; Load xer
2201 ld r8,saver8(r3) ; Load r8
2202 ld r9,saver9(r3) ; Load r9
2203 ld r10,saver10(r3) ; Load r10
2204 mtxer r2 ; Restore xer
2205 ld r11,saver11(r3) ; Load r11
2206 ld r12,saver12(r3) ; Load r12
2207 ld r13,saver13(r3) ; Load r13
2208 ld r14,saver14(r3) ; Load r14
2209 ld r15,saver15(r3) ; Load r15
2210 ld r16,saver16(r3) ; Load r16
2211 ld r17,saver17(r3) ; Load r17
2212 ld r18,saver18(r3) ; Load r18
2213 ld r19,saver19(r3) ; Load r19
2214 ld r20,saver20(r3) ; Load r20
2215 ld r21,saver21(r3) ; Load r21
2216 ld r22,saver22(r3) ; Load r22
2217 ld r23,saver23(r3) ; Load r23
2218 ld r24,saver24(r3) ; Load r24
2219 ld r25,saver25(r3) ; Load r25
2220 ld r26,saver26(r3) ; Load r26
2221 ld r27,saver27(r3) ; Load r27
2222 ld r28,saver28(r3) ; Load r28
2223 ld r29,saver29(r3) ; Load r29
2224 ld r30,saver30(r3) ; Load r30
2225 ld r31,saver31(r3) ; Load r31
2226 ufpnokey:
2227 mfsprg r3,0 ; Get the per_proc area
2228 mtsrr1 r4 ; Set srr1
2229 lwz r0,famguestr0(r13) ; Load r0
2230 lwz r1,famguestr1(r13) ; Load r1
2231 lwz r2,famguestr2(r13) ; Load r2
2232 lwz r3,famguestr3(r13) ; Load r3
2233 lwz r4,famguestr4(r13) ; Load r4
2234 lwz r5,famguestr5(r13) ; Load r5
2235 lwz r6,famguestr6(r13) ; Load r6
2236 lwz r7,famguestr7(r13) ; Load r7
2237 ufpVMret:
2238 mfsprg r13,2 ; Restore R13
2239 bt++ pf64Bitb,ufpVMrfi64 ; Go do this on a 64-bit machine...
2240 mtcrf 0xFF,r11 ; Restore CR
2241 mfsprg r11,3 ; Restore R11
2242 rfi ; All done, go back...
2243 ufpVMrfi64:
2244 mtcrf 0xFF,r11 ; Restore CR
2245 mfsprg r11,3 ; Restore R11
2246 rfid
2247
2248 ufpX: ; Here if the virtual machine is 64-bit
2249 bt cr5_eq,ufpXResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest
2250 cmplwi cr5,r4,7 ; Is first arg in range 0-7?
2251 bgt cr5,ufpXVMret ; Return if not in the range
2252 slwi r4,r4,3 ; multiply index by 8
2253 la r3,famguestXr0(r13) ; Load the base address
2254 bt cr2_eq,ufpXSetGuestReg ; Set/get selector
2255 ; ufpXGetGuestReg
2256 ldx r3,r4,r3 ; Load the guest register
2257 b ufpXVMret ; Return
2258 ufpXSetGuestReg:
2259 stdx r5,r4,r3 ; Update the guest register
2260 li r3,0 ; Set return value
2261 b ufpXVMret ; Return
2262 ufpXResumeGuest:
2263 lwz r7,spcFlags(r3) ; Pick up the special flags
2264 mtsrr0 r4 ; Set srr0
2265 rlwinm. r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl
2266 rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
2267 stw r7,spcFlags(r3) ; Update the special flags
2268 mfsrr1 r6 ; Get the current MSR value
2269
2270 ld r4,famguestXmsr(r13) ; Load guest srr1
2271 lis r1,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
2272 ori r1,r1,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
2273 and r4,r4,r1 ; Keep only the controllable bits
2274 oris r4,r4,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
2275 ori r4,r4,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
2276 rlwimi r4,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
2277 rlwimi r4,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
2278 beq ufpXnokey ; Branch if not key switch
2279 mr r2,r7 ; Save r7
2280 rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
2281 cmpw cr0,r7,r2 ; Is userProtKeybit changed?
2282 beq ufpXnokey ; No, go to ufpXnokey
2283 mr r5,r3 ; Get the per_proc area
2284 stw r7,spcFlags(r3) ; Update the special flags
2285
2286 ld r3,next_savearea(r5) ; Get the exception save area
2287 std r8,saver8(r3) ; Save r8
2288 std r9,saver9(r3) ; Save r9
2289 std r10,saver10(r3) ; Save r10
2290 std r11,saver11(r3) ; Save r11
2291 std r12,saver12(r3) ; Save r12
2292 std r13,saver13(r3) ; Save r13
2293 std r14,saver14(r3) ; Save r14
2294 std r15,saver15(r3) ; Save r15
2295 std r16,saver16(r3) ; Save r16
2296 std r17,saver17(r3) ; Save r17
2297 std r18,saver18(r3) ; Save r18
2298 std r19,saver19(r3) ; Save r19
2299 std r20,saver20(r3) ; Save r20
2300 std r21,saver21(r3) ; Save r21
2301 std r22,saver22(r3) ; Save r22
2302 std r23,saver23(r3) ; Save r23
2303 std r24,saver24(r3) ; Save r24
2304 std r25,saver25(r3) ; Save r25
2305 std r26,saver26(r3) ; Save r26
2306 std r27,saver27(r3) ; Save r27
2307 std r28,saver28(r3) ; Save r28
2308 std r29,saver29(r3) ; Save r29
2309 mfxer r2 ; Get xer
2310 std r30,saver30(r3) ; Save r30
2311 std r31,saver31(r3) ; Save r31
2312 std r2,savexer(r3) ; Save xer
2313
2314 mflr r20 ; Get lr
2315 li r2,1 ; Set to 1
2316 stw r7,spcFlags(r5) ; Update the special flags
2317 mr r13,r3 ; Set current savearea
2318 mr r21,r4 ; Save r4
2319 sth r2,ppInvSeg(r5) ; Force a reload of the SRs
2320 mr r29,r5 ; Get the per_proc area
2321 mr r3,r4 ; Set the MSR value we are going to use
2322 bl EXT(switchSegs) ; Go handle the segment registers/STB
2323 mr r3,r13 ; Set current savearea
2324 mr r4,r21 ; Restore r4
2325 mtlr r20 ; Set lr
2326
2327 ld r2,savexer(r3) ; Load xer
2328 ld r8,saver8(r3) ; Load r8
2329 ld r9,saver9(r3) ; Load r9
2330 ld r10,saver10(r3) ; Load r10
2331 mtxer r2 ; Restore xer
2332 ld r11,saver11(r3) ; Load r11
2333 ld r12,saver12(r3) ; Load r12
2334 ld r13,saver13(r3) ; Load r13
2335 ld r14,saver14(r3) ; Load r14
2336 ld r15,saver15(r3) ; Load r15
2337 ld r16,saver16(r3) ; Load r16
2338 ld r17,saver17(r3) ; Load r17
2339 ld r18,saver18(r3) ; Load r18
2340 ld r19,saver19(r3) ; Load r19
2341 ld r20,saver20(r3) ; Load r20
2342 ld r21,saver21(r3) ; Load r21
2343 ld r22,saver22(r3) ; Load r22
2344 ld r23,saver23(r3) ; Load r23
2345 ld r24,saver24(r3) ; Load r24
2346 ld r25,saver25(r3) ; Load r25
2347 ld r26,saver26(r3) ; Load r26
2348 ld r27,saver27(r3) ; Load r27
2349 ld r28,saver28(r3) ; Load r28
2350 ld r29,saver29(r3) ; Load r29
2351 ld r30,saver30(r3) ; Load r30
2352 ld r31,saver31(r3) ; Load r31
2353 ufpXnokey:
2354 mtsrr1 r4 ; Set srr1
2355 ld r0,famguestXr0(r13) ; Load r0
2356 ld r1,famguestXr1(r13) ; Load r1
2357 ld r2,famguestXr2(r13) ; Load r2
2358 ld r3,famguestXr3(r13) ; Load r3
2359 ld r4,famguestXr4(r13) ; Load r4
2360 ld r5,famguestXr5(r13) ; Load r5
2361 ld r6,famguestXr6(r13) ; Load r6
2362 ld r7,famguestXr7(r13) ; Load r7
2363 ufpXVMret:
2364 mfsprg r13,2 ; Restore R13
2365 mtcrf 0xFF,r11 ; Restore CR
2366 mfsprg r11,3 ; Restore R11
2367 rfid
2368