]> git.saurik.com Git - apple/xnu.git/blame - osfmk/ppc/vmachmon_asm.s
xnu-792.25.20.tar.gz
[apple/xnu.git] / osfmk / ppc / vmachmon_asm.s
CommitLineData
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
1c79356b 3 *
6601e61a 4 * @APPLE_LICENSE_HEADER_START@
1c79356b 5 *
6601e61a
A
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
8f6c56a5 11 *
6601e61a
A
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
6601e61a
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
8f6c56a5 19 *
6601e61a 20 * @APPLE_LICENSE_HEADER_END@
1c79356b
A
21 */
22#include <assym.s>
23#include <debug.h>
24#include <ppc/asm.h>
25#include <ppc/proc_reg.h>
26#include <ppc/exception.h>
27
28/*
29 * This file contains implementations for the Virtual Machine Monitor
30 * facility.
31 */
32
55e303ae
A
33#define vmmMapDone 31
34#define vmmDoing64 30
35
1c79356b
A
36
37/*
38 * int vmm_dispatch(savearea, act);
39
40 * vmm_dispatch is a PPC only system call. It is used with a selector (first
41 * parameter) to determine what function to enter. This is treated as an extension
42 * of hw_exceptions.
43 *
44 * Inputs:
45 * R4 = current activation
46 * R16 = current thread
47 * R30 = current savearea
48 */
49
55e303ae 50 .align 5 ; Line up on cache line
1c79356b
A
51 .globl EXT(vmm_dispatch_table)
52
53LEXT(vmm_dispatch_table)
54
55 /* Don't change the order of these routines in the table. It's */
56 /* OK to add new routines, but they must be added at the bottom. */
57
58 .long EXT(vmm_get_version_sel) ; Get the version of the VMM interface
d7e50217 59 .long 0 ; Not valid in Fam
1c79356b 60 .long EXT(vmm_get_features_sel) ; Get the features of the VMM interface
d7e50217 61 .long 0 ; Not valid in Fam
1c79356b 62 .long EXT(vmm_init_context_sel) ; Initializes a new VMM context
d7e50217 63 .long 0 ; Not valid in Fam
1c79356b 64 .long EXT(vmm_tear_down_context) ; Tears down a previously-allocated VMM context
d7e50217 65 .long 0 ; Not valid in Fam
1c79356b 66 .long EXT(vmm_tear_down_all) ; Tears down all VMMs
d7e50217 67 .long 0 ; Not valid in Fam
55e303ae 68 .long EXT(vmm_map_page32) ; Maps a page from the main address space into the VM space - supports 32-bit
d7e50217 69 .long 1 ; Valid in Fam
55e303ae 70 .long EXT(vmm_get_page_mapping32) ; Returns client va associated with VM va - supports 32-bit
d7e50217 71 .long 1 ; Valid in Fam
55e303ae 72 .long EXT(vmm_unmap_page32) ; Unmaps a page from the VM space - supports 32-bit
d7e50217 73 .long 1 ; Valid in Fam
55e303ae 74 .long EXT(vmm_unmap_all_pages) ; Unmaps all pages from the VM space
d7e50217 75 .long 1 ; Valid in Fam
55e303ae 76 .long EXT(vmm_get_page_dirty_flag32) ; Gets the change bit for a page and optionally clears it - supports 32-bit
d7e50217 77 .long 1 ; Valid in Fam
1c79356b 78 .long EXT(vmm_get_float_state) ; Gets current floating point state
d7e50217 79 .long 0 ; not valid in Fam
1c79356b 80 .long EXT(vmm_get_vector_state) ; Gets current vector state
d7e50217 81 .long 0 ; Not valid in Fam
1c79356b 82 .long EXT(vmm_set_timer) ; Sets a timer value
d7e50217 83 .long 1 ; Valid in Fam
1c79356b 84 .long EXT(vmm_get_timer) ; Gets a timer value
d7e50217 85 .long 1 ; Valid in Fam
1c79356b 86 .long EXT(switchIntoVM) ; Switches to the VM context
d7e50217 87 .long 1 ; Valid in Fam
55e303ae 88 .long EXT(vmm_protect_page32) ; Sets protection values for a page - supports 32-bit
d7e50217 89 .long 1 ; Valid in Fam
55e303ae 90 .long EXT(vmm_map_execute32) ; Maps a page an launches VM - supports 32-bit
d7e50217 91 .long 1 ; Not valid in Fam
55e303ae 92 .long EXT(vmm_protect_execute32) ; Sets protection values for a page and launches VM - supports 32-bit
d7e50217 93 .long 1 ; Valid in Fam
55e303ae 94 .long EXT(vmm_map_list32) ; Maps a list of pages - supports 32-bit
d7e50217 95 .long 1 ; Valid in Fam
55e303ae 96 .long EXT(vmm_unmap_list32) ; Unmaps a list of pages - supports 32-bit
d7e50217
A
97 .long 1 ; Valid in Fam
98 .long EXT(vmm_fam_reserved) ; exit from Fam to host
99 .long 1 ; Valid in Fam
100 .long EXT(vmm_fam_reserved) ; resume guest from Fam
101 .long 1 ; Valid in Fam
102 .long EXT(vmm_fam_reserved) ; get guest register from Fam
103 .long 1 ; Valid in Fam
104 .long EXT(vmm_fam_reserved) ; Set guest register from Fam
105 .long 1 ; Valid in Fam
91447636
A
106 .long EXT(vmm_activate_XA) ; Activate extended architecture features for a VM
107 .long 0 ; Not valid in Fam
108 .long EXT(vmm_deactivate_XA) ; Deactivate extended architecture features for a VM
55e303ae
A
109 .long 0 ; Not valid in Fam
110 .long EXT(vmm_get_XA) ; Get extended architecture features from a VM
111 .long 1 ; Valid in Fam
112 .long EXT(vmm_map_page) ; Map a host to guest address space - supports 64-bit
113 .long 1 ; Valid in Fam
114 .long EXT(vmm_get_page_mapping) ; Get host address of a guest page - supports 64-bit
115 .long 1 ; Valid in Fam
116 .long EXT(vmm_unmap_page) ; Unmap a guest page - supports 64-bit
117 .long 1 ; Valid in Fam
118 .long EXT(vmm_get_page_dirty_flag) ; Check if guest page modified - supports 64-bit
119 .long 1 ; Valid in Fam
120 .long EXT(vmm_protect_page) ; Sets protection values for a page - supports 64-bit
121 .long 1 ; Valid in Fam
122 .long EXT(vmm_map_execute) ; Map guest page and launch - supports 64-bit
123 .long 1 ; Valid in Fam
124 .long EXT(vmm_protect_execute) ; Set prot attributes and launch - supports 64-bit
125 .long 1 ; Valid in Fam
126 .long EXT(vmm_map_list64) ; Map a list of pages into guest address spaces - supports 64-bit
127 .long 1 ; Valid in Fam
128 .long EXT(vmm_unmap_list64) ; Unmap a list of pages from guest address spaces - supports 64-bit
129 .long 1 ; Valid in Fam
130 .long EXT(vmm_max_addr) ; Returns the maximum virtual address
131 .long 1 ; Valid in Fam
91447636
A
132#if 0
133 .long EXT(vmm_set_guest_memory) ; Set guest memory extent
134 .long 0 ; Not valid in FAM
135 .long EXT(vmm_purge_local) ; Purge all local guest mappings */
136 .long 1 ; Valid in FAM
137#endif
d7e50217 138 .set vmm_count,(.-EXT(vmm_dispatch_table))/8 ; Get the top number
1c79356b
A
139
140
141 .align 5
142 .globl EXT(vmm_dispatch)
143
144LEXT(vmm_dispatch)
145
55e303ae 146 lwz r11,saver3+4(r30) ; Get the selector
1c79356b
A
147 mr r3,r4 ; All of our functions want the activation as the first parm
148 lis r10,hi16(EXT(vmm_dispatch_table)) ; Get top half of table
149 cmplwi r11,kVmmExecuteVM ; Should we switch to the VM now?
150 cmplwi cr1,r11,vmm_count ; See if we have a valid selector
151 ori r10,r10,lo16(EXT(vmm_dispatch_table)) ; Get low half of table
55e303ae 152 lwz r4,saver4+4(r30) ; Get 1st parameter after selector
1c79356b 153 beq+ EXT(switchIntoVM) ; Yes, go switch to it....
d7e50217 154 rlwinm r11,r11,3,0,28 ; Index into table
55e303ae 155 bge- cr1,vmmBogus ; It is a bogus entry
d7e50217 156 add r12,r10,r11 ; Get the vmm dispatch syscall entry
91447636
A
157 mfsprg r10,1 ; Get the current activation
158 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
d7e50217
A
159 lwz r13,0(r12) ; Get address of routine
160 lwz r12,4(r12) ; Get validity flag
161 lwz r5,spcFlags(r10) ; Get per_proc special flags
162 cmpwi cr1,r12,0 ; Check Fam valid
163 rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
164 crand cr0_eq,cr1_eq,cr0_gt ; In Fam and Invalid syscall
165 beq vmmBogus ; Intercept to host
55e303ae
A
166 lwz r5,saver5+4(r30) ; Get 2nd parameter after selector - note that some of these parameters may actually be long longs
167 lwz r6,saver6+4(r30) ; Get 3rd parameter after selector
d7e50217 168 mtlr r13 ; Set the routine address
55e303ae
A
169 lwz r7,saver7+4(r30) ; Get 4th parameter after selector
170 lwz r8,saver8+4(r30) ; Get 5th parameter after selector
171 lwz r9,saver9+4(r30) ; Get 6th parameter after selector
d7e50217 172;
55e303ae
A
173; NOTE: some of the above parameters are actually long longs. We have glue code that transforms
174; all needed parameters and/or adds 32-/64-bit flavors to the needed functions.
1c79356b
A
175;
176
177 blrl ; Call function
55e303ae
A
178
179vmmRetPt: li r0,0 ; Clear this out
180 stw r0,saver3(r30) ; Make sure top of RC is clear
181 stw r3,saver3+4(r30) ; Pass back the return code
182 stw r0,saver4(r30) ; Make sure bottom of RC is clear (just in case)
183 stw r4,saver4+4(r30) ; Pass back the bottom return code (just in case)
1c79356b
A
184 li r3,1 ; Set normal return with check for AST
185 b EXT(ppcscret) ; Go back to handler...
186
d7e50217 187vmmBogus:
91447636
A
188 mfsprg r3,1 ; Get the current activation
189 lwz r10,ACT_PER_PROC(r3) ; Get the per_proc block
d7e50217
A
190 lwz r5,spcFlags(r10) ; Get per_proc special flags
191 rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
192 bne vmmexitcall ; Do it to it
193 li r3,0 ; Bogus selector, treat like a bogus system call
1c79356b
A
194 b EXT(ppcscret) ; Go back to handler...
195
196
197 .align 5
198 .globl EXT(vmm_get_version_sel)
199
200LEXT(vmm_get_version_sel) ; Selector based version of get version
201
202 lis r3,hi16(EXT(vmm_get_version))
203 ori r3,r3,lo16(EXT(vmm_get_version))
204 b selcomm
205
206
207 .align 5
208 .globl EXT(vmm_get_features_sel)
209
210LEXT(vmm_get_features_sel) ; Selector based version of get features
211
0b4e3aa0
A
212 lis r3,hi16(EXT(vmm_get_features))
213 ori r3,r3,lo16(EXT(vmm_get_features))
1c79356b
A
214 b selcomm
215
216
217 .align 5
218 .globl EXT(vmm_init_context_sel)
219
220LEXT(vmm_init_context_sel) ; Selector based version of init context
221
55e303ae
A
222 lwz r4,saver4+4(r30) ; Get the passed in version
223 lwz r5,saver5+4(r30) ; Get the passed in comm area
0b4e3aa0 224 lis r3,hi16(EXT(vmm_init_context))
55e303ae 225 stw r4,saver3+4(r30) ; Cheat and move this parameter over
0b4e3aa0 226 ori r3,r3,lo16(EXT(vmm_init_context))
55e303ae 227 stw r5,saver4+4(r30) ; Cheat and move this parameter over
1c79356b
A
228
229selcomm: mtlr r3 ; Set the real routine address
230 mr r3,r30 ; Pass in the savearea
231 blrl ; Call the function
232 b EXT(ppcscret) ; Go back to handler...
233
55e303ae
A
234 .align 5
235 .globl EXT(vmm_map_page32)
236
237LEXT(vmm_map_page32)
238 mr r9,r7 ; Move prot to correct parm
239 mr r8,r6 ; Move guest address to low half of long long
240 li r7,0 ; Clear high half of guest address
241 mr r6,r5 ; Move host address to low half of long long
242 li r5,0 ; Clear high half of host address
243 b EXT(vmm_map_page) ; Transition to real function...
244
245 .align 5
246 .globl EXT(vmm_get_page_mapping32)
247
248LEXT(vmm_get_page_mapping32)
249 mr r6,r5 ; Move guest address to low half of long long
250 li r5,0 ; Clear high half of guest address
251 bl EXT(vmm_get_page_mapping) ; Transition to real function...
252 mr r3,r4 ; Convert addr64_t to vm_offset_t, dropping top half
253 b vmmRetPt ; Join normal return...
254
255 .align 5
256 .globl EXT(vmm_unmap_page32)
257
258LEXT(vmm_unmap_page32)
259 mr r6,r5 ; Move guest address to low half of long long
260 li r5,0 ; Clear high half of guest address
261 b EXT(vmm_unmap_page) ; Transition to real function...
262
263 .align 5
264 .globl EXT(vmm_get_page_dirty_flag32)
265
266LEXT(vmm_get_page_dirty_flag32)
267 mr r7,r6 ; Move reset flag
268 mr r6,r5 ; Move guest address to low half of long long
269 li r5,0 ; Clear high half of guest address
270 b EXT(vmm_get_page_dirty_flag) ; Transition to real function...
271
272 .align 5
273 .globl EXT(vmm_protect_page32)
274
275LEXT(vmm_protect_page32)
276 mr r7,r6 ; Move protection bits
277 mr r6,r5 ; Move guest address to low half of long long
278 li r5,0 ; Clear high half of guest address
279 b EXT(vmm_protect_page) ; Transition to real function...
280
281 .align 5
282 .globl EXT(vmm_map_execute32)
283
284LEXT(vmm_map_execute32)
285 mr r9,r7 ; Move prot to correct parm
286 mr r8,r6 ; Move guest address to low half of long long
287 li r7,0 ; Clear high half of guest address
288 mr r6,r5 ; Move host address to low half of long long
289 li r5,0 ; Clear high half of host address
290 b EXT(vmm_map_execute) ; Transition to real function...
291
292 .align 5
293 .globl EXT(vmm_protect_execute32)
294
295LEXT(vmm_protect_execute32)
296 mr r7,r6 ; Move protection bits
297 mr r6,r5 ; Move guest address to low half of long long
298 li r5,0 ; Clear high half of guest address
299 b EXT(vmm_protect_execute) ; Transition to real function...
300
301 .align 5
302 .globl EXT(vmm_map_list32)
303
304LEXT(vmm_map_list32)
305 li r6,0 ; Set 32-bit flavor
306 b EXT(vmm_map_list) ; Go to common routine...
307
308 .align 5
309 .globl EXT(vmm_map_list64)
310
311LEXT(vmm_map_list64)
312 li r6,1 ; Set 64-bit flavor
313 b EXT(vmm_map_list) ; Go to common routine...
314
315 .align 5
316 .globl EXT(vmm_map_list32)
317
318LEXT(vmm_unmap_list32)
319 li r6,0 ; Set 32-bit flavor
320 b EXT(vmm_unmap_list) ; Go to common routine...
321
322 .align 5
323 .globl EXT(vmm_map_list64)
324
325LEXT(vmm_unmap_list64)
326 li r6,1 ; Set 64-bit flavor
327 b EXT(vmm_unmap_list) ; Go to common routine...
328
1c79356b
A
329/*
330 * Here is where we transition to the virtual machine.
331 *
332 * We will swap the register context in the savearea with that which is saved in our shared
333 * context area. We will validity check a bit and clear any nasty bits in the MSR and force
334 * the manditory ones on.
335 *
336 * Then we will setup the new address space to run with, and anything else that is normally part
337 * of a context switch.
338 *
0b4e3aa0
A
339 * The vmm_execute_vm entry point is for the fused vmm_map_execute and vmm_protect_execute
340 * calls. This is called, but never returned from. We always go directly back to the
341 * user from here.
342 *
1c79356b
A
343 *
344 */
345
0b4e3aa0
A
346
347 .align 5
348 .globl EXT(vmm_execute_vm)
349
350LEXT(vmm_execute_vm)
0b4e3aa0
A
351 lwz r30,ACT_MACT_PCB(r3) ; Restore the savearea pointer because it could be trash here
352 b EXT(switchIntoVM) ; Join common...
353
354
1c79356b
A
355 .align 5
356 .globl EXT(switchIntoVM)
357
358LEXT(switchIntoVM)
91447636
A
359 mfsprg r10,1 ; Get the current activation
360 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
55e303ae
A
361 rlwinm r31,r4,24,24,31 ; Get the address space
362 rlwinm r4,r4,0,24,31 ; Isolate the context id
363 lwz r28,vmmControl(r3) ; Pick up the control table address
1c79356b 364 subi r4,r4,1 ; Switch to zero offset
55e303ae 365 rlwinm. r2,r28,0,0,30 ; Is there a context there? (Note: we will ignore bit 31 so that we
1c79356b 366 ; do not try this while we are transitioning off to on
55e303ae 367 cmplwi cr1,r4,kVmmMaxContexts ; Is the index valid?
1c79356b 368 beq- vmmBogus ; Not started, treat like a bogus system call
55e303ae 369 subic. r31,r31,1 ; Make address space 0 based and test if we use default
1c79356b 370 mulli r2,r4,vmmCEntrySize ; Get displacement from index
55e303ae
A
371 bge- cr1,swvmmBogus ; Index is bogus...
372 add r2,r2,r28 ; Point to the entry
373 bge-- swvmmDAdsp ; There was an explicit address space request
374 mr r31,r4 ; Default the address space to the context ID
375
376swvmmDAdsp: la r2,vmmc(r2) ; Get the offset to the context array
377 lwz r8,vmmGFlags(r28) ; Get the general flags
1c79356b 378 lwz r4,vmmFlags(r2) ; Get the flags for the selected entry
55e303ae 379 crset vmmMapDone ; Assume we will be mapping something
1c79356b
A
380 lwz r5,vmmContextKern(r2) ; Get the context area address
381 rlwinm. r26,r4,0,vmmInUseb,vmmInUseb ; See if the slot is in use
55e303ae
A
382 cmplwi cr1,r31,kVmmMaxContexts ; See if we have a valid address space ID
383 rlwinm r8,r8,0,24,31 ; Clean up address space
384 beq-- swvmmBogus ; This context is no good...
385
386 la r26,vmmAdsp(r28) ; Point to the pmaps
387 sub r8,r8,r31 ; Get diff between launching address space - 1 and last mapped into (should be 1 if the same)
388 rlwinm r31,r31,2,0,29 ; Index to the pmap
389 cmplwi r8,1 ; See if we have the same address space
390 bge-- cr1,swvmmBogAdsp ; Address space is no good...
391 lwzx r31,r26,r31 ; Get the requested address space pmap
392 li r0,0 ; Get a 0 in case we need to trash redrive
393 lwz r15,spcFlags(r10) ; Get per_proc special flags
394 beq swvmmAdspOk ; Do not invalidate address space if we are launching the same
395 crclr vmmMapDone ; Clear map done flag
396 stb r0,vmmGFlags+3(r28) ; Clear the last mapped address space ID so we will not redrive later
1c79356b
A
397;
398; Here we check for any immediate intercepts. So far, the only
0b4e3aa0
A
399; two of these are a timer pop and and external stop. We will not dispatch if
400; either is true. They need to either reset the timer (i.e. set timer
401; to 0) or to set a future time, or if it is external stop, set the vmmXStopRst flag.
1c79356b
A
402;
403
55e303ae
A
404swvmmAdspOk:
405 rlwinm. r0,r15,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
406 stw r31,vmmPmap(r2) ; Save the last dispatched address space
407 bne vmmFamGuestResume
0b4e3aa0
A
408 lwz r6,vmmCntrl(r5) ; Get the control field
409 rlwinm. r7,r6,0,vmmXStartb,vmmXStartb ; Clear all but start bit
410 beq+ swvmChkStop ; Do not reset stop
411 andc r6,r6,r7 ; Clear it
412 li r8,vmmFlags ; Point to the flags
413 stw r6,vmmCntrl(r5) ; Set the control field
414
415swvmtryx: lwarx r4,r8,r2 ; Pick up the flags
416 rlwinm r4,r4,0,vmmXStopb+1,vmmXStopb-1 ; Clear the stop bit
417 stwcx. r4,r8,r2 ; Save the updated field
418 bne- swvmtryx ; Try again...
419
420swvmChkStop:
421 rlwinm. r26,r4,0,vmmXStopb,vmmXStopb ; Is this VM stopped?
55e303ae 422 bne-- swvmSetStop ; Yes...
0b4e3aa0 423
9bccf70c 424 rlwinm. r26,r4,0,vmmTimerPopb,vmmTimerPopb ; Did the timer go pop?
55e303ae
A
425 cmplwi cr1,r31,0 ; Is there actually an address space defined?
426 bne-- svvmTimerPop ; Yes...
427
428;
429; Special note: we need to intercept any attempt to launch a guest into a non-existent address space.
430; We will just go emulate an ISI if there is not one.
431;
432
433 beq-- cr1,swvmEmulateISI ; We are trying to launch into an undefined address space. This is not so good...
1c79356b
A
434
435;
436; Here is where we actually swap into the VM (alternate) context.
437; We will bulk do a wholesale swap of the registers in the context area (the VMs)
438; with the ones in the savearea (our main code). During the copy, we will fix up the
439; MSR, forcing on a few bits and turning off a few others. Then we will deal with the
440; PMAP and other per_proc stuff. Finally, we will exit back through the main exception
441; handler to deal with unstacking saveareas and ASTs, etc.
442;
443
444swvmDoSwitch:
445
446;
447; First, we save the volatile registers we care about. Remember, all register
448; handling here is pretty funky anyway, so we just pick the ones that are ok.
449;
450 mr r26,r3 ; Save the activation pointer
1c79356b 451
9bccf70c
A
452 la r11,vmmFacCtx(r2) ; Point to the virtual machine facility context
453 mr r27,r2 ; Save the context entry
454 stw r11,deferctx(r3) ; Start using the virtual machine facility context when we exit
1c79356b
A
455
456 lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
55e303ae 457 mr r3,r31 ; Get the pointer to the PMAP
1c79356b
A
458 oris r15,r11,hi16(runningVM) ; ; Show that we are swapped to the VM right now
459 bl EXT(hw_set_user_space_dis) ; Swap the address spaces
460 lwz r17,vmmFlags(r27) ; Get the status flags
d7e50217
A
461 lwz r20,vmmContextKern(r27) ; Get the state page kernel addr
462 lwz r21,vmmCntrl(r20) ; Get vmmCntrl
463 rlwinm. r22,r21,0,vmmFamEnab,vmmFamEnab ; Is vmmFamEnab set?
55e303ae
A
464 lwz r22,vmmXAFlgs(r27) ; Get the eXtended Architecture flags
465 stw r22,VMMXAFlgs(r10) ; Store vmmXAFlgs in per_proc VMMXAFlgs
d7e50217 466 beq swvmNoFam ; No Fam intercept
55e303ae 467 rlwinm. r22,r22,0,0,0 ; Are we doing a 64-bit virtual machine?
d7e50217
A
468 rlwimi r15,r21,32+vmmFamSetb-FamVMmodebit,FamVMmodebit,FamVMmodebit ; Set FamVMmode bit
469 rlwinm r21,r21,0,vmmFamSetb+1,vmmFamSetb-1 ; Clear FamSet bit
55e303ae 470 bne swvmXfamintercpt
d7e50217 471 lwz r22,famintercepts(r20) ; Load intercept bit field
55e303ae
A
472 b swvmfamintercptres
473swvmXfamintercpt:
474 lwz r22,faminterceptsX(r20) ; Load intercept bit field
475swvmfamintercptres:
d7e50217
A
476 stw r21,vmmCntrl(r20) ; Update vmmCntrl
477 lwz r19,vmmContextPhys(r27) ; Get vmmFAMarea address
478 stw r22,vmmFAMintercept(r27) ; Get vmmFAMintercept
479 stw r22,FAMintercept(r10) ; Store vmmFAMintercept in per_proc FAMintercept
480 stw r19,VMMareaPhys(r10) ; Store VMMareaPhys
481 oris r15,r15,hi16(FamVMena) ; Set FamVMenabit
482swvmNoFam:
1c79356b 483 stw r27,vmmCEntry(r26) ; Remember what context we are running
55e303ae 484 bf++ vmmMapDone,swvmNoMap ; We have not mapped anything or it was not for this address space
1c79356b
A
485
486;
487; This little bit of hoopala here (triggered by vmmMapDone) is
488; a performance enhancement. This will change the returning savearea
489; to look like we had a DSI rather than a system call. Then, setting
490; the redrive bit, the exception handler will redrive the exception as
491; a DSI, entering the last mapped address into the hash table. This keeps
492; double faults from happening. Note that there is only a gain if the VM
493; takes a fault, then the emulator resolves it only, and then begins
494; the VM execution again. It seems like this should be the normal case.
55e303ae
A
495;
496; Note that we need to revisit this when we move the virtual machines to the task because
497; then it will be possible for more than one thread to access this stuff at the same time.
1c79356b
A
498;
499
500 lwz r3,SAVflags(r30) ; Pick up the savearea flags
55e303ae
A
501 lwz r2,vmmLastMap(r28) ; Get the last mapped address
502 lwz r14,vmmLastMap+4(r28) ; Get the last mapped address low half
1c79356b
A
503 li r20,T_DATA_ACCESS ; Change to DSI fault
504 oris r3,r3,hi16(SAVredrive) ; Set exception redrive
505 stw r2,savedar(r30) ; Set the DAR to the last thing we mapped
55e303ae 506 stw r14,savedar+4(r30) ; Set the DAR to the last thing we mapped
1c79356b
A
507 stw r3,SAVflags(r30) ; Turn on the redrive request
508 lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
55e303ae 509 li r0,0 ; Clear
1c79356b
A
510 stw r20,saveexception(r30) ; Say we need to emulate a DSI
511 stw r2,savedsisr(r30) ; Pretend we have a PTE miss
55e303ae 512 stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of
1c79356b 513
0b4e3aa0
A
514swvmNoMap: lwz r20,vmmContextKern(r27) ; Get the comm area
515 rlwimi r15,r17,32-(floatCngbit-vmmFloatCngdb),floatCngbit,vectorCngbit ; Shift and insert changed bits
516 lwz r20,vmmCntrl(r20) ; Get the control flags
1c79356b 517 rlwimi r17,r11,8,24,31 ; Save the old spf flags
0b4e3aa0 518 rlwimi r15,r20,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
1c79356b
A
519 stw r15,spcFlags(r10) ; Set per_proc copy of the special flags
520 stw r15,ACT_MACT_SPF(r26) ; Get the special flags
521
522 stw r17,vmmFlags(r27) ; Set the status flags
523
524 bl swapCtxt ; First, swap the general register state
525
0b4e3aa0 526 lwz r17,vmmContextKern(r27) ; Get the comm area back
9bccf70c 527 la r25,vmmFacCtx(r27) ; Point to the facility context
0b4e3aa0 528 lwz r15,vmmCntrl(r17) ; Get the control flags again
91447636
A
529 mfsprg r29,1 ; Get the current activation
530 lwz r29,ACT_PER_PROC(r29) ; Get the per_proc block
1c79356b 531
9bccf70c
A
532;
533; Check if there is new floating point context to load
534;
535
1c79356b 536 rlwinm. r0,r15,0,vmmFloatLoadb,vmmFloatLoadb ; Are there new floating point values?
9bccf70c 537 lhz r29,PP_CPU_NUMBER(r29) ; Get our cpu number
1c79356b
A
538 li r14,vmmppcFPRs ; Get displacement to the new values
539 andc r15,r15,r0 ; Clear the bit
540 beq+ swvmNoNewFloats ; Nope, good...
541
9bccf70c
A
542 lwz r19,FPUcpu(r25) ; Get the last CPU we ran on
543
544 stw r29,FPUcpu(r25) ; Claim the context for ourselves
545
546 eieio ; Make sure this stays in order
547
91447636
A
548 lis r18,hi16(EXT(PerProcTable)) ; Set base PerProcTable
549 mulli r19,r19,ppeSize ; Find offset to the owner per_proc_entry
550 ori r18,r18,lo16(EXT(PerProcTable)) ; Set base PerProcTable
9bccf70c 551 li r16,FPUowner ; Displacement to float owner
91447636
A
552 add r19,r18,r19 ; Point to the owner per_proc_entry
553 lwz r19,ppe_vaddr(r19) ; Point to the owner per_proc
9bccf70c
A
554
555swvminvfpu: lwarx r18,r16,r19 ; Get the owner
55e303ae
A
556
557 sub r0,r18,r25 ; Subtract one from the other
558 sub r3,r25,r18 ; Subtract the other from the one
559 or r3,r3,r0 ; Combine them
560 srawi r3,r3,31 ; Get a 0 if equal or -1 of not
561 and r18,r18,r3 ; Make 0 if same, unchanged if not
562 stwcx. r18,r16,r19 ; Try to invalidate it
563 bne-- swvminvfpu ; Try again if there was a collision...
564
565 lwz r3,FPUsave(r25) ; Get the FPU savearea
9bccf70c 566 dcbt r14,r17 ; Touch in first line of new stuff
1c79356b
A
567 mr. r3,r3 ; Is there one?
568 bne+ swvmGotFloat ; Yes...
569
570 bl EXT(save_get) ; Get a savearea
571
9bccf70c
A
572 li r7,SAVfloat ; Get floating point flag
573 stw r26,SAVact(r3) ; Save our activation
574 li r0,0 ; Get a zero
575 stb r7,SAVflags+2(r3) ; Set that this is floating point
55e303ae 576 stw r0,SAVprev+4(r3) ; Clear the back chain
9bccf70c
A
577 stw r0,SAVlevel(r3) ; We are always at level 0 (user state)
578
579 stw r3,FPUsave(r25) ; Chain us to context
1c79356b
A
580
581swvmGotFloat:
1c79356b
A
582 la r4,savefp0(r3) ; Point to the destination
583 mr r21,r3 ; Save the save area
584 la r3,vmmppcFPRs(r17) ; Point to the source
9bccf70c 585 li r5,32*8 ; Get the size (32 FPRs at 8 bytes each)
1c79356b
A
586
587 bl EXT(bcopy) ; Copy the new values
9bccf70c 588
1c79356b
A
589 lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
590 stw r15,vmmCntrl(r17) ; Save the control flags sans vmmFloatLoad
591 rlwinm r11,r11,0,floatCngbit+1,floatCngbit-1 ; Clear the changed bit here
592 lwz r14,vmmStat(r17) ; Get the status flags
91447636
A
593 mfsprg r10,1 ; Get the current activation
594 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
1c79356b
A
595 stw r11,ACT_MACT_SPF(r26) ; Get the special flags
596 rlwinm r14,r14,0,vmmFloatCngdb+1,vmmFloatCngdb-1 ; Clear the changed flag
597 stw r11,spcFlags(r10) ; Set per_proc copy of the special flags
598 stw r14,vmmStat(r17) ; Set the status flags sans vmmFloatCngd
1c79356b 599
9bccf70c
A
600;
601; Check if there is new vector context to load
602;
603
1c79356b
A
604swvmNoNewFloats:
605 rlwinm. r0,r15,0,vmmVectLoadb,vmmVectLoadb ; Are there new vector values?
606 li r14,vmmppcVRs ; Get displacement to the new values
607 andc r15,r15,r0 ; Clear the bit
608 beq+ swvmNoNewVects ; Nope, good...
609
9bccf70c
A
610 lwz r19,VMXcpu(r25) ; Get the last CPU we ran on
611
612 stw r29,VMXcpu(r25) ; Claim the context for ourselves
613
614 eieio ; Make sure this stays in order
615
91447636
A
616 lis r18,hi16(EXT(PerProcTable)) ; Set base PerProcTable
617 mulli r19,r19,ppeSize ; Find offset to the owner per_proc_entry
618 ori r18,r18,lo16(EXT(PerProcTable)) ; Set base PerProcTable
9bccf70c 619 li r16,VMXowner ; Displacement to vector owner
91447636
A
620 add r19,r18,r19 ; Point to the owner per_proc_entry
621 lwz r19,ppe_vaddr(r19) ; Point to the owner per_proc
9bccf70c
A
622
623swvminvvec: lwarx r18,r16,r19 ; Get the owner
55e303ae
A
624
625 sub r0,r18,r25 ; Subtract one from the other
626 sub r3,r25,r18 ; Subtract the other from the one
627 or r3,r3,r0 ; Combine them
628 srawi r3,r3,31 ; Get a 0 if equal or -1 of not
629 and r18,r18,r3 ; Make 0 if same, unchanged if not
630 stwcx. r18,r16,r19 ; Try to invalidate it
631 bne-- swvminvfpu ; Try again if there was a collision...
9bccf70c
A
632
633swvminvved: lwz r3,VMXsave(r25) ; Get the vector savearea
634 dcbt r14,r17 ; Touch in first line of new stuff
1c79356b
A
635 mr. r3,r3 ; Is there one?
636 bne+ swvmGotVect ; Yes...
637
638 bl EXT(save_get) ; Get a savearea
639
9bccf70c
A
640 li r7,SAVvector ; Get the vector type flag
641 stw r26,SAVact(r3) ; Save our activation
642 li r0,0 ; Get a zero
643 stb r7,SAVflags+2(r3) ; Set that this is vector
55e303ae 644 stw r0,SAVprev+4(r3) ; Clear the back chain
9bccf70c
A
645 stw r0,SAVlevel(r3) ; We are always at level 0 (user state)
646
647 stw r3,VMXsave(r25) ; Chain us to context
1c79356b
A
648
649swvmGotVect:
1c79356b
A
650 mr r21,r3 ; Save the pointer to the savearea
651 la r4,savevr0(r3) ; Point to the destination
652 la r3,vmmppcVRs(r17) ; Point to the source
9bccf70c 653 li r5,32*16 ; Get the size (32 vectors at 16 bytes each)
1c79356b
A
654
655 bl EXT(bcopy) ; Copy the new values
656
9bccf70c
A
657 lwz r8,savevrsave(r30) ; Get the current VRSave
658
1c79356b
A
659 lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
660 stw r15,vmmCntrl(r17) ; Save the control flags sans vmmVectLoad
661 rlwinm r11,r11,0,vectorCngbit+1,vectorCngbit-1 ; Clear the changed bit here
55e303ae 662 stw r8,savevrvalid(r21) ; Set the current VRSave as valid saved
1c79356b 663 lwz r14,vmmStat(r17) ; Get the status flags
91447636
A
664 mfsprg r10,1 ; Get the current activation
665 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
1c79356b
A
666 stw r11,ACT_MACT_SPF(r26) ; Get the special flags
667 rlwinm r14,r14,0,vmmVectCngdb+1,vmmVectCngdb-1 ; Clear the changed flag
1c79356b
A
668 stw r11,spcFlags(r10) ; Set per_proc copy of the special flags
669 stw r14,vmmStat(r17) ; Set the status flags sans vmmVectCngd
1c79356b
A
670
671swvmNoNewVects:
672 li r3,1 ; Show normal exit with check for AST
91447636 673 mr r16,r26 ; Restore the thread pointer
1c79356b
A
674 b EXT(ppcscret) ; Go back to handler...
675
55e303ae
A
676 .align 5
677
678swvmmBogus: li r2,kVmmBogusContext ; Set bogus index return
679 li r0,0 ; Clear
680 li r3,1 ; Set normal return with check for AST
681 stw r0,saver3(r30) ; Clear upper half
682 stw r2,saver3+4(r30) ; Pass back the return code
683 b EXT(ppcscret) ; Go back to handler...
684
685swvmmBogAdsp:
686 li r2,kVmmInvalidAdSpace ; Set bogus address space return
687 li r0,0 ; Clear
688 li r3,1 ; Set normal return with check for AST
689 stw r0,saver3(r30) ; Clear upper half
690 stw r2,saver3+4(r30) ; Pass back the return code
691 b EXT(ppcscret) ; Go back to handler...
692
693swvmSetStop:
694 li r2,kVmmStopped ; Set stopped return
695 li r0,0 ; Clear
696 li r3,1 ; Set normal return with check for AST
697 stw r0,saver3(r30) ; Clear upper half
698 stw r2,saver3+4(r30) ; Pass back the return code
699 stw r2,return_code(r5) ; Save the exit code
700 b EXT(ppcscret) ; Go back to handler...
701
702svvmTimerPop:
703 li r2,kVmmReturnNull ; Set null return
704 li r0,0 ; Clear
705 li r3,1 ; Set normal return with check for AST
706 stw r0,saver3(r30) ; Clear upper half
707 stw r2,saver3+4(r30) ; Pass back the return code
708 stw r2,return_code(r5) ; Save the exit code
709 b EXT(ppcscret) ; Go back to handler...
710
711swvmEmulateISI:
712 mfsprg r10,2 ; Get feature flags
713 lwz r11,vmmXAFlgs(r28) ; Get the eXtended Architecture flags
714 mtcrf 0x02,r10 ; Move pf64Bit to its normal place in CR6
715 rlwinm. r11,r11,0,0,0 ; Are we doing a 64-bit virtual machine?
716 li r2,kVmmReturnInstrPageFault ; Set ISI
717 crnot vmmDoing64,cr0_eq ; Remember if this is a 64-bit VM
718 li r0,0 ; Clear
719 li r3,1 ; Set normal return with check for AST
720 stw r0,saver3(r30) ; Clear upper half
721 stw r2,saver3+4(r30) ; Pass back the return code
722 stw r2,return_code(r5) ; Save the exit code
723 lis r7,hi16(MASK(DSISR_HASH)) ; Pretend like we got a PTE miss
724 bt vmmDoing64,vmISI64 ; Go do this for a 64-bit VM...
725
726 lwz r10,vmmppcpc(r5) ; Get the PC as failing address
727 stw r10,return_params+0(r5) ; Save PC as first return parm
728 stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm
729 b EXT(ppcscret) ; Go back to handler...
730
731vmISI64: ld r10,vmmppcXpc(r5) ; Get the PC as failing address
732 std r10,return_paramsX+0(r5) ; Save PC as first return parm
733 std r7,return_paramsX+8(r5) ; Save the pseudo-DSISR as second return parm
734 b EXT(ppcscret) ; Go back to handler...
d7e50217
A
735
736;
737; These syscalls are invalid, FAM syscall fast path
738;
739
740 .align 5
741 .globl EXT(vmm_fam_reserved)
742
743LEXT(vmm_fam_reserved)
744 li r3,0 ; Force exception
745 b EXT(ppcscret) ; Go back to handler...
1c79356b 746
1c79356b
A
747;
748; Here is where we exit from vmm mode. We do this on any kind of exception.
749; Interruptions (decrementer, external, etc.) are another story though.
750; These we just pass through. We also switch back explicity when requested.
751; This will happen in response to a timer pop and some kinds of ASTs.
752;
753; Inputs:
754; R3 = activation
755; R4 = savearea
756;
757
758 .align 5
759 .globl EXT(vmm_exit)
760
761LEXT(vmm_exit)
762
d7e50217 763vmmexitcall:
1c79356b
A
764 lwz r2,vmmCEntry(r3) ; Get the context that is active
765 lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy
766 lwz r11,ACT_MACT_SPF(r3) ; Get the special flags
767 lwz r19,vmmFlags(r2) ; Get the status flags
768 mr r16,r3 ; R16 is safe to use for the activation address
769
770 rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits
771 li r0,0 ; Get a zero
772 rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf
773 lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation
774 rlwinm r11,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag
775 stw r0,vmmCEntry(r16) ; Clear pointer to active context
776 stw r19,vmmFlags(r2) ; Set the status flags
0b4e3aa0 777 rlwinm r11,r11,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key
91447636
A
778 mfsprg r10,1 ; Get the current activation
779 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
d7e50217
A
780 rlwinm r11,r11,0,FamVMenabit+1,FamVMenabit-1 ; Clear FamVMEnable
781 lwz r18,spcFlags(r10) ; Get per_proc copy of the special flags
782 lwz r5,vmmContextKern(r2) ; Get the state page kernel addr
783 rlwinm r11,r11,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMMode
784 lwz r6,vmmCntrl(r5) ; Get the control field
785 rlwimi r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits
786 rlwimi r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits
787 rlwimi r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits
1c79356b 788 stw r11,ACT_MACT_SPF(r16) ; Get the special flags
d7e50217 789 stw r6,vmmCntrl(r5) ; Store the control field
1c79356b
A
790 stw r11,spcFlags(r10) ; Set per_proc copy of the special flags
791
792 mr r26,r16 ; Save the activation pointer
793 mr r27,r2 ; Save the context entry
794
795 bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator
796
9bccf70c 797 la r5,facctx(r16) ; Point to the main facility context
1c79356b 798 mr r2,r27 ; Restore
9bccf70c
A
799 stw r5,deferctx(r16) ; Start using the main facility context on the way out
800 lwz r5,vmmContextKern(r27) ; Get the context area address
1c79356b
A
801 mr r3,r16 ; Restore activation address
802 stw r19,vmmStat(r5) ; Save the changed and popped flags
803 bl swapCtxt ; Exchange the VM context for the emulator one
55e303ae 804 stw r8,saver3+4(r30) ; Set the return code as the return value also
1c79356b
A
805 b EXT(retFromVM) ; Go back to handler...
806
807
808;
809; Here is where we force exit from vmm mode. We do this when as
810; part of termination and is used to insure that we are not executing
811; in an alternate context. Because this is called from C we need to save
812; all non-volatile registers.
813;
814; Inputs:
815; R3 = activation
816; R4 = user savearea
817; Interruptions disabled
818;
819
820 .align 5
821 .globl EXT(vmm_force_exit)
822
823LEXT(vmm_force_exit)
824
825 stwu r1,-(FM_ALIGN(20*4)+FM_SIZE)(r1) ; Get enough space for the registers
826 mflr r0 ; Save the return
827 stmw r13,FM_ARG0(r1) ; Save all non-volatile registers
828 stw r0,(FM_ALIGN(20*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
829
830 lwz r2,vmmCEntry(r3) ; Get the context that is active
831 lwz r11,ACT_MACT_SPF(r3) ; Get the special flags
832 lwz r19,vmmFlags(r2) ; Get the status flags
833 lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy
834
835 rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits
836 mr r26,r3 ; Save the activation pointer
837 rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf
838 li r0,0 ; Get a zero
839 rlwinm r9,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag
840 cmplw r9,r11 ; Check if we were in a vm
841 lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation
842 beq- vfeNotRun ; We were not in a vm....
0b4e3aa0 843 rlwinm r9,r9,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key
1c79356b 844 stw r0,vmmCEntry(r26) ; Clear pointer to active context
91447636
A
845 mfsprg r10,1 ; Get the current activation
846 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
d7e50217
A
847 lwz r18,spcFlags(r10) ; Get per_proc copy of the special flags
848 rlwinm r9,r9,0,FamVMenabit+1,FamVMenabit-1 ; Clear Fam Enable
849 rlwinm r9,r9,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear Fam Enable
850 lwz r5,vmmContextKern(r2) ; Get the context area address
851 lwz r6,vmmCntrl(r5) ; Get the control field
852 rlwimi r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits
853 rlwimi r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits
854 rlwimi r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits
855 stw r6,vmmCntrl(r5) ; Store the control field
1c79356b
A
856 stw r9,ACT_MACT_SPF(r26) ; Get the special flags
857 stw r9,spcFlags(r10) ; Set per_proc copy of the special flags
858
859 mr r27,r2 ; Save the context entry
860 mr r30,r4 ; Save the savearea
861
862 bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator
863
9bccf70c 864 la r7,facctx(r26) ; Point to the main facility context
1c79356b
A
865
866 lwz r5,vmmContextKern(r27) ; Get the context area address
867 stw r19,vmmStat(r5) ; Save the changed and popped flags
9bccf70c
A
868 stw r7,deferctx(r26) ; Tell context launcher to switch facility context
869
1c79356b
A
870 bl swapCtxt ; Exchange the VM context for the emulator one
871
0b4e3aa0 872 lwz r8,saveexception(r30) ; Pick up the exception code
9bccf70c
A
873 lwz r7,SAVflags(r30) ; Pick up the savearea flags
874 lis r9,hi16(SAVredrive) ; Get exception redrive bit
0b4e3aa0 875 rlwinm r8,r8,30,24,31 ; Convert exception to return code
9bccf70c 876 andc r7,r7,r9 ; Make sure redrive is off because we are intercepting
55e303ae 877 stw r8,saver3+4(r30) ; Set the return code as the return value also
9bccf70c 878 stw r7,SAVflags(r30) ; Set the savearea flags
1c79356b
A
879
880
881vfeNotRun: lmw r13,FM_ARG0(r1) ; Restore all non-volatile registers
882 lwz r1,0(r1) ; Pop the stack
883 lwz r0,FM_LR_SAVE(r1) ; Get the return address
884 mtlr r0 ; Set return
885 blr
886
887;
888; Note: we will not do any DCBTs to the savearea. It was just stored to a few cycles ago and should
9bccf70c 889; still be in the cache.
1c79356b 890;
1c79356b 891; NOTE NOTE: R16 is important to save!!!!
9bccf70c 892;
1c79356b
A
893 .align 5
894
55e303ae
A
895swapCtxt:
896 mfsprg r10,2 ; Get feature flags
897 la r6,vmmppcpc(r5) ; Point to the first line
898 mtcrf 0x02,r10 ; Move pf64Bit to its normal place in CR6
1c79356b
A
899
900 lwz r14,saveexception(r30) ; Get the exception code
9bccf70c 901 dcbt 0,r6 ; Touch in the first line of the context area
55e303ae
A
902 bt++ pf64Bitb,swap64 ; Go do this swap on a 64-bit machine...
903
904 lwz r7,savesrr0+4(r30) ; Start moving context
905 lwz r8,savesrr1+4(r30)
906 lwz r9,saver0+4(r30)
1c79356b 907 cmplwi cr1,r14,T_SYSTEM_CALL ; Are we switching because of a system call?
55e303ae
A
908 lwz r10,saver1+4(r30)
909 lwz r11,saver2+4(r30)
910 lwz r12,saver3+4(r30)
911 lwz r13,saver4+4(r30)
9bccf70c 912 la r6,vmmppcr6(r5) ; Point to second line
55e303ae 913 lwz r14,saver5+4(r30)
1c79356b 914
9bccf70c 915 dcbt 0,r6 ; Touch second line of context area
1c79356b 916
9bccf70c 917 lwz r15,vmmppcpc(r5) ; First line of context
1c79356b 918 lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
9bccf70c 919 lwz r23,vmmppcmsr(r5)
d7e50217 920 ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
9bccf70c
A
921 lwz r17,vmmppcr0(r5)
922 lwz r18,vmmppcr1(r5)
1c79356b 923 and r23,r23,r22 ; Keep only the controllable bits
9bccf70c 924 lwz r19,vmmppcr2(r5)
1c79356b 925 oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
9bccf70c 926 lwz r20,vmmppcr3(r5)
1c79356b 927 ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
9bccf70c
A
928 lwz r21,vmmppcr4(r5)
929 lwz r22,vmmppcr5(r5)
1c79356b 930
9bccf70c 931 dcbt 0,r6 ; Touch third line of context area
1c79356b 932
9bccf70c
A
933 stw r7,vmmppcpc(r5) ; Save emulator context into the context area
934 stw r8,vmmppcmsr(r5)
935 stw r9,vmmppcr0(r5)
936 stw r10,vmmppcr1(r5)
937 stw r11,vmmppcr2(r5)
938 stw r12,vmmppcr3(r5)
939 stw r13,vmmppcr4(r5)
940 stw r14,vmmppcr5(r5)
1c79356b
A
941
942;
943; Save the first 3 parameters if we are an SC (we will take care of the last later)
944;
945 bne+ cr1,swapnotsc ; Skip next if not an SC exception...
946 stw r12,return_params+0(r5) ; Save the first return
947 stw r13,return_params+4(r5) ; Save the second return
948 stw r14,return_params+8(r5) ; Save the third return
949
55e303ae
A
950swapnotsc: li r6,0 ; Clear this out
951 stw r6,savesrr0(r30) ; Insure that high order is clear
952 stw r15,savesrr0+4(r30) ; Save vm context into the savearea
953 stw r6,savesrr1(r30) ; Insure that high order is clear
954 stw r23,savesrr1+4(r30)
955 stw r17,saver0+4(r30)
956 stw r18,saver1+4(r30)
957 stw r19,saver2+4(r30)
958 stw r20,saver3+4(r30)
959 stw r21,saver4+4(r30)
9bccf70c 960 la r6,vmmppcr14(r5) ; Point to fourth line
55e303ae 961 stw r22,saver5+4(r30)
9bccf70c
A
962
963 dcbt 0,r6 ; Touch fourth line
964
965; Swap 8 registers
966
55e303ae
A
967 lwz r7,saver6+4(r30) ; Read savearea
968 lwz r8,saver7+4(r30)
969 lwz r9,saver8+4(r30)
970 lwz r10,saver9+4(r30)
971 lwz r11,saver10+4(r30)
972 lwz r12,saver11+4(r30)
973 lwz r13,saver12+4(r30)
974 lwz r14,saver13+4(r30)
9bccf70c
A
975
976 lwz r15,vmmppcr6(r5) ; Read vm context
977 lwz r24,vmmppcr7(r5)
978 lwz r17,vmmppcr8(r5)
979 lwz r18,vmmppcr9(r5)
980 lwz r19,vmmppcr10(r5)
981 lwz r20,vmmppcr11(r5)
982 lwz r21,vmmppcr12(r5)
983 lwz r22,vmmppcr13(r5)
984
985 stw r7,vmmppcr6(r5) ; Write context
986 stw r8,vmmppcr7(r5)
987 stw r9,vmmppcr8(r5)
988 stw r10,vmmppcr9(r5)
989 stw r11,vmmppcr10(r5)
990 stw r12,vmmppcr11(r5)
991 stw r13,vmmppcr12(r5)
992 la r6,vmmppcr22(r5) ; Point to fifth line
993 stw r14,vmmppcr13(r5)
994
995 dcbt 0,r6 ; Touch fifth line
996
55e303ae
A
997 stw r15,saver6+4(r30) ; Write vm context
998 stw r24,saver7+4(r30)
999 stw r17,saver8+4(r30)
1000 stw r18,saver9+4(r30)
1001 stw r19,saver10+4(r30)
1002 stw r20,saver11+4(r30)
1003 stw r21,saver12+4(r30)
1004 stw r22,saver13+4(r30)
9bccf70c
A
1005
1006; Swap 8 registers
1007
55e303ae
A
1008 lwz r7,saver14+4(r30) ; Read savearea
1009 lwz r8,saver15+4(r30)
1010 lwz r9,saver16+4(r30)
1011 lwz r10,saver17+4(r30)
1012 lwz r11,saver18+4(r30)
1013 lwz r12,saver19+4(r30)
1014 lwz r13,saver20+4(r30)
1015 lwz r14,saver21+4(r30)
9bccf70c
A
1016
1017 lwz r15,vmmppcr14(r5) ; Read vm context
1018 lwz r24,vmmppcr15(r5)
1019 lwz r17,vmmppcr16(r5)
1020 lwz r18,vmmppcr17(r5)
1021 lwz r19,vmmppcr18(r5)
1022 lwz r20,vmmppcr19(r5)
1023 lwz r21,vmmppcr20(r5)
1024 lwz r22,vmmppcr21(r5)
1025
1026 stw r7,vmmppcr14(r5) ; Write context
1027 stw r8,vmmppcr15(r5)
1028 stw r9,vmmppcr16(r5)
1029 stw r10,vmmppcr17(r5)
1030 stw r11,vmmppcr18(r5)
1031 stw r12,vmmppcr19(r5)
1032 stw r13,vmmppcr20(r5)
1033 la r6,vmmppcr30(r5) ; Point to sixth line
1034 stw r14,vmmppcr21(r5)
1035
1036 dcbt 0,r6 ; Touch sixth line
1037
55e303ae
A
1038 stw r15,saver14+4(r30) ; Write vm context
1039 stw r24,saver15+4(r30)
1040 stw r17,saver16+4(r30)
1041 stw r18,saver17+4(r30)
1042 stw r19,saver18+4(r30)
1043 stw r20,saver19+4(r30)
1044 stw r21,saver20+4(r30)
1045 stw r22,saver21+4(r30)
9bccf70c
A
1046
1047; Swap 8 registers
1048
55e303ae
A
1049 lwz r7,saver22+4(r30) ; Read savearea
1050 lwz r8,saver23+4(r30)
1051 lwz r9,saver24+4(r30)
1052 lwz r10,saver25+4(r30)
1053 lwz r11,saver26+4(r30)
1054 lwz r12,saver27+4(r30)
1055 lwz r13,saver28+4(r30)
1056 lwz r14,saver29+4(r30)
9bccf70c
A
1057
1058 lwz r15,vmmppcr22(r5) ; Read vm context
1059 lwz r24,vmmppcr23(r5)
1060 lwz r17,vmmppcr24(r5)
1061 lwz r18,vmmppcr25(r5)
1062 lwz r19,vmmppcr26(r5)
1063 lwz r20,vmmppcr27(r5)
1064 lwz r21,vmmppcr28(r5)
1065 lwz r22,vmmppcr29(r5)
1066
1067 stw r7,vmmppcr22(r5) ; Write context
1068 stw r8,vmmppcr23(r5)
1069 stw r9,vmmppcr24(r5)
1070 stw r10,vmmppcr25(r5)
1071 stw r11,vmmppcr26(r5)
1072 stw r12,vmmppcr27(r5)
1073 stw r13,vmmppcr28(r5)
1074 la r6,vmmppcvscr(r5) ; Point to seventh line
1075 stw r14,vmmppcr29(r5)
1076
1077 dcbt 0,r6 ; Touch seventh line
1078
55e303ae
A
1079 stw r15,saver22+4(r30) ; Write vm context
1080 stw r24,saver23+4(r30)
1081 stw r17,saver24+4(r30)
1082 stw r18,saver25+4(r30)
1083 stw r19,saver26+4(r30)
1084 stw r20,saver27+4(r30)
1085 stw r21,saver28+4(r30)
1086 stw r22,saver29+4(r30)
9bccf70c
A
1087
1088; Swap 8 registers
1089
55e303ae
A
1090 lwz r7,saver30+4(r30) ; Read savearea
1091 lwz r8,saver31+4(r30)
9bccf70c 1092 lwz r9,savecr(r30)
55e303ae
A
1093 lwz r10,savexer+4(r30)
1094 lwz r11,savelr+4(r30)
1095 lwz r12,savectr+4(r30)
9bccf70c
A
1096 lwz r14,savevrsave(r30)
1097
1098 lwz r15,vmmppcr30(r5) ; Read vm context
1099 lwz r24,vmmppcr31(r5)
1100 lwz r17,vmmppccr(r5)
1101 lwz r18,vmmppcxer(r5)
1102 lwz r19,vmmppclr(r5)
1103 lwz r20,vmmppcctr(r5)
1104 lwz r22,vmmppcvrsave(r5)
1105
1106 stw r7,vmmppcr30(r5) ; Write context
1107 stw r8,vmmppcr31(r5)
1108 stw r9,vmmppccr(r5)
1109 stw r10,vmmppcxer(r5)
1110 stw r11,vmmppclr(r5)
1111 stw r12,vmmppcctr(r5)
1112 stw r14,vmmppcvrsave(r5)
1113
55e303ae
A
1114 stw r15,saver30+4(r30) ; Write vm context
1115 stw r24,saver31+4(r30)
9bccf70c 1116 stw r17,savecr(r30)
55e303ae
A
1117 stw r18,savexer+4(r30)
1118 stw r19,savelr+4(r30)
1119 stw r20,savectr+4(r30)
9bccf70c
A
1120 stw r22,savevrsave(r30)
1121
1122; Swap 8 registers
1123
1124 lwz r7,savevscr+0(r30) ; Read savearea
1125 lwz r8,savevscr+4(r30)
1126 lwz r9,savevscr+8(r30)
1127 lwz r10,savevscr+12(r30)
1128 lwz r11,savefpscrpad(r30)
1129 lwz r12,savefpscr(r30)
1130
1131 lwz r15,vmmppcvscr+0(r5) ; Read vm context
1132 lwz r24,vmmppcvscr+4(r5)
1133 lwz r17,vmmppcvscr+8(r5)
1134 lwz r18,vmmppcvscr+12(r5)
1135 lwz r19,vmmppcfpscrpad(r5)
1136 lwz r20,vmmppcfpscr(r5)
1137
1138 stw r7,vmmppcvscr+0(r5) ; Write context
1139 stw r8,vmmppcvscr+4(r5)
1140 stw r9,vmmppcvscr+8(r5)
1141 stw r10,vmmppcvscr+12(r5)
1142 stw r11,vmmppcfpscrpad(r5)
1143 stw r12,vmmppcfpscr(r5)
1144
1145 stw r15,savevscr+0(r30) ; Write vm context
1146 stw r24,savevscr+4(r30)
1147 stw r17,savevscr+8(r30)
1148 stw r18,savevscr+12(r30)
1149 stw r19,savefpscrpad(r30)
1150 stw r20,savefpscr(r30)
1151
1c79356b
A
1152
1153;
1154; Cobble up the exception return code and save any specific return values
1155;
1156
1157 lwz r7,saveexception(r30) ; Pick up the exception code
1158 rlwinm r8,r7,30,24,31 ; Convert exception to return code
1159 cmplwi r7,T_DATA_ACCESS ; Was this a DSI?
1160 stw r8,return_code(r5) ; Save the exit code
1161 cmplwi cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI?
1162 beq+ swapDSI ; Yeah...
1163 cmplwi r7,T_ALIGNMENT ; Alignment exception?
1164 beq+ cr1,swapISI ; We had an ISI...
1165 cmplwi cr1,r7,T_SYSTEM_CALL ; Exiting because of an system call?
1166 beq+ swapDSI ; An alignment exception looks like a DSI...
1167 beq+ cr1,swapSC ; We had a system call...
1168
1169 blr ; Return...
1170
1171;
1172; Set exit returns for a DSI or alignment exception
1173;
1174
55e303ae 1175swapDSI: lwz r10,savedar+4(r30) ; Get the DAR
1c79356b
A
1176 lwz r7,savedsisr(r30) ; and the DSISR
1177 stw r10,return_params+0(r5) ; Save DAR as first return parm
1178 stw r7,return_params+4(r5) ; Save DSISR as second return parm
1179 blr ; Return...
1180
1181;
1182; Set exit returns for a ISI
1183;
1184
9bccf70c
A
1185swapISI: lwz r7,vmmppcmsr(r5) ; Get the SRR1 value
1186 lwz r10,vmmppcpc(r5) ; Get the PC as failing address
1c79356b
A
1187 rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR
1188 stw r10,return_params+0(r5) ; Save PC as first return parm
1189 stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm
1190 blr ; Return...
1191
1192;
1193; Set exit returns for a system call (note: we did the first 3 earlier)
1194; Do we really need to pass parameters back here????
1195;
1196
9bccf70c 1197swapSC: lwz r10,vmmppcr6(r5) ; Get the fourth paramter
1c79356b
A
1198 stw r10,return_params+12(r5) ; Save it
1199 blr ; Return...
1200
55e303ae
A
1201;
1202; Here is the swap for 64-bit machines
1203;
1204
1205swap64: lwz r22,vmmXAFlgs(r27) ; Get the eXtended Architecture flags
1206 ld r7,savesrr0(r30) ; Start moving context
1207 ld r8,savesrr1(r30)
1208 ld r9,saver0(r30)
1209 cmplwi cr1,r14,T_SYSTEM_CALL ; Are we switching because of a system call?
1210 ld r10,saver1(r30)
1211 ld r11,saver2(r30)
1212 rlwinm. r22,r22,0,0,0 ; Are we doing a 64-bit virtual machine?
1213 ld r12,saver3(r30)
1214 crnot vmmDoing64,cr0_eq ; Remember if this is a 64-bit VM
1215 ld r13,saver4(r30)
1216 la r6,vmmppcr6(r5) ; Point to second line
1217 ld r14,saver5(r30)
1218
1219 dcbt 0,r6 ; Touch second line of context area
1220
1221 bt vmmDoing64,sw64x1 ; Skip to 64-bit stuff
1222
1223 lwz r15,vmmppcpc(r5) ; First line of context
1224 lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
1225 lwz r23,vmmppcmsr(r5)
3a60a9f5 1226 ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
55e303ae
A
1227 lwz r17,vmmppcr0(r5)
1228 lwz r18,vmmppcr1(r5)
1229 and r23,r23,r22 ; Keep only the controllable bits
1230 lwz r19,vmmppcr2(r5)
1231 oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1232 lwz r20,vmmppcr3(r5)
1233 ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1234 lwz r21,vmmppcr4(r5)
1235 lwz r22,vmmppcr5(r5)
1236
1237 dcbt 0,r6 ; Touch third line of context area
1238
1239 stw r7,vmmppcpc(r5) ; Save emulator context into the context area
1240 stw r8,vmmppcmsr(r5)
1241 stw r9,vmmppcr0(r5)
1242 stw r10,vmmppcr1(r5)
1243 stw r11,vmmppcr2(r5)
1244 stw r12,vmmppcr3(r5)
1245 stw r13,vmmppcr4(r5)
1246 stw r14,vmmppcr5(r5)
1247
1248;
1249; Save the first 3 parameters if we are an SC (we will take care of the last later)
1250;
1251 bne+ cr1,sw64x1done ; Skip next if not an SC exception...
1252 stw r12,return_params+0(r5) ; Save the first return
1253 stw r13,return_params+4(r5) ; Save the second return
1254 stw r14,return_params+8(r5) ; Save the third return
1255 b sw64x1done ; We are done with this section...
1256
1257sw64x1: ld r15,vmmppcXpc(r5) ; First line of context
1258 li r0,1 ; Get a 1 to turn on 64-bit
1259 lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user (we will also allow 64-bit here)
1260 sldi r0,r0,63 ; Get 64-bit bit
1261 ld r23,vmmppcXmsr(r5)
3a60a9f5 1262 ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
55e303ae
A
1263 ld r17,vmmppcXr0(r5)
1264 or r22,r22,r0 ; Add the 64-bit bit
1265 ld r18,vmmppcXr1(r5)
1266 and r23,r23,r22 ; Keep only the controllable bits
1267 ld r19,vmmppcXr2(r5)
1268 oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1269 ld r20,vmmppcXr3(r5)
1270 ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1271 ld r21,vmmppcXr4(r5)
1272 ld r22,vmmppcXr5(r5)
1273
1274 dcbt 0,r6 ; Touch third line of context area
1275
1276 std r7,vmmppcXpc(r5) ; Save emulator context into the context area
1277 std r8,vmmppcXmsr(r5)
1278 std r9,vmmppcXr0(r5)
1279 std r10,vmmppcXr1(r5)
1280 std r11,vmmppcXr2(r5)
1281 std r12,vmmppcXr3(r5)
1282 std r13,vmmppcXr4(r5)
1283 std r14,vmmppcXr5(r5)
1284
1285;
1286; Save the first 3 parameters if we are an SC (we will take care of the last later)
1287;
1288 bne+ cr1,sw64x1done ; Skip next if not an SC exception...
1289 std r12,return_paramsX+0(r5) ; Save the first return
1290 std r13,return_paramsX+8(r5) ; Save the second return
1291 std r14,return_paramsX+16(r5) ; Save the third return
1292
1293sw64x1done:
1294 std r15,savesrr0(r30) ; Save vm context into the savearea
1295 std r23,savesrr1(r30)
1296 std r17,saver0(r30)
1297 std r18,saver1(r30)
1298 std r19,saver2(r30)
1299 std r20,saver3(r30)
1300 std r21,saver4(r30)
1301 la r6,vmmppcr14(r5) ; Point to fourth line
1302 std r22,saver5(r30)
1303
1304 dcbt 0,r6 ; Touch fourth line
1305
1306; Swap 8 registers
1307
1308 ld r7,saver6(r30) ; Read savearea
1309 ld r8,saver7(r30)
1310 ld r9,saver8(r30)
1311 ld r10,saver9(r30)
1312 ld r11,saver10(r30)
1313 ld r12,saver11(r30)
1314 ld r13,saver12(r30)
1315 ld r14,saver13(r30)
1316
1317 bt vmmDoing64,sw64x2 ; Skip to 64-bit stuff
1318
1319 lwz r15,vmmppcr6(r5) ; Read vm context
1320 lwz r24,vmmppcr7(r5)
1321 lwz r17,vmmppcr8(r5)
1322 lwz r18,vmmppcr9(r5)
1323 lwz r19,vmmppcr10(r5)
1324 lwz r20,vmmppcr11(r5)
1325 lwz r21,vmmppcr12(r5)
1326 lwz r22,vmmppcr13(r5)
1327
1328 stw r7,vmmppcr6(r5) ; Write context
1329 stw r8,vmmppcr7(r5)
1330 stw r9,vmmppcr8(r5)
1331 stw r10,vmmppcr9(r5)
1332 stw r11,vmmppcr10(r5)
1333 stw r12,vmmppcr11(r5)
1334 stw r13,vmmppcr12(r5)
1335 la r6,vmmppcr22(r5) ; Point to fifth line
1336 stw r14,vmmppcr13(r5)
1337
1338 dcbt 0,r6 ; Touch fifth line
1339 b sw64x2done ; We are done with this section...
1340
1341sw64x2: ld r15,vmmppcXr6(r5) ; Read vm context
1342 ld r24,vmmppcXr7(r5)
1343 ld r17,vmmppcXr8(r5)
1344 ld r18,vmmppcXr9(r5)
1345 ld r19,vmmppcXr10(r5)
1346 ld r20,vmmppcXr11(r5)
1347 ld r21,vmmppcXr12(r5)
1348 ld r22,vmmppcXr13(r5)
1349
1350 std r7,vmmppcXr6(r5) ; Write context
1351 std r8,vmmppcXr7(r5)
1352 std r9,vmmppcXr8(r5)
1353 std r10,vmmppcXr9(r5)
1354 std r11,vmmppcXr10(r5)
1355 std r12,vmmppcXr11(r5)
1356 std r13,vmmppcXr12(r5)
1357 la r6,vmmppcXr22(r5) ; Point to fifth line
1358 std r14,vmmppcXr13(r5)
1359
1360 dcbt 0,r6 ; Touch fifth line
1361
1362sw64x2done: std r15,saver6(r30) ; Write vm context
1363 std r24,saver7(r30)
1364 std r17,saver8(r30)
1365 std r18,saver9(r30)
1366 std r19,saver10(r30)
1367 std r20,saver11(r30)
1368 std r21,saver12(r30)
1369 std r22,saver13(r30)
1370
1371; Swap 8 registers
1372
1373 ld r7,saver14(r30) ; Read savearea
1374 ld r8,saver15(r30)
1375 ld r9,saver16(r30)
1376 ld r10,saver17(r30)
1377 ld r11,saver18(r30)
1378 ld r12,saver19(r30)
1379 ld r13,saver20(r30)
1380 ld r14,saver21(r30)
1381
1382 bt vmmDoing64,sw64x3 ; Skip to 64-bit stuff
1383
1384 lwz r15,vmmppcr14(r5) ; Read vm context
1385 lwz r24,vmmppcr15(r5)
1386 lwz r17,vmmppcr16(r5)
1387 lwz r18,vmmppcr17(r5)
1388 lwz r19,vmmppcr18(r5)
1389 lwz r20,vmmppcr19(r5)
1390 lwz r21,vmmppcr20(r5)
1391 lwz r22,vmmppcr21(r5)
1392
1393 stw r7,vmmppcr14(r5) ; Write context
1394 stw r8,vmmppcr15(r5)
1395 stw r9,vmmppcr16(r5)
1396 stw r10,vmmppcr17(r5)
1397 stw r11,vmmppcr18(r5)
1398 stw r12,vmmppcr19(r5)
1399 stw r13,vmmppcr20(r5)
1400 la r6,vmmppcr30(r5) ; Point to sixth line
1401 stw r14,vmmppcr21(r5)
1402
1403 dcbt 0,r6 ; Touch sixth line
1404 b sw64x3done ; Done with this section...
1405
1406sw64x3: ld r15,vmmppcXr14(r5) ; Read vm context
1407 ld r24,vmmppcXr15(r5)
1408 ld r17,vmmppcXr16(r5)
1409 ld r18,vmmppcXr17(r5)
1410 ld r19,vmmppcXr18(r5)
1411 ld r20,vmmppcXr19(r5)
1412 ld r21,vmmppcXr20(r5)
1413 ld r22,vmmppcXr21(r5)
1414
1415 std r7,vmmppcXr14(r5) ; Write context
1416 std r8,vmmppcXr15(r5)
1417 std r9,vmmppcXr16(r5)
1418 std r10,vmmppcXr17(r5)
1419 std r11,vmmppcXr18(r5)
1420 std r12,vmmppcXr19(r5)
1421 std r13,vmmppcXr20(r5)
1422 la r6,vmmppcXr30(r5) ; Point to sixth line
1423 std r14,vmmppcXr21(r5)
1424
1425 dcbt 0,r6 ; Touch sixth line
1426
1427sw64x3done: std r15,saver14(r30) ; Write vm context
1428 std r24,saver15(r30)
1429 std r17,saver16(r30)
1430 std r18,saver17(r30)
1431 std r19,saver18(r30)
1432 std r20,saver19(r30)
1433 std r21,saver20(r30)
1434 std r22,saver21(r30)
1435
1436; Swap 8 registers
1437
1438 ld r7,saver22(r30) ; Read savearea
1439 ld r8,saver23(r30)
1440 ld r9,saver24(r30)
1441 ld r10,saver25(r30)
1442 ld r11,saver26(r30)
1443 ld r12,saver27(r30)
1444 ld r13,saver28(r30)
1445 ld r14,saver29(r30)
1446
1447 bt vmmDoing64,sw64x4 ; Skip to 64-bit stuff
1448
1449 lwz r15,vmmppcr22(r5) ; Read vm context
1450 lwz r24,vmmppcr23(r5)
1451 lwz r17,vmmppcr24(r5)
1452 lwz r18,vmmppcr25(r5)
1453 lwz r19,vmmppcr26(r5)
1454 lwz r20,vmmppcr27(r5)
1455 lwz r21,vmmppcr28(r5)
1456 lwz r22,vmmppcr29(r5)
1457
1458 stw r7,vmmppcr22(r5) ; Write context
1459 stw r8,vmmppcr23(r5)
1460 stw r9,vmmppcr24(r5)
1461 stw r10,vmmppcr25(r5)
1462 stw r11,vmmppcr26(r5)
1463 stw r12,vmmppcr27(r5)
1464 stw r13,vmmppcr28(r5)
1465 la r6,vmmppcvscr(r5) ; Point to seventh line
1466 stw r14,vmmppcr29(r5)
1467 dcbt 0,r6 ; Touch seventh line
1468 b sw64x4done ; Done with this section...
1469
1470sw64x4: ld r15,vmmppcXr22(r5) ; Read vm context
1471 ld r24,vmmppcXr23(r5)
1472 ld r17,vmmppcXr24(r5)
1473 ld r18,vmmppcXr25(r5)
1474 ld r19,vmmppcXr26(r5)
1475 ld r20,vmmppcXr27(r5)
1476 ld r21,vmmppcXr28(r5)
1477 ld r22,vmmppcXr29(r5)
1478
1479 std r7,vmmppcXr22(r5) ; Write context
1480 std r8,vmmppcXr23(r5)
1481 std r9,vmmppcXr24(r5)
1482 std r10,vmmppcXr25(r5)
1483 std r11,vmmppcXr26(r5)
1484 std r12,vmmppcXr27(r5)
1485 std r13,vmmppcXr28(r5)
1486 la r6,vmmppcvscr(r5) ; Point to seventh line
1487 std r14,vmmppcXr29(r5)
1488
1489 dcbt 0,r6 ; Touch seventh line
1490
1491sw64x4done: std r15,saver22(r30) ; Write vm context
1492 std r24,saver23(r30)
1493 std r17,saver24(r30)
1494 std r18,saver25(r30)
1495 std r19,saver26(r30)
1496 std r20,saver27(r30)
1497 std r21,saver28(r30)
1498 std r22,saver29(r30)
1499
1500; Swap 8 registers
1501
1502 ld r7,saver30(r30) ; Read savearea
1503 ld r8,saver31(r30)
1504 lwz r9,savecr(r30)
1505 ld r10,savexer(r30)
1506 ld r11,savelr(r30)
1507 ld r12,savectr(r30)
1508 lwz r14,savevrsave(r30)
1509
1510 bt vmmDoing64,sw64x5 ; Skip to 64-bit stuff
1511
1512 lwz r15,vmmppcr30(r5) ; Read vm context
1513 lwz r24,vmmppcr31(r5)
1514 lwz r17,vmmppccr(r5)
1515 lwz r18,vmmppcxer(r5)
1516 lwz r19,vmmppclr(r5)
1517 lwz r20,vmmppcctr(r5)
1518 lwz r22,vmmppcvrsave(r5)
1519
1520 stw r7,vmmppcr30(r5) ; Write context
1521 stw r8,vmmppcr31(r5)
1522 stw r9,vmmppccr(r5)
1523 stw r10,vmmppcxer(r5)
1524 stw r11,vmmppclr(r5)
1525 stw r12,vmmppcctr(r5)
1526 stw r14,vmmppcvrsave(r5)
1527 b sw64x5done ; Done here...
1528
1529sw64x5: ld r15,vmmppcXr30(r5) ; Read vm context
1530 ld r24,vmmppcXr31(r5)
1531 lwz r17,vmmppcXcr(r5)
1532 ld r18,vmmppcXxer(r5)
1533 ld r19,vmmppcXlr(r5)
1534 ld r20,vmmppcXctr(r5)
1535 lwz r22,vmmppcXvrsave(r5)
1536
1537 std r7,vmmppcXr30(r5) ; Write context
1538 std r8,vmmppcXr31(r5)
1539 stw r9,vmmppcXcr(r5)
1540 std r10,vmmppcXxer(r5)
1541 std r11,vmmppcXlr(r5)
1542 std r12,vmmppcXctr(r5)
1543 stw r14,vmmppcXvrsave(r5)
1544
1545sw64x5done: std r15,saver30(r30) ; Write vm context
1546 std r24,saver31(r30)
1547 stw r17,savecr(r30)
1548 std r18,savexer(r30)
1549 std r19,savelr(r30)
1550 std r20,savectr(r30)
1551 stw r22,savevrsave(r30)
1552
1553; Swap 8 registers
1554
1555 lwz r7,savevscr+0(r30) ; Read savearea
1556 lwz r8,savevscr+4(r30)
1557 lwz r9,savevscr+8(r30)
1558 lwz r10,savevscr+12(r30)
1559 lwz r11,savefpscrpad(r30)
1560 lwz r12,savefpscr(r30)
1561
1562 lwz r15,vmmppcvscr+0(r5) ; Read vm context
1563 lwz r24,vmmppcvscr+4(r5)
1564 lwz r17,vmmppcvscr+8(r5)
1565 lwz r18,vmmppcvscr+12(r5)
1566 lwz r19,vmmppcfpscrpad(r5)
1567 lwz r20,vmmppcfpscr(r5)
1568
1569 stw r7,vmmppcvscr+0(r5) ; Write context
1570 stw r8,vmmppcvscr+4(r5)
1571 stw r9,vmmppcvscr+8(r5)
1572 stw r10,vmmppcvscr+12(r5)
1573 stw r11,vmmppcfpscrpad(r5)
1574 stw r12,vmmppcfpscr(r5)
1575
1576 stw r15,savevscr+0(r30) ; Write vm context
1577 stw r24,savevscr+4(r30)
1578 stw r17,savevscr+8(r30)
1579 stw r18,savevscr+12(r30)
1580 stw r19,savefpscrpad(r30)
1581 stw r20,savefpscr(r30)
1582
1583
1584;
1585; Cobble up the exception return code and save any specific return values
1586;
1587
1588 lwz r7,saveexception(r30) ; Pick up the exception code
1589 rlwinm r8,r7,30,24,31 ; Convert exception to return code
1590 cmplwi r7,T_DATA_ACCESS ; Was this a DSI?
1591 stw r8,return_code(r5) ; Save the exit code
1592 cmplwi cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI?
1593 beq+ swapDSI64 ; Yeah...
1594 cmplwi r7,T_ALIGNMENT ; Alignment exception?
1595 beq+ cr1,swapISI64 ; We had an ISI...
1596 cmplwi cr1,r7,T_SYSTEM_CALL ; Exiting because of an system call?
1597 beq+ swapDSI64 ; An alignment exception looks like a DSI...
1598 beq+ cr1,swapSC64 ; We had a system call...
1599
1600 blr ; Return...
1601
1602;
1603; Set exit returns for a DSI or alignment exception
1604;
1605
1606swapDSI64: ld r10,savedar(r30) ; Get the DAR
1607 lwz r7,savedsisr(r30) ; and the DSISR
1608 bt vmmDoing64,sw64DSI ; Skip to 64-bit stuff...
1609
1610
1611 stw r10,return_params+0(r5) ; Save DAR as first return parm
1612 stw r7,return_params+4(r5) ; Save DSISR as second return parm
1613 blr ; Return...
1614
1615sw64DSI: std r10,return_paramsX+0(r5) ; Save DAR as first return parm
1616 std r7,return_paramsX+8(r5) ; Save DSISR as second return parm (note that this is expanded to 64 bits)
1617 blr ; Return...
1618
1619;
1620; Set exit returns for a ISI
1621;
1622
1623swapISI64: bt vmmDoing64,sw64ISI ; Skip to 64-bit stuff...
1624 lwz r7,vmmppcmsr(r5) ; Get the SRR1 value
1625 lwz r10,vmmppcpc(r5) ; Get the PC as failing address
1626 rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR
1627 stw r10,return_params+0(r5) ; Save PC as first return parm
1628 stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm
1629 blr ; Return...
1630
1631sw64ISI: ld r7,vmmppcXmsr(r5) ; Get the SRR1 value
1632 ld r10,vmmppcXpc(r5) ; Get the PC as failing address
1633 rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR
1634 std r10,return_paramsX+0(r5) ; Save PC as first return parm
1635 std r7,return_paramsX+8(r5) ; Save the pseudo-DSISR as second return parm
1636 blr ; Return...
1637
1638;
1639; Set exit returns for a system call (note: we did the first 3 earlier)
1640; Do we really need to pass parameters back here????
1641;
1642
1643swapSC64: bt vmmDoing64,sw64SC ; Skip to 64-bit stuff...
1644 lwz r10,vmmppcr6(r5) ; Get the fourth paramter
1645 stw r10,return_params+12(r5) ; Save it
1646 blr ; Return...
1647
1648sw64SC: ld r10,vmmppcXr6(r5) ; Get the fourth paramter
1649 std r10,return_paramsX+24(r5) ; Save it
1650 blr ; Return...
1651
d7e50217
A
1652;
1653; vmmFamGuestResume:
1654; Restore Guest context from Fam mode.
1655;
1656
1657vmmFamGuestResume:
91447636
A
1658 mfsprg r10,1 ; Get the current activation
1659 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
d7e50217 1660 lwz r27,vmmCEntry(r3) ; Get the context that is active
55e303ae
A
1661 lwz r4,VMMXAFlgs(r10) ; Get the eXtended Architecture flags
1662 rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
d7e50217
A
1663 lwz r15,spcFlags(r10) ; Get per_proc special flags
1664 mr r26,r3 ; Save the activation pointer
1665 lwz r20,vmmContextKern(r27) ; Get the comm area
1666 rlwinm r15,r15,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
1667 stw r15,spcFlags(r10) ; Update the special flags
55e303ae 1668 bne fgrX
d7e50217 1669 lwz r7,famguestpc(r20) ; Load famguest ctx pc
55e303ae 1670 bf++ vmmMapDone,fgrNoMap ; No mapping done for this space.
d7e50217 1671 lwz r3,SAVflags(r30) ; Pick up the savearea flags
55e303ae
A
1672 lwz r2,vmmLastMap(r28) ; Get the last mapped address
1673 lwz r6,vmmLastMap+4(r28) ; Get the last mapped address
d7e50217
A
1674 li r4,T_DATA_ACCESS ; Change to DSI fault
1675 oris r3,r3,hi16(SAVredrive) ; Set exception redrive
1676 stw r2,savedar(r30) ; Set the DAR to the last thing we mapped
55e303ae 1677 stw r6,savedar+4(r30) ; Set the DAR to the last thing we mapped
d7e50217
A
1678 stw r3,SAVflags(r30) ; Turn on the redrive request
1679 lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
1680 stw r4,saveexception(r30) ; Say we need to emulate a DSI
55e303ae 1681 li r0,0 ; Clear
d7e50217 1682 stw r2,savedsisr(r30) ; Pretend we have a PTE miss
55e303ae
A
1683 stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of
1684fgrNoMap:
1685 lwz r4,savesrr1+4(r30) ; Get the saved MSR value
1686 stw r7,savesrr0+4(r30) ; Set savearea pc
d7e50217
A
1687 lwz r5,famguestmsr(r20) ; Load famguest ctx msr
1688 lis r6,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
1689 ori r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
1690 and r5,r5,r6 ; Keep only the controllable bits
1691 oris r5,r5,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1692 ori r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1693 rlwimi r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
1694 rlwimi r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
55e303ae 1695 stw r5,savesrr1+4(r30) ; Set savearea srr1
d7e50217
A
1696 lwz r4,famguestr0(r20) ; Load famguest ctx r0
1697 lwz r5,famguestr1(r20) ; Load famguest ctx r1
1698 lwz r6,famguestr2(r20) ; Load famguest ctx r2
1699 lwz r7,famguestr3(r20) ; Load famguest ctx r3
55e303ae
A
1700 stw r4,saver0+4(r30) ; Set savearea r0
1701 stw r5,saver1+4(r30) ; Set savearea r1
1702 stw r6,saver2+4(r30) ; Set savearea r2
1703 stw r7,saver3+4(r30) ; Set savearea r3
d7e50217
A
1704 lwz r4,famguestr4(r20) ; Load famguest ctx r4
1705 lwz r5,famguestr5(r20) ; Load famguest ctx r5
1706 lwz r6,famguestr6(r20) ; Load famguest ctx r6
1707 lwz r7,famguestr7(r20) ; Load famguest ctx r7
55e303ae
A
1708 stw r4,saver4+4(r30) ; Set savearea r4
1709 stw r5,saver5+4(r30) ; Set savearea r5
1710 stw r6,saver6+4(r30) ; Set savearea r6
1711 stw r7,saver7+4(r30) ; Set savearea r7
1712 b fgrret
1713fgrX:
1714 ld r7,famguestXpc(r20) ; Load famguest ctx pc
1715 bf++ vmmMapDone,fgrXNoMap ; No mapping done for this space.
1716 lwz r3,SAVflags(r30) ; Pick up the savearea flags
1717 ld r2,vmmLastMap(r28) ; Get the last mapped address
1718 li r4,T_DATA_ACCESS ; Change to DSI fault
1719 oris r3,r3,hi16(SAVredrive) ; Set exception redrive
1720 std r2,savedar(r30) ; Set the DAR to the last thing we mapped
1721 stw r3,SAVflags(r30) ; Turn on the redrive request
1722 lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
1723 stw r4,saveexception(r30) ; Say we need to emulate a DSI
1724 li r0,0 ; Clear
1725 stw r2,savedsisr(r30) ; Pretend we have a PTE miss
1726 stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of
1727fgrXNoMap:
1728 ld r4,savesrr1(r30) ; Get the saved MSR value
1729 std r7,savesrr0(r30) ; Set savearea pc
1730 ld r5,famguestXmsr(r20) ; Load famguest ctx msr
1731 lis r6,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
1732 ori r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
1733 and r5,r5,r6 ; Keep only the controllable bits
1734 oris r5,r5,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1735 ori r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1736 rlwimi r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
1737 rlwimi r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
1738 std r5,savesrr1(r30) ; Set savearea srr1
1739 ld r4,famguestXr0(r20) ; Load famguest ctx r0
1740 ld r5,famguestXr1(r20) ; Load famguest ctx r1
1741 ld r6,famguestXr2(r20) ; Load famguest ctx r2
1742 ld r7,famguestXr3(r20) ; Load famguest ctx r3
1743 std r4,saver0(r30) ; Set savearea r0
1744 std r5,saver1(r30) ; Set savearea r1
1745 std r6,saver2(r30) ; Set savearea r2
1746 std r7,saver3(r30) ; Set savearea r3
1747 ld r4,famguestXr4(r20) ; Load famguest ctx r4
1748 ld r5,famguestXr5(r20) ; Load famguest ctx r5
1749 ld r6,famguestXr6(r20) ; Load famguest ctx r6
1750 ld r7,famguestXr7(r20) ; Load famguest ctx r7
1751 std r4,saver4(r30) ; Set savearea r4
1752 std r5,saver5(r30) ; Set savearea r5
1753 std r6,saver6(r30) ; Set savearea r6
1754 std r7,saver7(r30) ; Set savearea r7
1755fgrret:
d7e50217 1756 li r3,1 ; Show normal exit with check for AST
91447636 1757 mr r16,r26 ; Restore the thread pointer
d7e50217
A
1758 b EXT(ppcscret) ; Go back to handler...
1759
1760;
55e303ae 1761; FAM Intercept exception handler
d7e50217
A
1762;
1763
1764 .align 5
55e303ae
A
1765 .globl EXT(vmm_fam_exc)
1766
1767LEXT(vmm_fam_exc)
1768 lwz r4,VMMXAFlgs(r2) ; Get the eXtended Architecture flags
1769 lwz r1,pfAvailable(r2) ; Get the CPU features flags
1770 rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
1771 bne fexcX
1772 lwz r4,saver4+4(r13) ; Load savearea r4
d7e50217
A
1773 cmplwi r11,T_ALIGNMENT ; Alignment exception?
1774 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
55e303ae 1775 mtcrf 0x02,r1 ; Move pf64Bit to its normal place in CR6
d7e50217 1776 cmplwi cr1,r11,T_PROGRAM ; Exiting because of an PRG?
55e303ae
A
1777 bt++ pf64Bitb,fexcVMareaPhys64 ; Go do this on a 64-bit machine...
1778 slwi r3,r3,12 ; Change ppnum to physical address
1779 b fexcVMareaPhysres
1780fexcVMareaPhys64:
1781 mtxer r5 ; Restore xer
1782 lwz r5,saver5+4(r13) ; Load savearea r5
1783 lwz r6,saver6+4(r13) ; Load savearea r6
1784 sldi r3,r3,12 ; Change ppnum to physical address
1785fexcVMareaPhysres:
d7e50217
A
1786 stw r4,famguestr4(r3) ; Save r4 in famguest ctx
1787 stw r5,famguestr5(r3) ; Save r5 in famguest ctx
1788 stw r6,famguestr6(r3) ; Save r6 in famguest ctx
1789 stw r7,famguestr7(r3) ; Save r7 in famguest ctx
55e303ae
A
1790 lwz r4,saver0+4(r13) ; Load savearea r0
1791 lwz r5,saver1+4(r13) ; Load savearea r1
1792 lwz r6,saver2+4(r13) ; Load savearea r2
1793 lwz r7,saver3+4(r13) ; Load savearea r3
d7e50217
A
1794 stw r4,famguestr0(r3) ; Save r0 in famguest ctx
1795 stw r5,famguestr1(r3) ; Save r1 in famguest ctx
1796 stw r6,famguestr2(r3) ; Save r2 in famguest ctx
1797 stw r7,famguestr3(r3) ; Save r3 in famguest ctx
1798 lwz r4,spcFlags(r2) ; Load per_proc spcFlags
1799 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1800 stw r4,spcFlags(r2) ; Update per_proc spcFlags
1801 mfsrr0 r2 ; Get the interrupt srr0
1802 mfsrr1 r4 ; Get the interrupt srr1
1803 stw r2,famguestpc(r3) ; Save srr0 in famguest ctx
1804 stw r4,famguestmsr(r3) ; Save srr1 in famguest ctx
1805 li r6,lo16(MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)|MASK(MSR_FE1))
1806 andc r6,r4,r6 ; Clear SE BE FE0 FE1
1807 mtsrr1 r6 ; Set srr1
1808 mr r6,r3 ; Set r6 with phys state page addr
1809 rlwinm r7,r11,30,24,31 ; Convert exception to return code
55e303ae
A
1810 beq+ cr1,fexcPRG ; We had a program exception...
1811 bne+ fexcret
d7e50217
A
1812 ; We had an Alignment...
1813 mfdar r3 ; Load dar
1814 mfdsisr r4 ; Load dsisr
1815 stw r3,famparam+0x4(r6) ; Set famparam 1 with dar
1816 stw r4,famparam+0x8(r6) ; Set famparam 2 with dsir
55e303ae
A
1817 b fexcret ;
1818fexcPRG:
d7e50217
A
1819 stw r4,famparam+0x4(r6) ; Set famparam 1 with srr1
1820 mr r3,r4 ; Set r3 with dsisr
1821 lwz r4,famguestr4(r6) ; Load r4 from famguest context
55e303ae 1822fexcret:
d7e50217
A
1823 lwz r5,famguestr5(r6) ; Load r5 from famguest context
1824 lwz r13,famhandler(r6) ; Load user address to resume
1825 stw r2,famparam(r6) ; Set famparam 0 with srr0
1826 stw r7,famdispcode(r6) ; Save the exit code
1827 lwz r1,famrefcon(r6) ; load refcon
55e303ae 1828 bt++ pf64Bitb,fexcrfi64 ; Go do this on a 64-bit machine...
d7e50217
A
1829 mtcr r0 ; Restore cr
1830 mtsrr0 r13 ; Load srr0
1831 mr r0,r7 ; Set dispatch code
1832 lwz r7,famguestr7(r6) ; Load r7 from famguest context
1833 lwz r6,famguestr6(r6) ; Load r6 from famguest context
1834 mfsprg r13,2 ; Restore r13
1835 mfsprg r11,3 ; Restore r11
1836 rfi
55e303ae
A
1837fexcrfi64:
1838 mtcr r0 ; Restore cr
1839 mtsrr0 r13 ; Load srr0
1840 mr r0,r7 ; Set dispatch code
1841 lwz r7,famguestr7(r6) ; Load r7 from famguest context
1842 lwz r6,famguestr6(r6) ; Load r6 from famguest context
1843 mfsprg r13,2 ; Restore r13
1844 mfsprg r11,3 ; Restore r11
1845 rfid
1846fexcX:
1847 mtxer r5 ; Restore xer
1848 ld r4,saver4(r13) ; Load savearea r4
1849 ld r5,saver5(r13) ; Load savearea r5
1850 ld r6,saver6(r13) ; Load savearea r6
1851 cmplwi r11,T_ALIGNMENT ; Alignment exception?
1852 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
1853 mtcrf 0x02,r1 ; Move pf64Bit to its normal place in CR6
1854 cmplwi cr1,r11,T_PROGRAM ; Exiting because of an PRG?
1855 sldi r3,r3,12 ; Change ppnum to physical address
1856 std r4,famguestXr4(r3) ; Save r4 in famguest ctx
1857 std r5,famguestXr5(r3) ; Save r5 in famguest ctx
1858 std r6,famguestXr6(r3) ; Save r6 in famguest ctx
1859 std r7,famguestXr7(r3) ; Save r7 in famguest ctx
1860 ld r4,saver0(r13) ; Load savearea r0
1861 ld r5,saver1(r13) ; Load savearea r1
1862 ld r6,saver2(r13) ; Load savearea r2
1863 ld r7,saver3(r13) ; Load savearea r3
1864 std r4,famguestXr0(r3) ; Save r0 in famguest ctx
1865 std r5,famguestXr1(r3) ; Save r1 in famguest ctx
1866 std r6,famguestXr2(r3) ; Save r2 in famguest ctx
1867 std r7,famguestXr3(r3) ; Save r3 in famguest ctx
1868 lwz r4,spcFlags(r2) ; Load per_proc spcFlags
1869 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1870 stw r4,spcFlags(r2) ; Update per_proc spcFlags
1871 mfsrr0 r2 ; Get the interrupt srr0
1872 mfsrr1 r4 ; Get the interrupt srr1
1873 std r2,famguestXpc(r3) ; Save srr0 in famguest ctx
1874 std r4,famguestXmsr(r3) ; Save srr1 in famguest ctx
1875 li r6,lo16(MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)|MASK(MSR_FE1))
1876 andc r6,r4,r6 ; Clear SE BE FE0 FE1
1877 mtsrr1 r6 ; Set srr1
1878 mr r6,r3 ; Set r6 with phys state page addr
1879 rlwinm r7,r11,30,24,31 ; Convert exception to return code
1880 beq+ cr1,fexcXPRG ; We had a program exception...
1881 bne+ fexcXret
1882 ; We had an Alignment...
1883 mfdar r3 ; Load dar
1884 mfdsisr r4 ; Load dsisr
1885 std r3,famparamX+0x8(r6) ; Set famparam 1 with dar
1886 std r4,famparamX+0x10(r6) ; Set famparam 2 with dsir
1887 b fexcXret
1888fexcXPRG:
1889 std r4,famparamX+0x8(r6) ; Set famparam 1 with srr1
1890 mr r3,r4 ; Set r3 with dsisr
1891 ld r4,famguestXr4(r6) ; Load r4 from famguest context
1892fexcXret:
1893 ld r5,famguestXr5(r6) ; Load r5 from famguest context
1894 ld r13,famhandlerX(r6) ; Load user address to resume
1895 std r2,famparamX(r6) ; Set famparam 0 with srr0
1896 std r7,famdispcodeX(r6) ; Save the exit code
1897 ld r1,famrefconX(r6) ; load refcon
1898 mtcr r0 ; Restore cr
1899 mtsrr0 r13 ; Load srr0
1900 mr r0,r7 ; Set dispatch code
1901 ld r7,famguestXr7(r6) ; Load r7 from famguest context
1902 ld r6,famguestXr6(r6) ; Load r6 from famguest context
1903 mfsprg r13,2 ; Restore r13
1904 mfsprg r11,3 ; Restore r11
1905 rfid
d7e50217
A
1906
1907;
1908; FAM Intercept DSI ISI fault handler
1909;
1910
1911 .align 5
55e303ae 1912 .globl EXT(vmm_fam_pf)
d7e50217 1913
55e303ae
A
1914LEXT(vmm_fam_pf)
1915 lwz r4,VMMXAFlgs(r2) ; Get the eXtended Architecture flags
d7e50217 1916 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
55e303ae
A
1917 rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
1918 bne fpfX
1919 lwz r4,saver0+4(r13) ; Load savearea r0
1920 lwz r5,saver1+4(r13) ; Load savearea r1
1921 lwz r6,saver2+4(r13) ; Load savearea r2
1922 lwz r7,saver3+4(r13) ; Load savearea r3
1923 bt++ pf64Bitb,fpfVMareaPhys64 ; Go do this on a 64-bit machine...
1924 slwi r3,r3,12 ; Change ppnum to physical address
1925 b fpfVMareaPhysret
1926fpfVMareaPhys64:
1927 sldi r3,r3,12 ; Change ppnum to physical address
1928fpfVMareaPhysret:
d7e50217
A
1929 stw r4,famguestr0(r3) ; Save r0 in famguest
1930 stw r5,famguestr1(r3) ; Save r1 in famguest
1931 stw r6,famguestr2(r3) ; Save r2 in famguest
1932 stw r7,famguestr3(r3) ; Save r3 in famguest
55e303ae
A
1933 lwz r4,saver4+4(r13) ; Load savearea r0
1934 lwz r5,saver5+4(r13) ; Load savearea r1
1935 lwz r6,saver6+4(r13) ; Load savearea r2
1936 lwz r7,saver7+4(r13) ; Load savearea r3
d7e50217
A
1937 stw r4,famguestr4(r3) ; Save r4 in famguest
1938 lwz r4,spcFlags(r2) ; Load spcFlags
1939 stw r5,famguestr5(r3) ; Save r5 in famguest
55e303ae 1940 lwz r5,savesrr0+4(r13) ; Get the interrupt srr0
d7e50217 1941 stw r6,famguestr6(r3) ; Save r6 in famguest
55e303ae 1942 lwz r6,savesrr1+4(r13) ; Load srr1
d7e50217
A
1943 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1944 stw r7,famguestr7(r3) ; Save r7 in famguest
1945 stw r4,spcFlags(r2) ; Update spcFlags
1946 lwz r1,famrefcon(r3) ; Load refcon
1947 lwz r2,famhandler(r3) ; Load famhandler to resume
1948 stw r5,famguestpc(r3) ; Save srr0
55e303ae 1949 stw r5,saver2+4(r13) ; Store srr0 in savearea r2
d7e50217
A
1950 stw r5,famparam(r3) ; Store srr0 in fam param 0
1951 stw r6,famguestmsr(r3) ; Save srr1 in famguestmsr
1952 cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this a ISI?
1953 rlwinm r7,r11,30,24,31 ; Convert exception to return code
55e303ae
A
1954 beq+ cr1,fpfISI ; We had an ISI...
1955; fpfDSI
1956 lwz r6,savedar+4(r13) ; Load dar from savearea
d7e50217
A
1957 lwz r4,savedsisr(r13) ; Load dsisr from savearea
1958 stw r6,famparam+0x4(r3) ; Store dar in fam param 1
55e303ae 1959 stw r6,saver3+4(r13) ; Store dar in savearea r3
d7e50217 1960 stw r4,famparam+0x8(r3) ; Store dsisr in fam param 2
55e303ae
A
1961 stw r4,saver4+4(r13) ; Store dsisr in savearea r4
1962 b fpfret
1963fpfISI:
d7e50217
A
1964 rlwinm r6,r6,0,1,4 ; Save the bits that match the DSISR
1965 stw r6,famparam+0x4(r3) ; Store srr1 in fam param 1
55e303ae
A
1966 stw r6,saver3+4(r13) ; Store srr1 in savearea r3
1967fpfret:
1968 stw r7,saver0+4(r13) ; Set dispatch code
d7e50217 1969 stw r7,famdispcode(r3) ; Set dispatch code
55e303ae
A
1970 stw r1,saver1+4(r13) ; Store refcon in savearea r1
1971 stw r2,savesrr0+4(r13) ; Store famhandler in srr0
1972 blr
1973fpfX:
1974 ld r4,saver0(r13) ; Load savearea r0
1975 ld r5,saver1(r13) ; Load savearea r1
1976 ld r6,saver2(r13) ; Load savearea r2
1977 ld r7,saver3(r13) ; Load savearea r3
1978 sldi r3,r3,12 ; Change ppnum to physical address
1979 std r4,famguestXr0(r3) ; Save r0 in famguest
1980 std r5,famguestXr1(r3) ; Save r1 in famguest
1981 std r6,famguestXr2(r3) ; Save r2 in famguest
1982 std r7,famguestXr3(r3) ; Save r3 in famguest
1983 ld r4,saver4(r13) ; Load savearea r0
1984 ld r5,saver5(r13) ; Load savearea r1
1985 ld r6,saver6(r13) ; Load savearea r2
1986 ld r7,saver7(r13) ; Load savearea r3
1987 std r4,famguestXr4(r3) ; Save r4 in famguest
1988 lwz r4,spcFlags(r2) ; Load spcFlags
1989 std r5,famguestXr5(r3) ; Save r5 in famguest
1990 ld r5,savesrr0(r13) ; Get the interrupt srr0
1991 std r6,famguestXr6(r3) ; Save r6 in famguest
1992 ld r6,savesrr1(r13) ; Load srr1
1993 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1994 std r7,famguestXr7(r3) ; Save r7 in famguest
1995 stw r4,spcFlags(r2) ; Update spcFlags
1996 ld r1,famrefconX(r3) ; Load refcon
1997 ld r2,famhandlerX(r3) ; Load famhandler to resume
1998 std r5,famguestXpc(r3) ; Save srr0
1999 std r5,saver2(r13) ; Store srr0 in savearea r2
2000 std r5,famparamX(r3) ; Store srr0 in fam param 0
2001 std r6,famguestXmsr(r3) ; Save srr1 in famguestmsr
2002 cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this a ISI?
2003 rlwinm r7,r11,30,24,31 ; Convert exception to return code
2004 beq+ cr1,fpfXISI ; We had an ISI...
2005; fpfXDSI
2006 ld r6,savedar(r13) ; Load dar from savearea
2007 lwz r4,savedsisr(r13) ; Load dsisr from savearea
2008 std r6,famparamX+0x8(r3) ; Store dar in fam param 1
2009 std r6,saver3(r13) ; Store dar in savearea r3
2010 std r4,famparamX+0x10(r3) ; Store dsisr in fam param 2
2011 std r4,saver4(r13) ; Store dsisr in savearea r4
2012 b fpfXret
2013fpfXISI:
2014 rlwinm r6,r6,0,1,4 ; Save the bits that match the DSISR
2015 std r6,famparamX+0x8(r3) ; Store srr1 in fam param 1
2016 std r6,saver3(r13) ; Store srr1 in savearea r3
2017fpfXret:
2018 std r7,saver0(r13) ; Set dispatch code
2019 std r7,famdispcodeX(r3) ; Set dispatch code
2020 std r1,saver1(r13) ; Store refcon in savearea r1
2021 std r2,savesrr0(r13) ; Store famhandler in srr0
d7e50217
A
2022 blr
2023
91447636
A
2024/*
2025 * Ultra Fast Path FAM syscalls
2026 *
2027 * The UFT FAMs are those from kvmmResumeGuest to kvmmSetGuestRegister, inclusive.
2028 * We get here directly from the syscall vector, with interrupts and translation off,
2029 * 64-bit mode on if supported, and all registers live except:
2030 *
2031 * r13 = holds caller's cr
2032 * sprg2 = holds caller's r13
2033 * sprg3 = holds caller's r11
2034 * cr2 = set on (r3==kvmmSetGuestRegister)
2035 * cr5 = set on (r3==kvmmResumeGuest)
2036 */
d7e50217
A
2037
2038 .align 5
2039 .globl EXT(vmm_ufp)
2040
2041LEXT(vmm_ufp)
2042 mfsprg r3,0 ; Get the per_proc area
91447636 2043 mr r11,r13 ; Move saved cr to r11
55e303ae 2044 lwz r13,VMMXAFlgs(r3) ; Get the eXtended Architecture flags
91447636
A
2045 rlwinm. r13,r13,0,0,0 ; Are we doing a 64-bit virtual machine?
2046
55e303ae
A
2047 lwz r13,pfAvailable(r3) ; Get feature flags
2048 mtcrf 0x02,r13 ; Put pf64Bitb etc in cr6
2049 lwz r13,VMMareaPhys(r3) ; Load fast assist area
2050 bt++ pf64Bitb,ufpVMareaPhys64 ; Go do this on a 64-bit machine...
2051 slwi r13,r13,12 ; Change ppnum to physical address
2052 b ufpVMareaPhysret
2053ufpVMareaPhys64:
2054 sldi r13,r13,12 ; Change ppnum to physical address
2055ufpVMareaPhysret:
91447636
A
2056 bne ufpX ; go handle a 64-bit virtual machine
2057
55e303ae 2058 bt cr5_eq,ufpResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest
91447636
A
2059 cmplwi cr5,r4,7 ; First argument in range? (ie, 0-7)
2060 bgt cr5,ufpVMret ; Return if not in the range
d7e50217 2061 slwi r4,r4,2 ; multiply index by 4
55e303ae
A
2062 la r3,famguestr0(r13) ; Load the base address
2063 bt cr2_eq,ufpSetGuestReg ; Set/get selector
2064; ufpGetGuestReg
d7e50217
A
2065 lwzx r3,r4,r3 ; Load the guest register
2066 b ufpVMret ; Return
55e303ae 2067ufpSetGuestReg:
d7e50217
A
2068 stwx r5,r4,r3 ; Update the guest register
2069 li r3,0 ; Set return value
2070 b ufpVMret ; Return
55e303ae 2071ufpResumeGuest:
d7e50217
A
2072 lwz r7,spcFlags(r3) ; Pick up the special flags
2073 mtsrr0 r4 ; Set srr0
2074 rlwinm. r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl
2075 rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
55e303ae
A
2076 stw r7,spcFlags(r3) ; Update the special flags
2077 mfsrr1 r6 ; Get the current MSR value
2078
2079 lwz r4,famguestmsr(r13) ; Load guest srr1
2080 lis r1,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
2081 ori r1,r1,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
2082 and r4,r4,r1 ; Keep only the controllable bits
2083 oris r4,r4,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
2084 ori r4,r4,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
2085 rlwimi r4,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
2086 rlwimi r4,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
2087 beq ufpnokey ; Branch if not key switch
d7e50217
A
2088 mr r2,r7 ; Save r7
2089 rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
2090 cmpw cr0,r7,r2 ; Is userProtKeybit changed?
55e303ae
A
2091 beq ufpnokey ; No, go to ResumeGuest_nokey
2092 mr r5,r3 ; Get the per_proc area
2093 stw r7,spcFlags(r3) ; Update the special flags
2094
2095 bt++ pf64Bitb,ufpsave64 ; Go do this on a 64-bit machine...
2096
2097 lwz r3,next_savearea+4(r5) ; Get the exception save area
2098 stw r8,saver8+4(r3) ; Save r8
2099 stw r9,saver9+4(r3) ; Save r9
2100 stw r10,saver10+4(r3) ; Save r10
2101 stw r11,saver11+4(r3) ; Save r11
2102 stw r12,saver12+4(r3) ; Save r12
2103 stw r13,saver13+4(r3) ; Save r12
2104 stw r14,saver14+4(r3) ; Save r14
2105 stw r15,saver15+4(r3) ; Save r15
2106 stw r16,saver16+4(r3) ; Save r16
2107 stw r17,saver17+4(r3) ; Save r17
2108 stw r18,saver18+4(r3) ; Save r18
2109 stw r19,saver19+4(r3) ; Save r19
2110 stw r20,saver20+4(r3) ; Save r20
2111 stw r21,saver21+4(r3) ; Save r21
2112 stw r22,saver22+4(r3) ; Save r22
2113 stw r23,saver23+4(r3) ; Save r23
2114 stw r24,saver24+4(r3) ; Save r24
2115 stw r25,saver25+4(r3) ; Save r25
2116 stw r26,saver26+4(r3) ; Save r26
2117 stw r27,saver27+4(r3) ; Save r27
2118 stw r28,saver28+4(r3) ; Save r28
2119 stw r29,saver29+4(r3) ; Save r29
2120 stw r30,saver30+4(r3) ; Save r30
2121 stw r31,saver31+4(r3) ; Save r31
2122 b ufpsaveres ; Continue
2123
2124ufpsave64:
2125 ld r3,next_savearea(r5) ; Get the exception save area
2126 std r8,saver8(r3) ; Save r8
2127 std r9,saver9(r3) ; Save r9
2128 std r10,saver10(r3) ; Save r10
2129 std r11,saver11(r3) ; Save r11
2130 std r12,saver12(r3) ; Save r12
2131 std r13,saver13(r3) ; Save r12
2132 std r14,saver14(r3) ; Save r14
2133 std r15,saver15(r3) ; Save r15
2134 std r16,saver16(r3) ; Save r16
2135 std r17,saver17(r3) ; Save r17
2136 std r18,saver18(r3) ; Save r18
2137 std r19,saver19(r3) ; Save r19
2138 std r20,saver20(r3) ; Save r20
2139 std r21,saver21(r3) ; Save r21
2140 std r22,saver22(r3) ; Save r22
2141 std r23,saver23(r3) ; Save r23
2142 std r24,saver24(r3) ; Save r24
2143 std r25,saver25(r3) ; Save r25
2144 std r26,saver26(r3) ; Save r26
2145 std r27,saver27(r3) ; Save r27
2146 std r28,saver28(r3) ; Save r28
2147 std r29,saver29(r3) ; Save r29
2148 mfxer r2 ; Get xer
2149 std r30,saver30(r3) ; Save r30
2150 std r31,saver31(r3) ; Save r31
2151 std r2,savexer(r3) ; Save xer
2152
2153ufpsaveres:
2154 mflr r20 ; Get lr
2155 li r2,1 ; Set to 1
2156 stw r7,spcFlags(r5) ; Update the special flags
2157 mr r13,r3 ; Set current savearea
2158 mr r21,r4 ; Save r4
2159 sth r2,ppInvSeg(r5) ; Force a reload of the SRs
2160 mr r29,r5 ; Get the per_proc area
2161 mr r3,r4 ; Set MSR value we going to
2162 bl EXT(switchSegs) ; Go handle the segment registers/STB
2163 mr r3,r13 ; Set current savearea
2164 mr r4,r21 ; Restore r4
2165 mtlr r20 ; Set lr
2166
2167 bt++ pf64Bitb,ufprestore64 ; Go do this on a 64-bit machine...
2168 lwz r8,saver8+4(r3) ; Load r8
2169 lwz r9,saver9+4(r3) ; Load r9
2170 lwz r10,saver10+4(r3) ; Load r10
2171 lwz r11,saver11+4(r3) ; Load r11
2172 lwz r12,saver12+4(r3) ; Load r12
2173 lwz r13,saver13+4(r3) ; Load r12
2174 lwz r14,saver14+4(r3) ; Load r14
2175 lwz r15,saver15+4(r3) ; Load r15
2176 lwz r16,saver16+4(r3) ; Load r16
2177 lwz r17,saver17+4(r3) ; Load r17
2178 lwz r18,saver18+4(r3) ; Load r18
2179 lwz r19,saver19+4(r3) ; Load r19
2180 lwz r20,saver20+4(r3) ; Load r20
2181 lwz r21,saver21+4(r3) ; Load r21
2182 lwz r22,saver22+4(r3) ; Load r22
2183 lwz r23,saver23+4(r3) ; Load r23
2184 lwz r24,saver24+4(r3) ; Load r24
2185 lwz r25,saver25+4(r3) ; Load r25
2186 lwz r26,saver26+4(r3) ; Load r26
2187 lwz r27,saver27+4(r3) ; Load r27
2188 lwz r28,saver28+4(r3) ; Load r28
2189 lwz r29,saver29+4(r3) ; Load r29
2190 lwz r30,saver30+4(r3) ; Load r30
2191 lwz r31,saver31+4(r3) ; Load r31
2192 b ufpnokey ; Continue
2193ufprestore64:
2194 ld r2,savexer(r3) ; Load xer
2195 ld r8,saver8(r3) ; Load r8
2196 ld r9,saver9(r3) ; Load r9
2197 ld r10,saver10(r3) ; Load r10
2198 mtxer r2 ; Restore xer
2199 ld r11,saver11(r3) ; Load r11
2200 ld r12,saver12(r3) ; Load r12
2201 ld r13,saver13(r3) ; Load r12
2202 ld r14,saver14(r3) ; Load r14
2203 ld r15,saver15(r3) ; Load r15
2204 ld r16,saver16(r3) ; Load r16
2205 ld r17,saver17(r3) ; Load r17
2206 ld r18,saver18(r3) ; Load r18
2207 ld r19,saver19(r3) ; Load r19
2208 ld r20,saver20(r3) ; Load r20
2209 ld r21,saver21(r3) ; Load r21
2210 ld r22,saver22(r3) ; Load r22
2211 ld r23,saver23(r3) ; Load r23
2212 ld r24,saver24(r3) ; Load r24
2213 ld r25,saver25(r3) ; Load r25
2214 ld r26,saver26(r3) ; Load r26
2215 ld r27,saver27(r3) ; Load r27
2216 ld r28,saver28(r3) ; Load r28
2217 ld r29,saver29(r3) ; Load r29
2218 ld r30,saver30(r3) ; Load r30
2219 ld r31,saver31(r3) ; Load r31
2220ufpnokey:
2221 mfsprg r3,0 ; Get the per_proc area
2222 mtsrr1 r4 ; Set srr1
d7e50217
A
2223 lwz r0,famguestr0(r13) ; Load r0
2224 lwz r1,famguestr1(r13) ; Load r1
2225 lwz r2,famguestr2(r13) ; Load r2
2226 lwz r3,famguestr3(r13) ; Load r3
2227 lwz r4,famguestr4(r13) ; Load r4
2228 lwz r5,famguestr5(r13) ; Load r5
2229 lwz r6,famguestr6(r13) ; Load r6
2230 lwz r7,famguestr7(r13) ; Load r7
2231ufpVMret:
55e303ae
A
2232 mfsprg r13,2 ; Restore R13
2233 bt++ pf64Bitb,ufpVMrfi64 ; Go do this on a 64-bit machine...
d7e50217
A
2234 mtcrf 0xFF,r11 ; Restore CR
2235 mfsprg r11,3 ; Restore R11
de355530 2236 rfi ; All done, go back...
55e303ae
A
2237ufpVMrfi64:
2238 mtcrf 0xFF,r11 ; Restore CR
2239 mfsprg r11,3 ; Restore R11
2240 rfid
2241
91447636 2242ufpX: ; here if virtual machine is 64-bit
55e303ae 2243 bt cr5_eq,ufpXResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest
91447636
A
2244 cmplwi cr5,r4,7 ; Is first arg in range 0-7?
2245 bgt cr5,ufpXVMret ; Return if not in the range
55e303ae
A
2246 slwi r4,r4,3 ; multiply index by 8
2247 la r3,famguestXr0(r13) ; Load the base address
2248 bt cr2_eq,ufpXSetGuestReg ; Set/get selector
2249; ufpXGetGuestReg
2250 ldx r3,r4,r3 ; Load the guest register
2251 b ufpXVMret ; Return
2252ufpXSetGuestReg:
2253 stdx r5,r4,r3 ; Update the guest register
2254 li r3,0 ; Set return value
2255 b ufpXVMret ; Return
2256ufpXResumeGuest:
2257 lwz r7,spcFlags(r3) ; Pick up the special flags
2258 mtsrr0 r4 ; Set srr0
2259 rlwinm. r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl
2260 rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
2261 stw r7,spcFlags(r3) ; Update the special flags
2262 mfsrr1 r6 ; Get the current MSR value
2263
2264 ld r4,famguestXmsr(r13) ; Load guest srr1
2265 lis r1,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
2266 ori r1,r1,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
2267 and r4,r4,r1 ; Keep only the controllable bits
2268 oris r4,r4,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
2269 ori r4,r4,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
2270 rlwimi r4,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
2271 rlwimi r4,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
2272 beq ufpXnokey ; Branch if not key switch
2273 mr r2,r7 ; Save r7
2274 rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
2275 cmpw cr0,r7,r2 ; Is userProtKeybit changed?
2276 beq ufpXnokey ; No, go to ResumeGuest_nokey
2277 mr r5,r3 ; Get the per_proc area
2278 stw r7,spcFlags(r3) ; Update the special flags
2279
2280 ld r3,next_savearea(r5) ; Get the exception save area
2281 std r8,saver8(r3) ; Save r8
2282 std r9,saver9(r3) ; Save r9
2283 std r10,saver10(r3) ; Save r10
2284 std r11,saver11(r3) ; Save r11
2285 std r12,saver12(r3) ; Save r12
2286 std r13,saver13(r3) ; Save r12
2287 std r14,saver14(r3) ; Save r14
2288 std r15,saver15(r3) ; Save r15
2289 std r16,saver16(r3) ; Save r16
2290 std r17,saver17(r3) ; Save r17
2291 std r18,saver18(r3) ; Save r18
2292 std r19,saver19(r3) ; Save r19
2293 std r20,saver20(r3) ; Save r20
2294 std r21,saver21(r3) ; Save r21
2295 std r22,saver22(r3) ; Save r22
2296 std r23,saver23(r3) ; Save r23
2297 std r24,saver24(r3) ; Save r24
2298 std r25,saver25(r3) ; Save r25
2299 std r26,saver26(r3) ; Save r26
2300 std r27,saver27(r3) ; Save r27
2301 std r28,saver28(r3) ; Save r28
2302 std r29,saver29(r3) ; Save r29
2303 mfxer r2 ; Get xer
2304 std r30,saver30(r3) ; Save r30
2305 std r31,saver31(r3) ; Save r31
2306 std r2,savexer(r3) ; Save xer
2307
2308 mflr r20 ; Get lr
2309 li r2,1 ; Set to 1
2310 stw r7,spcFlags(r5) ; Update the special flags
2311 mr r13,r3 ; Set current savearea
2312 mr r21,r4 ; Save r4
2313 sth r2,ppInvSeg(r5) ; Force a reload of the SRs
2314 mr r29,r5 ; Get the per_proc area
2315 mr r3,r4 ; Set MSR value we going to
2316 bl EXT(switchSegs) ; Go handle the segment registers/STB
2317 mr r3,r13 ; Set current savearea
2318 mr r4,r21 ; Restore r4
2319 mtlr r20 ; Set lr
2320
2321 ld r2,savexer(r3) ; Load xer
2322 ld r8,saver8(r3) ; Load r8
2323 ld r9,saver9(r3) ; Load r9
2324 ld r10,saver10(r3) ; Load r10
2325 mtxer r2 ; Restore xer
2326 ld r11,saver11(r3) ; Load r11
2327 ld r12,saver12(r3) ; Load r12
2328 ld r13,saver13(r3) ; Load r12
2329 ld r14,saver14(r3) ; Load r14
2330 ld r15,saver15(r3) ; Load r15
2331 ld r16,saver16(r3) ; Load r16
2332 ld r17,saver17(r3) ; Load r17
2333 ld r18,saver18(r3) ; Load r18
2334 ld r19,saver19(r3) ; Load r19
2335 ld r20,saver20(r3) ; Load r20
2336 ld r21,saver21(r3) ; Load r21
2337 ld r22,saver22(r3) ; Load r22
2338 ld r23,saver23(r3) ; Load r23
2339 ld r24,saver24(r3) ; Load r24
2340 ld r25,saver25(r3) ; Load r25
2341 ld r26,saver26(r3) ; Load r26
2342 ld r27,saver27(r3) ; Load r27
2343 ld r28,saver28(r3) ; Load r28
2344 ld r29,saver29(r3) ; Load r29
2345 ld r30,saver30(r3) ; Load r30
2346 ld r31,saver31(r3) ; Load r31
2347ufpXnokey:
2348 mtsrr1 r4 ; Set srr1
2349 ld r0,famguestXr0(r13) ; Load r0
2350 ld r1,famguestXr1(r13) ; Load r1
2351 ld r2,famguestXr2(r13) ; Load r2
2352 ld r3,famguestXr3(r13) ; Load r3
2353 ld r4,famguestXr4(r13) ; Load r4
2354 ld r5,famguestXr5(r13) ; Load r5
2355 ld r6,famguestXr6(r13) ; Load r6
2356 ld r7,famguestXr7(r13) ; Load r7
2357ufpXVMret:
2358 mfsprg r13,2 ; Restore R13
2359 mtcrf 0xFF,r11 ; Restore CR
2360 mfsprg r11,3 ; Restore R11
2361 rfid
2362