/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <assym.s>
#include <debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>

/*
 * This file contains implementations for the Virtual Machine Monitor
 * facility.
 */

#define vmmMapDone 31
#define vmmDoing64 30


/*
 * int vmm_dispatch(savearea, act);
 *
 * vmm_dispatch is a PPC only system call. It is used with a selector (first
 * parameter) to determine what function to enter. This is treated as an extension
 * of hw_exceptions.
 *
 * Inputs:
 * R4 = current activation
 * R16 = current thread
 * R30 = current savearea
 */
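;
; Illustrative note (not part of the original comments): the user's selector arrives in the
; saved r3 of the savearea (saver3) and the remaining user parameters in saver4-saver9. The
; dispatch code below passes the activation in r3 and shifts the saved user parameters down
; into r4-r9, so each routine reached through the table effectively sees, in C-like terms
; (hypothetical signature; some arguments are really 64-bit pairs handled by glue code):
;
;	ret = handler(act, user_r4, user_r5, user_r6, user_r7, user_r8, user_r9);
;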

	.align 5 ; Line up on cache line
	.globl EXT(vmm_dispatch_table)

LEXT(vmm_dispatch_table)

	/* Don't change the order of these routines in the table. It's */
	/* OK to add new routines, but they must be added at the bottom. */

	.long EXT(vmm_get_version_sel) ; Get the version of the VMM interface
	.long 0 ; Not valid in Fam
	.long EXT(vmm_get_features_sel) ; Get the features of the VMM interface
	.long 0 ; Not valid in Fam
	.long EXT(vmm_init_context_sel) ; Initializes a new VMM context
	.long 0 ; Not valid in Fam
	.long EXT(vmm_tear_down_context) ; Tears down a previously-allocated VMM context
	.long 0 ; Not valid in Fam
	.long EXT(vmm_tear_down_all) ; Tears down all VMMs
	.long 0 ; Not valid in Fam
	.long EXT(vmm_map_page32) ; Maps a page from the main address space into the VM space - supports 32-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_get_page_mapping32) ; Returns client va associated with VM va - supports 32-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_unmap_page32) ; Unmaps a page from the VM space - supports 32-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_unmap_all_pages) ; Unmaps all pages from the VM space
	.long 1 ; Valid in Fam
	.long EXT(vmm_get_page_dirty_flag32) ; Gets the change bit for a page and optionally clears it - supports 32-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_get_float_state) ; Gets current floating point state
	.long 0 ; Not valid in Fam
	.long EXT(vmm_get_vector_state) ; Gets current vector state
	.long 0 ; Not valid in Fam
	.long EXT(vmm_set_timer) ; Sets a timer value
	.long 1 ; Valid in Fam
	.long EXT(vmm_get_timer) ; Gets a timer value
	.long 1 ; Valid in Fam
	.long EXT(switchIntoVM) ; Switches to the VM context
	.long 1 ; Valid in Fam
	.long EXT(vmm_protect_page32) ; Sets protection values for a page - supports 32-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_map_execute32) ; Maps a page and launches VM - supports 32-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_protect_execute32) ; Sets protection values for a page and launches VM - supports 32-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_map_list32) ; Maps a list of pages - supports 32-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_unmap_list32) ; Unmaps a list of pages - supports 32-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_fam_reserved) ; exit from Fam to host
	.long 1 ; Valid in Fam
	.long EXT(vmm_fam_reserved) ; resume guest from Fam
	.long 1 ; Valid in Fam
	.long EXT(vmm_fam_reserved) ; get guest register from Fam
	.long 1 ; Valid in Fam
	.long EXT(vmm_fam_reserved) ; Set guest register from Fam
	.long 1 ; Valid in Fam
	.long EXT(vmm_activate_XA) ; Activate extended architecture features for a VM
	.long 0 ; Not valid in Fam
	.long EXT(vmm_deactivate_XA) ; Deactivate extended architecture features for a VM
	.long 0 ; Not valid in Fam
	.long EXT(vmm_get_XA) ; Get extended architecture features from a VM
	.long 1 ; Valid in Fam
	.long EXT(vmm_map_page) ; Map a host to guest address space - supports 64-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_get_page_mapping) ; Get host address of a guest page - supports 64-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_unmap_page) ; Unmap a guest page - supports 64-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_get_page_dirty_flag) ; Check if guest page modified - supports 64-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_protect_page) ; Sets protection values for a page - supports 64-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_map_execute) ; Map guest page and launch - supports 64-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_protect_execute) ; Set prot attributes and launch - supports 64-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_map_list64) ; Map a list of pages into guest address spaces - supports 64-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_unmap_list64) ; Unmap a list of pages from guest address spaces - supports 64-bit
	.long 1 ; Valid in Fam
	.long EXT(vmm_max_addr) ; Returns the maximum virtual address
	.long 1 ; Valid in Fam
#if 0
	.long EXT(vmm_set_guest_memory) ; Set guest memory extent
	.long 0 ; Not valid in FAM
	.long EXT(vmm_purge_local) ; Purge all local guest mappings
	.long 1 ; Valid in FAM
#endif
	.set vmm_count,(.-EXT(vmm_dispatch_table))/8 ; Get the top number
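;
; Illustrative note (not in the original source): each entry in the table above is two words --
; the routine address followed by a flag that is nonzero when the call is allowed while in FAM
; (Fast Assist Mode). The selector is therefore scaled by 8 (the rlwinm by 3 below) to index the
; table, and vmm_count is the table length in 8-byte entries. Roughly, in C terms (hypothetical
; names, for illustration only):
;
;	struct vmmDispatchEntry {
;		void	(*routine)(void);	// handler entered via blrl
;		long	famValid;		// nonzero: callable while in FAM
;	};
;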


	.align 5
	.globl EXT(vmm_dispatch)

LEXT(vmm_dispatch)

	lwz r11,saver3+4(r30) ; Get the selector
	mr r3,r4 ; All of our functions want the activation as the first parm
	lis r10,hi16(EXT(vmm_dispatch_table)) ; Get top half of table
	cmplwi r11,kVmmExecuteVM ; Should we switch to the VM now?
	cmplwi cr1,r11,vmm_count ; See if we have a valid selector
	ori r10,r10,lo16(EXT(vmm_dispatch_table)) ; Get low half of table
	lwz r4,saver4+4(r30) ; Get 1st parameter after selector
	beq+ EXT(switchIntoVM) ; Yes, go switch to it....
	rlwinm r11,r11,3,0,28 ; Index into table
	bge- cr1,vmmBogus ; It is a bogus entry
	add r12,r10,r11 ; Get the vmm dispatch syscall entry
	mfsprg r10,1 ; Get the current activation
	lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
	lwz r13,0(r12) ; Get address of routine
	lwz r12,4(r12) ; Get validity flag
	lwz r5,spcFlags(r10) ; Get per_proc special flags
	cmpwi cr1,r12,0 ; Check Fam valid
	rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
	crand cr0_eq,cr1_eq,cr0_gt ; In Fam and Invalid syscall
	beq vmmBogus ; Intercept to host
	lwz r5,saver5+4(r30) ; Get 2nd parameter after selector - note that some of these parameters may actually be long longs
	lwz r6,saver6+4(r30) ; Get 3rd parameter after selector
	mtlr r13 ; Set the routine address
	lwz r7,saver7+4(r30) ; Get 4th parameter after selector
	lwz r8,saver8+4(r30) ; Get 5th parameter after selector
	lwz r9,saver9+4(r30) ; Get 6th parameter after selector
;
; NOTE: some of the above parameters are actually long longs. We have glue code that transforms
; all needed parameters and/or adds 32-/64-bit flavors to the needed functions.
;

	blrl ; Call function

vmmRetPt: li r0,0 ; Clear this out
	stw r0,saver3(r30) ; Make sure top of RC is clear
	stw r3,saver3+4(r30) ; Pass back the return code
	stw r0,saver4(r30) ; Make sure bottom of RC is clear (just in case)
	stw r4,saver4+4(r30) ; Pass back the bottom return code (just in case)
	li r3,1 ; Set normal return with check for AST
	b EXT(ppcscret) ; Go back to handler...

vmmBogus:
	mfsprg r3,1 ; Get the current activation
	lwz r10,ACT_PER_PROC(r3) ; Get the per_proc block
	lwz r5,spcFlags(r10) ; Get per_proc special flags
	rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
	bne vmmexitcall ; Do it to it
	li r3,0 ; Bogus selector, treat like a bogus system call
	b EXT(ppcscret) ; Go back to handler...


	.align 5
	.globl EXT(vmm_get_version_sel)

LEXT(vmm_get_version_sel) ; Selector based version of get version

	lis r3,hi16(EXT(vmm_get_version))
	ori r3,r3,lo16(EXT(vmm_get_version))
	b selcomm


	.align 5
	.globl EXT(vmm_get_features_sel)

LEXT(vmm_get_features_sel) ; Selector based version of get features

	lis r3,hi16(EXT(vmm_get_features))
	ori r3,r3,lo16(EXT(vmm_get_features))
	b selcomm


	.align 5
	.globl EXT(vmm_init_context_sel)

LEXT(vmm_init_context_sel) ; Selector based version of init context

	lwz r4,saver4+4(r30) ; Get the passed in version
	lwz r5,saver5+4(r30) ; Get the passed in comm area
	lis r3,hi16(EXT(vmm_init_context))
	stw r4,saver3+4(r30) ; Cheat and move this parameter over
	ori r3,r3,lo16(EXT(vmm_init_context))
	stw r5,saver4+4(r30) ; Cheat and move this parameter over

selcomm: mtlr r3 ; Set the real routine address
	mr r3,r30 ; Pass in the savearea
	blrl ; Call the function
	b EXT(ppcscret) ; Go back to handler...

	.align 5
	.globl EXT(vmm_map_page32)

LEXT(vmm_map_page32)
	mr r9,r7 ; Move prot to correct parm
	mr r8,r6 ; Move guest address to low half of long long
	li r7,0 ; Clear high half of guest address
	mr r6,r5 ; Move host address to low half of long long
	li r5,0 ; Clear high half of host address
	b EXT(vmm_map_page) ; Transition to real function...

	.align 5
	.globl EXT(vmm_get_page_mapping32)

LEXT(vmm_get_page_mapping32)
	mr r6,r5 ; Move guest address to low half of long long
	li r5,0 ; Clear high half of guest address
	bl EXT(vmm_get_page_mapping) ; Transition to real function...
	mr r3,r4 ; Convert addr64_t to vm_offset_t, dropping top half
	b vmmRetPt ; Join normal return...

	.align 5
	.globl EXT(vmm_unmap_page32)

LEXT(vmm_unmap_page32)
	mr r6,r5 ; Move guest address to low half of long long
	li r5,0 ; Clear high half of guest address
	b EXT(vmm_unmap_page) ; Transition to real function...

	.align 5
	.globl EXT(vmm_get_page_dirty_flag32)

LEXT(vmm_get_page_dirty_flag32)
	mr r7,r6 ; Move reset flag
	mr r6,r5 ; Move guest address to low half of long long
	li r5,0 ; Clear high half of guest address
	b EXT(vmm_get_page_dirty_flag) ; Transition to real function...

	.align 5
	.globl EXT(vmm_protect_page32)

LEXT(vmm_protect_page32)
	mr r7,r6 ; Move protection bits
	mr r6,r5 ; Move guest address to low half of long long
	li r5,0 ; Clear high half of guest address
	b EXT(vmm_protect_page) ; Transition to real function...

	.align 5
	.globl EXT(vmm_map_execute32)

LEXT(vmm_map_execute32)
	mr r9,r7 ; Move prot to correct parm
	mr r8,r6 ; Move guest address to low half of long long
	li r7,0 ; Clear high half of guest address
	mr r6,r5 ; Move host address to low half of long long
	li r5,0 ; Clear high half of host address
	b EXT(vmm_map_execute) ; Transition to real function...

	.align 5
	.globl EXT(vmm_protect_execute32)

LEXT(vmm_protect_execute32)
	mr r7,r6 ; Move protection bits
	mr r6,r5 ; Move guest address to low half of long long
	li r5,0 ; Clear high half of guest address
	b EXT(vmm_protect_execute) ; Transition to real function...

	.align 5
	.globl EXT(vmm_map_list32)

LEXT(vmm_map_list32)
	li r6,0 ; Set 32-bit flavor
	b EXT(vmm_map_list) ; Go to common routine...

	.align 5
	.globl EXT(vmm_map_list64)

LEXT(vmm_map_list64)
	li r6,1 ; Set 64-bit flavor
	b EXT(vmm_map_list) ; Go to common routine...

	.align 5
	.globl EXT(vmm_unmap_list32)

LEXT(vmm_unmap_list32)
	li r6,0 ; Set 32-bit flavor
	b EXT(vmm_unmap_list) ; Go to common routine...

	.align 5
	.globl EXT(vmm_unmap_list64)

LEXT(vmm_unmap_list64)
	li r6,1 ; Set 64-bit flavor
	b EXT(vmm_unmap_list) ; Go to common routine...

/*
 * Here is where we transition to the virtual machine.
 *
 * We will swap the register context in the savearea with that which is saved in our shared
 * context area. We will do a bit of validity checking, clear any nasty bits in the MSR, and
 * force the mandatory ones on.
 *
 * Then we will set up the new address space to run with, and anything else that is normally part
 * of a context switch.
 *
 * The vmm_execute_vm entry point is for the fused vmm_map_execute and vmm_protect_execute
 * calls. This is called, but never returned from. We always go directly back to the
 * user from here.
 *
 *
 */
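;
; Illustrative note (not in the original source): the MSR cleanup mentioned above is done in
; swapCtxt and amounts to
;
;	guest_msr = (guest_msr & MSR_IMPORT_BITS) | MSR_EXPORT_MASK_SET;
;
; i.e., only the user-controllable bits are accepted from the guest context and the mandatory
; bits are forced on before the value is placed into the savearea SRR1.
;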


	.align 5
	.globl EXT(vmm_execute_vm)

LEXT(vmm_execute_vm)
	lwz r30,ACT_MACT_PCB(r3) ; Restore the savearea pointer because it could be trash here
	b EXT(switchIntoVM) ; Join common...


	.align 5
	.globl EXT(switchIntoVM)

LEXT(switchIntoVM)
	mfsprg r10,1 ; Get the current activation
	lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
	rlwinm r31,r4,24,24,31 ; Get the address space
	rlwinm r4,r4,0,24,31 ; Isolate the context id
	lwz r28,vmmControl(r3) ; Pick up the control table address
	subi r4,r4,1 ; Switch to zero offset
	rlwinm. r2,r28,0,0,30 ; Is there a context there? (Note: we will ignore bit 31 so that we
				; do not try this while we are transitioning off to on)
	cmplwi cr1,r4,kVmmMaxContexts ; Is the index valid?
	beq- vmmBogus ; Not started, treat like a bogus system call
	subic. r31,r31,1 ; Make address space 0 based and test if we use default
	mulli r2,r4,vmmCEntrySize ; Get displacement from index
	bge- cr1,swvmmBogus ; Index is bogus...
	add r2,r2,r28 ; Point to the entry
	bge-- swvmmDAdsp ; There was an explicit address space request
	mr r31,r4 ; Default the address space to the context ID

swvmmDAdsp: la r2,vmmc(r2) ; Get the offset to the context array
	lwz r8,vmmGFlags(r28) ; Get the general flags
	lwz r4,vmmFlags(r2) ; Get the flags for the selected entry
	crset vmmMapDone ; Assume we will be mapping something
	lwz r5,vmmContextKern(r2) ; Get the context area address
	rlwinm. r26,r4,0,vmmInUseb,vmmInUseb ; See if the slot is in use
	cmplwi cr1,r31,kVmmMaxContexts ; See if we have a valid address space ID
	rlwinm r8,r8,0,24,31 ; Clean up address space
	beq-- swvmmBogus ; This context is no good...

	la r26,vmmAdsp(r28) ; Point to the pmaps
	sub r8,r8,r31 ; Get diff between launching address space - 1 and last mapped into (should be 1 if the same)
	rlwinm r31,r31,2,0,29 ; Index to the pmap
	cmplwi r8,1 ; See if we have the same address space
	bge-- cr1,swvmmBogAdsp ; Address space is no good...
	lwzx r31,r26,r31 ; Get the requested address space pmap
	li r0,0 ; Get a 0 in case we need to trash redrive
	lwz r15,spcFlags(r10) ; Get per_proc special flags
	beq swvmmAdspOk ; Do not invalidate address space if we are launching the same
	crclr vmmMapDone ; Clear map done flag
	stb r0,vmmGFlags+3(r28) ; Clear the last mapped address space ID so we will not redrive later
;
; Here we check for any immediate intercepts. So far, the only
; two of these are a timer pop and an external stop. We will not dispatch if
; either is true. They need to either reset the timer (i.e., set the timer
; to 0) or set a future time, or, if it is an external stop, set the vmmXStopRst flag.
;

swvmmAdspOk:
	rlwinm. r0,r15,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
	stw r31,vmmPmap(r2) ; Save the last dispatched address space
	bne vmmFamGuestResume
	lwz r6,vmmCntrl(r5) ; Get the control field
	rlwinm. r7,r6,0,vmmXStartb,vmmXStartb ; Clear all but start bit
	beq+ swvmChkStop ; Do not reset stop
	andc r6,r6,r7 ; Clear it
	li r8,vmmFlags ; Point to the flags
	stw r6,vmmCntrl(r5) ; Set the control field

swvmtryx: lwarx r4,r8,r2 ; Pick up the flags
	rlwinm r4,r4,0,vmmXStopb+1,vmmXStopb-1 ; Clear the stop bit
	stwcx. r4,r8,r2 ; Save the updated field
	bne- swvmtryx ; Try again...

swvmChkStop:
	rlwinm. r26,r4,0,vmmXStopb,vmmXStopb ; Is this VM stopped?
	bne-- swvmSetStop ; Yes...

	rlwinm. r26,r4,0,vmmTimerPopb,vmmTimerPopb ; Did the timer go pop?
	cmplwi cr1,r31,0 ; Is there actually an address space defined?
	bne-- svvmTimerPop ; Yes...

;
; Special note: we need to intercept any attempt to launch a guest into a non-existent address space.
; We will just go emulate an ISI if there is not one.
;

	beq-- cr1,swvmEmulateISI ; We are trying to launch into an undefined address space. This is not so good...

;
; Here is where we actually swap into the VM (alternate) context.
; We will bulk do a wholesale swap of the registers in the context area (the VMs)
; with the ones in the savearea (our main code). During the copy, we will fix up the
; MSR, forcing on a few bits and turning off a few others. Then we will deal with the
; PMAP and other per_proc stuff. Finally, we will exit back through the main exception
; handler to deal with unstacking saveareas and ASTs, etc.
;

swvmDoSwitch:

;
; First, we save the volatile registers we care about. Remember, all register
; handling here is pretty funky anyway, so we just pick the ones that are ok.
;
	mr r26,r3 ; Save the activation pointer

	la r11,vmmFacCtx(r2) ; Point to the virtual machine facility context
	mr r27,r2 ; Save the context entry
	stw r11,deferctx(r3) ; Start using the virtual machine facility context when we exit

	lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
	mr r3,r31 ; Get the pointer to the PMAP
	oris r15,r11,hi16(runningVM) ; Show that we are swapped to the VM right now
	bl EXT(hw_set_user_space_dis) ; Swap the address spaces
	lwz r17,vmmFlags(r27) ; Get the status flags
	lwz r20,vmmContextKern(r27) ; Get the state page kernel addr
	lwz r21,vmmCntrl(r20) ; Get vmmCntrl
	rlwinm. r22,r21,0,vmmFamEnab,vmmFamEnab ; Is vmmFamEnab set?
	lwz r22,vmmXAFlgs(r27) ; Get the eXtended Architecture flags
	stw r22,VMMXAFlgs(r10) ; Store vmmXAFlgs in per_proc VMMXAFlgs
	beq swvmNoFam ; No Fam intercept
	rlwinm. r22,r22,0,0,0 ; Are we doing a 64-bit virtual machine?
	rlwimi r15,r21,32+vmmFamSetb-FamVMmodebit,FamVMmodebit,FamVMmodebit ; Set FamVMmode bit
	rlwinm r21,r21,0,vmmFamSetb+1,vmmFamSetb-1 ; Clear FamSet bit
	bne swvmXfamintercpt
	lwz r22,famintercepts(r20) ; Load intercept bit field
	b swvmfamintercptres
swvmXfamintercpt:
	lwz r22,faminterceptsX(r20) ; Load intercept bit field
swvmfamintercptres:
	stw r21,vmmCntrl(r20) ; Update vmmCntrl
	lwz r19,vmmContextPhys(r27) ; Get vmmFAMarea address
	stw r22,vmmFAMintercept(r27) ; Get vmmFAMintercept
	stw r22,FAMintercept(r10) ; Store vmmFAMintercept in per_proc FAMintercept
	stw r19,VMMareaPhys(r10) ; Store VMMareaPhys
	oris r15,r15,hi16(FamVMena) ; Set FamVMenabit
swvmNoFam:
	stw r27,vmmCEntry(r26) ; Remember what context we are running
	bf++ vmmMapDone,swvmNoMap ; We have not mapped anything or it was not for this address space

;
; This little bit of hoopla here (triggered by vmmMapDone) is
; a performance enhancement. This will change the returning savearea
; to look like we had a DSI rather than a system call. Then, setting
; the redrive bit, the exception handler will redrive the exception as
; a DSI, entering the last mapped address into the hash table. This keeps
; double faults from happening. Note that there is only a gain if the VM
; takes a fault, the emulator merely resolves it, and then the VM execution
; begins again. It seems like this should be the normal case.
;
; Note that we need to revisit this when we move the virtual machines to the task because
; then it will be possible for more than one thread to access this stuff at the same time.
;

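;
; Illustrative summary (not in the original source) of the redrive trick below: the system call
; savearea is rewritten to look like a DSI -- saveexception becomes T_DATA_ACCESS, the DAR is set
; to the last address the emulator mapped, the DSISR is set to a PTE-miss indication, and
; SAVredrive is turned on -- so the exception handler re-dispatches it as a data fault and enters
; the pending mapping into the hash table before the VM runs.
;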
	lwz r3,SAVflags(r30) ; Pick up the savearea flags
	lwz r2,vmmLastMap(r28) ; Get the last mapped address
	lwz r14,vmmLastMap+4(r28) ; Get the last mapped address low half
	li r20,T_DATA_ACCESS ; Change to DSI fault
	oris r3,r3,hi16(SAVredrive) ; Set exception redrive
	stw r2,savedar(r30) ; Set the DAR to the last thing we mapped
	stw r14,savedar+4(r30) ; Set the DAR to the last thing we mapped
	stw r3,SAVflags(r30) ; Turn on the redrive request
	lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
	li r0,0 ; Clear
	stw r20,saveexception(r30) ; Say we need to emulate a DSI
	stw r2,savedsisr(r30) ; Pretend we have a PTE miss
	stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of

swvmNoMap: lwz r20,vmmContextKern(r27) ; Get the comm area
	rlwimi r15,r17,32-(floatCngbit-vmmFloatCngdb),floatCngbit,vectorCngbit ; Shift and insert changed bits
	lwz r20,vmmCntrl(r20) ; Get the control flags
	rlwimi r17,r11,8,24,31 ; Save the old spf flags
	rlwimi r15,r20,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
	stw r15,spcFlags(r10) ; Set per_proc copy of the special flags
	stw r15,ACT_MACT_SPF(r26) ; Get the special flags

	stw r17,vmmFlags(r27) ; Set the status flags

	bl swapCtxt ; First, swap the general register state

	lwz r17,vmmContextKern(r27) ; Get the comm area back
	la r25,vmmFacCtx(r27) ; Point to the facility context
	lwz r15,vmmCntrl(r17) ; Get the control flags again
	mfsprg r29,1 ; Get the current activation
	lwz r29,ACT_PER_PROC(r29) ; Get the per_proc block

;
; Check if there is new floating point context to load
;

	rlwinm. r0,r15,0,vmmFloatLoadb,vmmFloatLoadb ; Are there new floating point values?
	lhz r29,PP_CPU_NUMBER(r29) ; Get our cpu number
	li r14,vmmppcFPRs ; Get displacement to the new values
	andc r15,r15,r0 ; Clear the bit
	beq+ swvmNoNewFloats ; Nope, good...

	lwz r19,FPUcpu(r25) ; Get the last CPU we ran on

	stw r29,FPUcpu(r25) ; Claim the context for ourselves

	eieio ; Make sure this stays in order

	lis r18,hi16(EXT(PerProcTable)) ; Set base PerProcTable
	mulli r19,r19,ppeSize ; Find offset to the owner per_proc_entry
	ori r18,r18,lo16(EXT(PerProcTable)) ; Set base PerProcTable
	li r16,FPUowner ; Displacement to float owner
	add r19,r18,r19 ; Point to the owner per_proc_entry
	lwz r19,ppe_vaddr(r19) ; Point to the owner per_proc

swvminvfpu: lwarx r18,r16,r19 ; Get the owner

	sub r0,r18,r25 ; Subtract one from the other
	sub r3,r25,r18 ; Subtract the other from the one
	or r3,r3,r0 ; Combine them
	srawi r3,r3,31 ; Get a 0 if equal or -1 if not
	and r18,r18,r3 ; Make 0 if same, unchanged if not
	stwcx. r18,r16,r19 ; Try to invalidate it
	bne-- swvminvfpu ; Try again if there was a collision...

	lwz r3,FPUsave(r25) ; Get the FPU savearea
	dcbt r14,r17 ; Touch in first line of new stuff
	mr. r3,r3 ; Is there one?
	bne+ swvmGotFloat ; Yes...

	bl EXT(save_get) ; Get a savearea

	li r7,SAVfloat ; Get floating point flag
	stw r26,SAVact(r3) ; Save our activation
	li r0,0 ; Get a zero
	stb r7,SAVflags+2(r3) ; Set that this is floating point
	stw r0,SAVprev+4(r3) ; Clear the back chain
	stw r0,SAVlevel(r3) ; We are always at level 0 (user state)

	stw r3,FPUsave(r25) ; Chain us to context

swvmGotFloat:
	la r4,savefp0(r3) ; Point to the destination
	mr r21,r3 ; Save the save area
	la r3,vmmppcFPRs(r17) ; Point to the source
	li r5,32*8 ; Get the size (32 FPRs at 8 bytes each)

	bl EXT(bcopy) ; Copy the new values

	lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
	stw r15,vmmCntrl(r17) ; Save the control flags sans vmmFloatLoad
	rlwinm r11,r11,0,floatCngbit+1,floatCngbit-1 ; Clear the changed bit here
	lwz r14,vmmStat(r17) ; Get the status flags
	mfsprg r10,1 ; Get the current activation
	lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
	stw r11,ACT_MACT_SPF(r26) ; Get the special flags
	rlwinm r14,r14,0,vmmFloatCngdb+1,vmmFloatCngdb-1 ; Clear the changed flag
	stw r11,spcFlags(r10) ; Set per_proc copy of the special flags
	stw r14,vmmStat(r17) ; Set the status flags sans vmmFloatCngd

;
; Check if there is new vector context to load
;

swvmNoNewFloats:
	rlwinm. r0,r15,0,vmmVectLoadb,vmmVectLoadb ; Are there new vector values?
	li r14,vmmppcVRs ; Get displacement to the new values
	andc r15,r15,r0 ; Clear the bit
	beq+ swvmNoNewVects ; Nope, good...

	lwz r19,VMXcpu(r25) ; Get the last CPU we ran on

	stw r29,VMXcpu(r25) ; Claim the context for ourselves

	eieio ; Make sure this stays in order

	lis r18,hi16(EXT(PerProcTable)) ; Set base PerProcTable
	mulli r19,r19,ppeSize ; Find offset to the owner per_proc_entry
	ori r18,r18,lo16(EXT(PerProcTable)) ; Set base PerProcTable
	li r16,VMXowner ; Displacement to vector owner
	add r19,r18,r19 ; Point to the owner per_proc_entry
	lwz r19,ppe_vaddr(r19) ; Point to the owner per_proc

swvminvvec: lwarx r18,r16,r19 ; Get the owner

	sub r0,r18,r25 ; Subtract one from the other
	sub r3,r25,r18 ; Subtract the other from the one
	or r3,r3,r0 ; Combine them
	srawi r3,r3,31 ; Get a 0 if equal or -1 if not
	and r18,r18,r3 ; Make 0 if same, unchanged if not
	stwcx. r18,r16,r19 ; Try to invalidate it
	bne-- swvminvvec ; Try again if there was a collision...

swvminvved: lwz r3,VMXsave(r25) ; Get the vector savearea
	dcbt r14,r17 ; Touch in first line of new stuff
	mr. r3,r3 ; Is there one?
	bne+ swvmGotVect ; Yes...

	bl EXT(save_get) ; Get a savearea

	li r7,SAVvector ; Get the vector type flag
	stw r26,SAVact(r3) ; Save our activation
	li r0,0 ; Get a zero
	stb r7,SAVflags+2(r3) ; Set that this is vector
	stw r0,SAVprev+4(r3) ; Clear the back chain
	stw r0,SAVlevel(r3) ; We are always at level 0 (user state)

	stw r3,VMXsave(r25) ; Chain us to context

swvmGotVect:
	mr r21,r3 ; Save the pointer to the savearea
	la r4,savevr0(r3) ; Point to the destination
	la r3,vmmppcVRs(r17) ; Point to the source
	li r5,32*16 ; Get the size (32 vectors at 16 bytes each)

	bl EXT(bcopy) ; Copy the new values

	lwz r8,savevrsave(r30) ; Get the current VRSave

	lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
	stw r15,vmmCntrl(r17) ; Save the control flags sans vmmVectLoad
	rlwinm r11,r11,0,vectorCngbit+1,vectorCngbit-1 ; Clear the changed bit here
	stw r8,savevrvalid(r21) ; Set the current VRSave as valid saved
	lwz r14,vmmStat(r17) ; Get the status flags
	mfsprg r10,1 ; Get the current activation
	lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
	stw r11,ACT_MACT_SPF(r26) ; Get the special flags
	rlwinm r14,r14,0,vmmVectCngdb+1,vmmVectCngdb-1 ; Clear the changed flag
	stw r11,spcFlags(r10) ; Set per_proc copy of the special flags
	stw r14,vmmStat(r17) ; Set the status flags sans vmmVectCngd

swvmNoNewVects:
	li r3,1 ; Show normal exit with check for AST
	mr r16,r26 ; Restore the thread pointer
	b EXT(ppcscret) ; Go back to handler...

	.align 5

swvmmBogus: li r2,kVmmBogusContext ; Set bogus index return
	li r0,0 ; Clear
	li r3,1 ; Set normal return with check for AST
	stw r0,saver3(r30) ; Clear upper half
	stw r2,saver3+4(r30) ; Pass back the return code
	b EXT(ppcscret) ; Go back to handler...

swvmmBogAdsp:
	li r2,kVmmInvalidAdSpace ; Set bogus address space return
	li r0,0 ; Clear
	li r3,1 ; Set normal return with check for AST
	stw r0,saver3(r30) ; Clear upper half
	stw r2,saver3+4(r30) ; Pass back the return code
	b EXT(ppcscret) ; Go back to handler...

swvmSetStop:
	li r2,kVmmStopped ; Set stopped return
	li r0,0 ; Clear
	li r3,1 ; Set normal return with check for AST
	stw r0,saver3(r30) ; Clear upper half
	stw r2,saver3+4(r30) ; Pass back the return code
	stw r2,return_code(r5) ; Save the exit code
	b EXT(ppcscret) ; Go back to handler...

svvmTimerPop:
	li r2,kVmmReturnNull ; Set null return
	li r0,0 ; Clear
	li r3,1 ; Set normal return with check for AST
	stw r0,saver3(r30) ; Clear upper half
	stw r2,saver3+4(r30) ; Pass back the return code
	stw r2,return_code(r5) ; Save the exit code
	b EXT(ppcscret) ; Go back to handler...

swvmEmulateISI:
	mfsprg r10,2 ; Get feature flags
	lwz r11,vmmXAFlgs(r28) ; Get the eXtended Architecture flags
	mtcrf 0x02,r10 ; Move pf64Bit to its normal place in CR6
	rlwinm. r11,r11,0,0,0 ; Are we doing a 64-bit virtual machine?
	li r2,kVmmReturnInstrPageFault ; Set ISI
	crnot vmmDoing64,cr0_eq ; Remember if this is a 64-bit VM
	li r0,0 ; Clear
	li r3,1 ; Set normal return with check for AST
	stw r0,saver3(r30) ; Clear upper half
	stw r2,saver3+4(r30) ; Pass back the return code
	stw r2,return_code(r5) ; Save the exit code
	lis r7,hi16(MASK(DSISR_HASH)) ; Pretend like we got a PTE miss
	bt vmmDoing64,vmISI64 ; Go do this for a 64-bit VM...

	lwz r10,vmmppcpc(r5) ; Get the PC as failing address
	stw r10,return_params+0(r5) ; Save PC as first return parm
	stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm
	b EXT(ppcscret) ; Go back to handler...

vmISI64: ld r10,vmmppcXpc(r5) ; Get the PC as failing address
	std r10,return_paramsX+0(r5) ; Save PC as first return parm
	std r7,return_paramsX+8(r5) ; Save the pseudo-DSISR as second return parm
	b EXT(ppcscret) ; Go back to handler...

;
; These syscalls are invalid, FAM syscall fast path
;

	.align 5
	.globl EXT(vmm_fam_reserved)

LEXT(vmm_fam_reserved)
	li r3,0 ; Force exception
	b EXT(ppcscret) ; Go back to handler...

;
; Here is where we exit from vmm mode. We do this on any kind of exception.
; Interruptions (decrementer, external, etc.) are another story though.
; These we just pass through. We also switch back explicitly when requested.
; This will happen in response to a timer pop and some kinds of ASTs.
;
; Inputs:
; R3 = activation
; R4 = savearea
;
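;
; Illustrative summary (not in the original source): on the way out we fold the float/vector
; "changed" bits into vmmFlags, clear the runningVM, protection-key, and FAM state in the
; special flags, switch the address space and facility context back to the emulator's, swap
; the register context back with swapCtxt, and hand the cobbled-up exit code back to the
; emulator.
;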

	.align 5
	.globl EXT(vmm_exit)

LEXT(vmm_exit)

vmmexitcall:
	lwz r2,vmmCEntry(r3) ; Get the context that is active
	lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy
	lwz r11,ACT_MACT_SPF(r3) ; Get the special flags
	lwz r19,vmmFlags(r2) ; Get the status flags
	mr r16,r3 ; R16 is safe to use for the activation address

	rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits
	li r0,0 ; Get a zero
	rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf
	lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation
	rlwinm r11,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag
	stw r0,vmmCEntry(r16) ; Clear pointer to active context
	stw r19,vmmFlags(r2) ; Set the status flags
	rlwinm r11,r11,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key
	mfsprg r10,1 ; Get the current activation
	lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
	rlwinm r11,r11,0,FamVMenabit+1,FamVMenabit-1 ; Clear FamVMEnable
	lwz r18,spcFlags(r10) ; Get per_proc copy of the special flags
	lwz r5,vmmContextKern(r2) ; Get the state page kernel addr
	rlwinm r11,r11,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMMode
	lwz r6,vmmCntrl(r5) ; Get the control field
	rlwimi r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits
	rlwimi r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits
	rlwimi r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits
	stw r11,ACT_MACT_SPF(r16) ; Get the special flags
	stw r6,vmmCntrl(r5) ; Store the control field
	stw r11,spcFlags(r10) ; Set per_proc copy of the special flags

	mr r26,r16 ; Save the activation pointer
	mr r27,r2 ; Save the context entry

	bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator

	la r5,facctx(r16) ; Point to the main facility context
	mr r2,r27 ; Restore
	stw r5,deferctx(r16) ; Start using the main facility context on the way out
	lwz r5,vmmContextKern(r27) ; Get the context area address
	mr r3,r16 ; Restore activation address
	stw r19,vmmStat(r5) ; Save the changed and popped flags
	bl swapCtxt ; Exchange the VM context for the emulator one
	stw r8,saver3+4(r30) ; Set the return code as the return value also
	b EXT(retFromVM) ; Go back to handler...


;
; Here is where we force exit from vmm mode. We do this as part of termination
; and it is used to ensure that we are not executing
; in an alternate context. Because this is called from C we need to save
; all non-volatile registers.
;
; Inputs:
; R3 = activation
; R4 = user savearea
; Interruptions disabled
;

	.align 5
	.globl EXT(vmm_force_exit)

LEXT(vmm_force_exit)

	stwu r1,-(FM_ALIGN(20*4)+FM_SIZE)(r1) ; Get enough space for the registers
	mflr r0 ; Save the return
	stmw r13,FM_ARG0(r1) ; Save all non-volatile registers
	stw r0,(FM_ALIGN(20*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

	lwz r2,vmmCEntry(r3) ; Get the context that is active
	lwz r11,ACT_MACT_SPF(r3) ; Get the special flags
	lwz r19,vmmFlags(r2) ; Get the status flags
	lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy

	rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits
	mr r26,r3 ; Save the activation pointer
	rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf
	li r0,0 ; Get a zero
	rlwinm r9,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag
	cmplw r9,r11 ; Check if we were in a vm
	lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation
	beq- vfeNotRun ; We were not in a vm....
	rlwinm r9,r9,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key
	stw r0,vmmCEntry(r26) ; Clear pointer to active context
	mfsprg r10,1 ; Get the current activation
	lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
	lwz r18,spcFlags(r10) ; Get per_proc copy of the special flags
	rlwinm r9,r9,0,FamVMenabit+1,FamVMenabit-1 ; Clear Fam Enable
	rlwinm r9,r9,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear Fam Mode
	lwz r5,vmmContextKern(r2) ; Get the context area address
	lwz r6,vmmCntrl(r5) ; Get the control field
	rlwimi r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits
	rlwimi r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits
	rlwimi r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits
	stw r6,vmmCntrl(r5) ; Store the control field
	stw r9,ACT_MACT_SPF(r26) ; Get the special flags
	stw r9,spcFlags(r10) ; Set per_proc copy of the special flags

	mr r27,r2 ; Save the context entry
	mr r30,r4 ; Save the savearea

	bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator

	la r7,facctx(r26) ; Point to the main facility context

	lwz r5,vmmContextKern(r27) ; Get the context area address
	stw r19,vmmStat(r5) ; Save the changed and popped flags
	stw r7,deferctx(r26) ; Tell context launcher to switch facility context

	bl swapCtxt ; Exchange the VM context for the emulator one

	lwz r8,saveexception(r30) ; Pick up the exception code
	lwz r7,SAVflags(r30) ; Pick up the savearea flags
	lis r9,hi16(SAVredrive) ; Get exception redrive bit
	rlwinm r8,r8,30,24,31 ; Convert exception to return code
	andc r7,r7,r9 ; Make sure redrive is off because we are intercepting
	stw r8,saver3+4(r30) ; Set the return code as the return value also
	stw r7,SAVflags(r30) ; Set the savearea flags


vfeNotRun: lmw r13,FM_ARG0(r1) ; Restore all non-volatile registers
	lwz r1,0(r1) ; Pop the stack
	lwz r0,FM_LR_SAVE(r1) ; Get the return address
	mtlr r0 ; Set return
	blr

;
; Note: we will not do any DCBTs to the savearea. It was just stored to a few cycles ago and should
; still be in the cache.
;
; NOTE NOTE: R16 is important to save!!!!
;
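;
; Illustrative summary (not in the original source): swapCtxt exchanges the PC, MSR, GPRs, CR,
; XER, LR, CTR, VRSAVE, VSCR, and FPSCR between the savearea (r30) and the vmm state page (r5),
; sanitizing the incoming MSR along the way, and then cobbles up the return code (plus any
; exception-specific return parameters) for the emulator. The 64-bit path (swap64) does the
; same thing using the extended (vmmppcX...) form of the state page when the VM itself is
; 64-bit.
;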
	.align 5

swapCtxt:
	mfsprg r10,2 ; Get feature flags
	la r6,vmmppcpc(r5) ; Point to the first line
	mtcrf 0x02,r10 ; Move pf64Bit to its normal place in CR6

	lwz r14,saveexception(r30) ; Get the exception code
	dcbt 0,r6 ; Touch in the first line of the context area
	bt++ pf64Bitb,swap64 ; Go do this swap on a 64-bit machine...

	lwz r7,savesrr0+4(r30) ; Start moving context
	lwz r8,savesrr1+4(r30)
	lwz r9,saver0+4(r30)
	cmplwi cr1,r14,T_SYSTEM_CALL ; Are we switching because of a system call?
	lwz r10,saver1+4(r30)
	lwz r11,saver2+4(r30)
	lwz r12,saver3+4(r30)
	lwz r13,saver4+4(r30)
	la r6,vmmppcr6(r5) ; Point to second line
	lwz r14,saver5+4(r30)

	dcbt 0,r6 ; Touch second line of context area

	lwz r15,vmmppcpc(r5) ; First line of context
	lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
	lwz r23,vmmppcmsr(r5)
	ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
	lwz r17,vmmppcr0(r5)
	lwz r18,vmmppcr1(r5)
	and r23,r23,r22 ; Keep only the controllable bits
	lwz r19,vmmppcr2(r5)
	oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
	lwz r20,vmmppcr3(r5)
	ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
	lwz r21,vmmppcr4(r5)
	lwz r22,vmmppcr5(r5)

	dcbt 0,r6 ; Touch third line of context area

	stw r7,vmmppcpc(r5) ; Save emulator context into the context area
	stw r8,vmmppcmsr(r5)
	stw r9,vmmppcr0(r5)
	stw r10,vmmppcr1(r5)
	stw r11,vmmppcr2(r5)
	stw r12,vmmppcr3(r5)
	stw r13,vmmppcr4(r5)
	stw r14,vmmppcr5(r5)

;
; Save the first 3 parameters if we are an SC (we will take care of the last later)
;
	bne+ cr1,swapnotsc ; Skip next if not an SC exception...
	stw r12,return_params+0(r5) ; Save the first return
	stw r13,return_params+4(r5) ; Save the second return
	stw r14,return_params+8(r5) ; Save the third return

swapnotsc: li r6,0 ; Clear this out
	stw r6,savesrr0(r30) ; Insure that high order is clear
	stw r15,savesrr0+4(r30) ; Save vm context into the savearea
	stw r6,savesrr1(r30) ; Insure that high order is clear
	stw r23,savesrr1+4(r30)
	stw r17,saver0+4(r30)
	stw r18,saver1+4(r30)
	stw r19,saver2+4(r30)
	stw r20,saver3+4(r30)
	stw r21,saver4+4(r30)
	la r6,vmmppcr14(r5) ; Point to fourth line
	stw r22,saver5+4(r30)

	dcbt 0,r6 ; Touch fourth line

; Swap 8 registers

	lwz r7,saver6+4(r30) ; Read savearea
	lwz r8,saver7+4(r30)
	lwz r9,saver8+4(r30)
	lwz r10,saver9+4(r30)
	lwz r11,saver10+4(r30)
	lwz r12,saver11+4(r30)
	lwz r13,saver12+4(r30)
	lwz r14,saver13+4(r30)

	lwz r15,vmmppcr6(r5) ; Read vm context
	lwz r24,vmmppcr7(r5)
	lwz r17,vmmppcr8(r5)
	lwz r18,vmmppcr9(r5)
	lwz r19,vmmppcr10(r5)
	lwz r20,vmmppcr11(r5)
	lwz r21,vmmppcr12(r5)
	lwz r22,vmmppcr13(r5)

	stw r7,vmmppcr6(r5) ; Write context
	stw r8,vmmppcr7(r5)
	stw r9,vmmppcr8(r5)
	stw r10,vmmppcr9(r5)
	stw r11,vmmppcr10(r5)
	stw r12,vmmppcr11(r5)
	stw r13,vmmppcr12(r5)
	la r6,vmmppcr22(r5) ; Point to fifth line
	stw r14,vmmppcr13(r5)

	dcbt 0,r6 ; Touch fifth line

	stw r15,saver6+4(r30) ; Write vm context
	stw r24,saver7+4(r30)
	stw r17,saver8+4(r30)
	stw r18,saver9+4(r30)
	stw r19,saver10+4(r30)
	stw r20,saver11+4(r30)
	stw r21,saver12+4(r30)
	stw r22,saver13+4(r30)

; Swap 8 registers

	lwz r7,saver14+4(r30) ; Read savearea
	lwz r8,saver15+4(r30)
	lwz r9,saver16+4(r30)
	lwz r10,saver17+4(r30)
	lwz r11,saver18+4(r30)
	lwz r12,saver19+4(r30)
	lwz r13,saver20+4(r30)
	lwz r14,saver21+4(r30)

	lwz r15,vmmppcr14(r5) ; Read vm context
	lwz r24,vmmppcr15(r5)
	lwz r17,vmmppcr16(r5)
	lwz r18,vmmppcr17(r5)
	lwz r19,vmmppcr18(r5)
	lwz r20,vmmppcr19(r5)
	lwz r21,vmmppcr20(r5)
	lwz r22,vmmppcr21(r5)

	stw r7,vmmppcr14(r5) ; Write context
	stw r8,vmmppcr15(r5)
	stw r9,vmmppcr16(r5)
	stw r10,vmmppcr17(r5)
	stw r11,vmmppcr18(r5)
	stw r12,vmmppcr19(r5)
	stw r13,vmmppcr20(r5)
	la r6,vmmppcr30(r5) ; Point to sixth line
	stw r14,vmmppcr21(r5)

	dcbt 0,r6 ; Touch sixth line

	stw r15,saver14+4(r30) ; Write vm context
	stw r24,saver15+4(r30)
	stw r17,saver16+4(r30)
	stw r18,saver17+4(r30)
	stw r19,saver18+4(r30)
	stw r20,saver19+4(r30)
	stw r21,saver20+4(r30)
	stw r22,saver21+4(r30)

; Swap 8 registers

	lwz r7,saver22+4(r30) ; Read savearea
	lwz r8,saver23+4(r30)
	lwz r9,saver24+4(r30)
	lwz r10,saver25+4(r30)
	lwz r11,saver26+4(r30)
	lwz r12,saver27+4(r30)
	lwz r13,saver28+4(r30)
	lwz r14,saver29+4(r30)

	lwz r15,vmmppcr22(r5) ; Read vm context
	lwz r24,vmmppcr23(r5)
	lwz r17,vmmppcr24(r5)
	lwz r18,vmmppcr25(r5)
	lwz r19,vmmppcr26(r5)
	lwz r20,vmmppcr27(r5)
	lwz r21,vmmppcr28(r5)
	lwz r22,vmmppcr29(r5)

	stw r7,vmmppcr22(r5) ; Write context
	stw r8,vmmppcr23(r5)
	stw r9,vmmppcr24(r5)
	stw r10,vmmppcr25(r5)
	stw r11,vmmppcr26(r5)
	stw r12,vmmppcr27(r5)
	stw r13,vmmppcr28(r5)
	la r6,vmmppcvscr(r5) ; Point to seventh line
	stw r14,vmmppcr29(r5)

	dcbt 0,r6 ; Touch seventh line

	stw r15,saver22+4(r30) ; Write vm context
	stw r24,saver23+4(r30)
	stw r17,saver24+4(r30)
	stw r18,saver25+4(r30)
	stw r19,saver26+4(r30)
	stw r20,saver27+4(r30)
	stw r21,saver28+4(r30)
	stw r22,saver29+4(r30)

; Swap 8 registers

	lwz r7,saver30+4(r30) ; Read savearea
	lwz r8,saver31+4(r30)
	lwz r9,savecr(r30)
	lwz r10,savexer+4(r30)
	lwz r11,savelr+4(r30)
	lwz r12,savectr+4(r30)
	lwz r14,savevrsave(r30)

	lwz r15,vmmppcr30(r5) ; Read vm context
	lwz r24,vmmppcr31(r5)
	lwz r17,vmmppccr(r5)
	lwz r18,vmmppcxer(r5)
	lwz r19,vmmppclr(r5)
	lwz r20,vmmppcctr(r5)
	lwz r22,vmmppcvrsave(r5)

	stw r7,vmmppcr30(r5) ; Write context
	stw r8,vmmppcr31(r5)
	stw r9,vmmppccr(r5)
	stw r10,vmmppcxer(r5)
	stw r11,vmmppclr(r5)
	stw r12,vmmppcctr(r5)
	stw r14,vmmppcvrsave(r5)

	stw r15,saver30+4(r30) ; Write vm context
	stw r24,saver31+4(r30)
	stw r17,savecr(r30)
	stw r18,savexer+4(r30)
	stw r19,savelr+4(r30)
	stw r20,savectr+4(r30)
	stw r22,savevrsave(r30)

; Swap 8 registers

	lwz r7,savevscr+0(r30) ; Read savearea
	lwz r8,savevscr+4(r30)
	lwz r9,savevscr+8(r30)
	lwz r10,savevscr+12(r30)
	lwz r11,savefpscrpad(r30)
	lwz r12,savefpscr(r30)

	lwz r15,vmmppcvscr+0(r5) ; Read vm context
	lwz r24,vmmppcvscr+4(r5)
	lwz r17,vmmppcvscr+8(r5)
	lwz r18,vmmppcvscr+12(r5)
	lwz r19,vmmppcfpscrpad(r5)
	lwz r20,vmmppcfpscr(r5)

	stw r7,vmmppcvscr+0(r5) ; Write context
	stw r8,vmmppcvscr+4(r5)
	stw r9,vmmppcvscr+8(r5)
	stw r10,vmmppcvscr+12(r5)
	stw r11,vmmppcfpscrpad(r5)
	stw r12,vmmppcfpscr(r5)

	stw r15,savevscr+0(r30) ; Write vm context
	stw r24,savevscr+4(r30)
	stw r17,savevscr+8(r30)
	stw r18,savevscr+12(r30)
	stw r19,savefpscrpad(r30)
	stw r20,savefpscr(r30)


;
; Cobble up the exception return code and save any specific return values
;

	lwz r7,saveexception(r30) ; Pick up the exception code
	rlwinm r8,r7,30,24,31 ; Convert exception to return code
	cmplwi r7,T_DATA_ACCESS ; Was this a DSI?
	stw r8,return_code(r5) ; Save the exit code
	cmplwi cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI?
	beq+ swapDSI ; Yeah...
	cmplwi r7,T_ALIGNMENT ; Alignment exception?
	beq+ cr1,swapISI ; We had an ISI...
	cmplwi cr1,r7,T_SYSTEM_CALL ; Exiting because of a system call?
	beq+ swapDSI ; An alignment exception looks like a DSI...
	beq+ cr1,swapSC ; We had a system call...

	blr ; Return...

;
; Set exit returns for a DSI or alignment exception
;

swapDSI: lwz r10,savedar+4(r30) ; Get the DAR
	lwz r7,savedsisr(r30) ; and the DSISR
	stw r10,return_params+0(r5) ; Save DAR as first return parm
	stw r7,return_params+4(r5) ; Save DSISR as second return parm
	blr ; Return...

;
; Set exit returns for an ISI
;

swapISI: lwz r7,vmmppcmsr(r5) ; Get the SRR1 value
	lwz r10,vmmppcpc(r5) ; Get the PC as failing address
	rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR
	stw r10,return_params+0(r5) ; Save PC as first return parm
	stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm
	blr ; Return...

;
; Set exit returns for a system call (note: we did the first 3 earlier)
; Do we really need to pass parameters back here????
;

swapSC: lwz r10,vmmppcr6(r5) ; Get the fourth parameter
	stw r10,return_params+12(r5) ; Save it
	blr ; Return...

;
; Here is the swap for 64-bit machines
;

swap64: lwz r22,vmmXAFlgs(r27) ; Get the eXtended Architecture flags
	ld r7,savesrr0(r30) ; Start moving context
	ld r8,savesrr1(r30)
	ld r9,saver0(r30)
	cmplwi cr1,r14,T_SYSTEM_CALL ; Are we switching because of a system call?
	ld r10,saver1(r30)
	ld r11,saver2(r30)
	rlwinm. r22,r22,0,0,0 ; Are we doing a 64-bit virtual machine?
	ld r12,saver3(r30)
	crnot vmmDoing64,cr0_eq ; Remember if this is a 64-bit VM
	ld r13,saver4(r30)
	la r6,vmmppcr6(r5) ; Point to second line
	ld r14,saver5(r30)

	dcbt 0,r6 ; Touch second line of context area

	bt vmmDoing64,sw64x1 ; Skip to 64-bit stuff

	lwz r15,vmmppcpc(r5) ; First line of context
	lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
	lwz r23,vmmppcmsr(r5)
	ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
	lwz r17,vmmppcr0(r5)
	lwz r18,vmmppcr1(r5)
	and r23,r23,r22 ; Keep only the controllable bits
	lwz r19,vmmppcr2(r5)
	oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
	lwz r20,vmmppcr3(r5)
	ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
	lwz r21,vmmppcr4(r5)
	lwz r22,vmmppcr5(r5)

	dcbt 0,r6 ; Touch third line of context area

	stw r7,vmmppcpc(r5) ; Save emulator context into the context area
	stw r8,vmmppcmsr(r5)
	stw r9,vmmppcr0(r5)
	stw r10,vmmppcr1(r5)
	stw r11,vmmppcr2(r5)
	stw r12,vmmppcr3(r5)
	stw r13,vmmppcr4(r5)
	stw r14,vmmppcr5(r5)

;
; Save the first 3 parameters if we are an SC (we will take care of the last later)
;
	bne+ cr1,sw64x1done ; Skip next if not an SC exception...
	stw r12,return_params+0(r5) ; Save the first return
	stw r13,return_params+4(r5) ; Save the second return
	stw r14,return_params+8(r5) ; Save the third return
	b sw64x1done ; We are done with this section...

sw64x1: ld r15,vmmppcXpc(r5) ; First line of context
	li r0,1 ; Get a 1 to turn on 64-bit
	lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user (we will also allow 64-bit here)
	sldi r0,r0,63 ; Get 64-bit bit
	ld r23,vmmppcXmsr(r5)
	ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
	ld r17,vmmppcXr0(r5)
	or r22,r22,r0 ; Add the 64-bit bit
	ld r18,vmmppcXr1(r5)
	and r23,r23,r22 ; Keep only the controllable bits
	ld r19,vmmppcXr2(r5)
	oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
	ld r20,vmmppcXr3(r5)
	ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
	ld r21,vmmppcXr4(r5)
	ld r22,vmmppcXr5(r5)

	dcbt 0,r6 ; Touch third line of context area

	std r7,vmmppcXpc(r5) ; Save emulator context into the context area
	std r8,vmmppcXmsr(r5)
	std r9,vmmppcXr0(r5)
	std r10,vmmppcXr1(r5)
	std r11,vmmppcXr2(r5)
	std r12,vmmppcXr3(r5)
	std r13,vmmppcXr4(r5)
	std r14,vmmppcXr5(r5)

;
; Save the first 3 parameters if we are an SC (we will take care of the last later)
;
	bne+ cr1,sw64x1done ; Skip next if not an SC exception...
	std r12,return_paramsX+0(r5) ; Save the first return
	std r13,return_paramsX+8(r5) ; Save the second return
	std r14,return_paramsX+16(r5) ; Save the third return

sw64x1done:
	std r15,savesrr0(r30) ; Save vm context into the savearea
	std r23,savesrr1(r30)
	std r17,saver0(r30)
	std r18,saver1(r30)
	std r19,saver2(r30)
	std r20,saver3(r30)
	std r21,saver4(r30)
	la r6,vmmppcr14(r5) ; Point to fourth line
	std r22,saver5(r30)

	dcbt 0,r6 ; Touch fourth line

; Swap 8 registers

	ld r7,saver6(r30) ; Read savearea
	ld r8,saver7(r30)
	ld r9,saver8(r30)
	ld r10,saver9(r30)
	ld r11,saver10(r30)
	ld r12,saver11(r30)
	ld r13,saver12(r30)
	ld r14,saver13(r30)

	bt vmmDoing64,sw64x2 ; Skip to 64-bit stuff

	lwz r15,vmmppcr6(r5) ; Read vm context
	lwz r24,vmmppcr7(r5)
	lwz r17,vmmppcr8(r5)
	lwz r18,vmmppcr9(r5)
	lwz r19,vmmppcr10(r5)
	lwz r20,vmmppcr11(r5)
	lwz r21,vmmppcr12(r5)
	lwz r22,vmmppcr13(r5)

	stw r7,vmmppcr6(r5) ; Write context
	stw r8,vmmppcr7(r5)
	stw r9,vmmppcr8(r5)
	stw r10,vmmppcr9(r5)
	stw r11,vmmppcr10(r5)
	stw r12,vmmppcr11(r5)
	stw r13,vmmppcr12(r5)
	la r6,vmmppcr22(r5) ; Point to fifth line
	stw r14,vmmppcr13(r5)

	dcbt 0,r6 ; Touch fifth line
	b sw64x2done ; We are done with this section...

sw64x2: ld r15,vmmppcXr6(r5) ; Read vm context
	ld r24,vmmppcXr7(r5)
	ld r17,vmmppcXr8(r5)
	ld r18,vmmppcXr9(r5)
	ld r19,vmmppcXr10(r5)
	ld r20,vmmppcXr11(r5)
	ld r21,vmmppcXr12(r5)
	ld r22,vmmppcXr13(r5)

	std r7,vmmppcXr6(r5) ; Write context
	std r8,vmmppcXr7(r5)
	std r9,vmmppcXr8(r5)
	std r10,vmmppcXr9(r5)
	std r11,vmmppcXr10(r5)
	std r12,vmmppcXr11(r5)
	std r13,vmmppcXr12(r5)
	la r6,vmmppcXr22(r5) ; Point to fifth line
	std r14,vmmppcXr13(r5)

	dcbt 0,r6 ; Touch fifth line

sw64x2done: std r15,saver6(r30) ; Write vm context
	std r24,saver7(r30)
	std r17,saver8(r30)
	std r18,saver9(r30)
	std r19,saver10(r30)
	std r20,saver11(r30)
	std r21,saver12(r30)
	std r22,saver13(r30)

; Swap 8 registers

	ld r7,saver14(r30) ; Read savearea
	ld r8,saver15(r30)
	ld r9,saver16(r30)
	ld r10,saver17(r30)
	ld r11,saver18(r30)
	ld r12,saver19(r30)
	ld r13,saver20(r30)
	ld r14,saver21(r30)

	bt vmmDoing64,sw64x3 ; Skip to 64-bit stuff

	lwz r15,vmmppcr14(r5) ; Read vm context
	lwz r24,vmmppcr15(r5)
	lwz r17,vmmppcr16(r5)
	lwz r18,vmmppcr17(r5)
	lwz r19,vmmppcr18(r5)
	lwz r20,vmmppcr19(r5)
	lwz r21,vmmppcr20(r5)
	lwz r22,vmmppcr21(r5)

	stw r7,vmmppcr14(r5) ; Write context
	stw r8,vmmppcr15(r5)
	stw r9,vmmppcr16(r5)
	stw r10,vmmppcr17(r5)
	stw r11,vmmppcr18(r5)
	stw r12,vmmppcr19(r5)
	stw r13,vmmppcr20(r5)
	la r6,vmmppcr30(r5) ; Point to sixth line
	stw r14,vmmppcr21(r5)

	dcbt 0,r6 ; Touch sixth line
	b sw64x3done ; Done with this section...

sw64x3: ld r15,vmmppcXr14(r5) ; Read vm context
	ld r24,vmmppcXr15(r5)
	ld r17,vmmppcXr16(r5)
	ld r18,vmmppcXr17(r5)
	ld r19,vmmppcXr18(r5)
	ld r20,vmmppcXr19(r5)
	ld r21,vmmppcXr20(r5)
	ld r22,vmmppcXr21(r5)

	std r7,vmmppcXr14(r5) ; Write context
	std r8,vmmppcXr15(r5)
	std r9,vmmppcXr16(r5)
	std r10,vmmppcXr17(r5)
	std r11,vmmppcXr18(r5)
	std r12,vmmppcXr19(r5)
	std r13,vmmppcXr20(r5)
	la r6,vmmppcXr30(r5) ; Point to sixth line
	std r14,vmmppcXr21(r5)

	dcbt 0,r6 ; Touch sixth line

sw64x3done: std r15,saver14(r30) ; Write vm context
	std r24,saver15(r30)
	std r17,saver16(r30)
	std r18,saver17(r30)
	std r19,saver18(r30)
	std r20,saver19(r30)
	std r21,saver20(r30)
	std r22,saver21(r30)

; Swap 8 registers

	ld r7,saver22(r30) ; Read savearea
	ld r8,saver23(r30)
	ld r9,saver24(r30)
	ld r10,saver25(r30)
	ld r11,saver26(r30)
	ld r12,saver27(r30)
	ld r13,saver28(r30)
	ld r14,saver29(r30)

	bt vmmDoing64,sw64x4 ; Skip to 64-bit stuff

	lwz r15,vmmppcr22(r5) ; Read vm context
	lwz r24,vmmppcr23(r5)
	lwz r17,vmmppcr24(r5)
	lwz r18,vmmppcr25(r5)
	lwz r19,vmmppcr26(r5)
	lwz r20,vmmppcr27(r5)
	lwz r21,vmmppcr28(r5)
	lwz r22,vmmppcr29(r5)

	stw r7,vmmppcr22(r5) ; Write context
	stw r8,vmmppcr23(r5)
	stw r9,vmmppcr24(r5)
	stw r10,vmmppcr25(r5)
	stw r11,vmmppcr26(r5)
	stw r12,vmmppcr27(r5)
	stw r13,vmmppcr28(r5)
	la r6,vmmppcvscr(r5) ; Point to seventh line
	stw r14,vmmppcr29(r5)
	dcbt 0,r6 ; Touch seventh line
	b sw64x4done ; Done with this section...

sw64x4: ld r15,vmmppcXr22(r5) ; Read vm context
	ld r24,vmmppcXr23(r5)
	ld r17,vmmppcXr24(r5)
	ld r18,vmmppcXr25(r5)
	ld r19,vmmppcXr26(r5)
	ld r20,vmmppcXr27(r5)
	ld r21,vmmppcXr28(r5)
	ld r22,vmmppcXr29(r5)

	std r7,vmmppcXr22(r5) ; Write context
	std r8,vmmppcXr23(r5)
	std r9,vmmppcXr24(r5)
	std r10,vmmppcXr25(r5)
	std r11,vmmppcXr26(r5)
	std r12,vmmppcXr27(r5)
	std r13,vmmppcXr28(r5)
	la r6,vmmppcvscr(r5) ; Point to seventh line
	std r14,vmmppcXr29(r5)

	dcbt 0,r6 ; Touch seventh line

sw64x4done: std r15,saver22(r30) ; Write vm context
	std r24,saver23(r30)
	std r17,saver24(r30)
	std r18,saver25(r30)
	std r19,saver26(r30)
	std r20,saver27(r30)
	std r21,saver28(r30)
	std r22,saver29(r30)

; Swap 8 registers

	ld r7,saver30(r30) ; Read savearea
	ld r8,saver31(r30)
	lwz r9,savecr(r30)
	ld r10,savexer(r30)
	ld r11,savelr(r30)
	ld r12,savectr(r30)
	lwz r14,savevrsave(r30)

	bt vmmDoing64,sw64x5 ; Skip to 64-bit stuff

	lwz r15,vmmppcr30(r5) ; Read vm context
	lwz r24,vmmppcr31(r5)
	lwz r17,vmmppccr(r5)
	lwz r18,vmmppcxer(r5)
	lwz r19,vmmppclr(r5)
	lwz r20,vmmppcctr(r5)
	lwz r22,vmmppcvrsave(r5)

	stw r7,vmmppcr30(r5) ; Write context
	stw r8,vmmppcr31(r5)
	stw r9,vmmppccr(r5)
	stw r10,vmmppcxer(r5)
	stw r11,vmmppclr(r5)
	stw r12,vmmppcctr(r5)
	stw r14,vmmppcvrsave(r5)
	b sw64x5done ; Done here...

sw64x5: ld r15,vmmppcXr30(r5) ; Read vm context
	ld r24,vmmppcXr31(r5)
	lwz r17,vmmppcXcr(r5)
	ld r18,vmmppcXxer(r5)
	ld r19,vmmppcXlr(r5)
	ld r20,vmmppcXctr(r5)
	lwz r22,vmmppcXvrsave(r5)

	std r7,vmmppcXr30(r5) ; Write context
	std r8,vmmppcXr31(r5)
	stw r9,vmmppcXcr(r5)
	std r10,vmmppcXxer(r5)
	std r11,vmmppcXlr(r5)
	std r12,vmmppcXctr(r5)
	stw r14,vmmppcXvrsave(r5)

sw64x5done: std r15,saver30(r30) ; Write vm context
	std r24,saver31(r30)
	stw r17,savecr(r30)
	std r18,savexer(r30)
	std r19,savelr(r30)
	std r20,savectr(r30)
	stw r22,savevrsave(r30)

; Swap 8 registers

	lwz r7,savevscr+0(r30) ; Read savearea
	lwz r8,savevscr+4(r30)
	lwz r9,savevscr+8(r30)
	lwz r10,savevscr+12(r30)
	lwz r11,savefpscrpad(r30)
	lwz r12,savefpscr(r30)

	lwz r15,vmmppcvscr+0(r5) ; Read vm context
	lwz r24,vmmppcvscr+4(r5)
1572 lwz r17,vmmppcvscr+8(r5)
1573 lwz r18,vmmppcvscr+12(r5)
1574 lwz r19,vmmppcfpscrpad(r5)
1575 lwz r20,vmmppcfpscr(r5)
1576
1577 stw r7,vmmppcvscr+0(r5) ; Write context
1578 stw r8,vmmppcvscr+4(r5)
1579 stw r9,vmmppcvscr+8(r5)
1580 stw r10,vmmppcvscr+12(r5)
1581 stw r11,vmmppcfpscrpad(r5)
1582 stw r12,vmmppcfpscr(r5)
1583
1584 stw r15,savevscr+0(r30) ; Write vm context
1585 stw r24,savevscr+4(r30)
1586 stw r17,savevscr+8(r30)
1587 stw r18,savevscr+12(r30)
1588 stw r19,savefpscrpad(r30)
1589 stw r20,savefpscr(r30)
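;
;			At this point the exchange is complete: the guest pc/msr (srr0/srr1),
;			GPRs, CR, XER, LR, CTR, VRSAVE, VSCR and FPSCR have all been swapped
;			between the savearea (r30) and the vmm context (r5).  Each block above
;			follows the same pattern, shown here as an informal C sketch (names
;			are illustrative only, not real fields):
;
;				for (i = 0; i < 8; i++) {            /* one cache line at a time     */
;					tmp          = savearea[i];      /* value headed for the context */
;					savearea[i]  = vmcontext[i];     /* vmmppcrN (32-bit) or         */
;					vmcontext[i] = tmp;              /* vmmppcXrN (64-bit) slots     */
;				}
;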
1590
1591
1592;
1593; Cobble up the exception return code and save any specific return values
1594;
1595
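;			The rlwinm below rotates the exception code right by two and keeps the
;			low byte, i.e. roughly return_code = (exception_code >> 2) & 0xff in C.
;			This relies on the T_* codes being spaced four apart, so each exception
;			maps to a distinct small return code.
;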
1596 lwz r7,saveexception(r30) ; Pick up the exception code
1597 rlwinm r8,r7,30,24,31 ; Convert exception to return code
1598 cmplwi r7,T_DATA_ACCESS ; Was this a DSI?
1599 stw r8,return_code(r5) ; Save the exit code
1600 cmplwi cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI?
1601 beq+ swapDSI64 ; Yeah...
1602 cmplwi r7,T_ALIGNMENT ; Alignment exception?
1603 beq+ cr1,swapISI64 ; We had an ISI...
1604 cmplwi cr1,r7,T_SYSTEM_CALL ; Exiting because of a system call?
1605 beq+ swapDSI64 ; An alignment exception looks like a DSI...
1606 beq+ cr1,swapSC64 ; We had a system call...
1607
1608 blr ; Return...
1609
1610;
1611; Set exit returns for a DSI or alignment exception
1612;
1613
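;			A DSI and an alignment exception are reported the same way: the first
;			return parameter is the faulting address (DAR), the second the DSISR.
;			The 64-bit context uses the wider return_paramsX slots, so the second
;			parameter lands at offset 8 rather than 4.
;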
1614swapDSI64: ld r10,savedar(r30) ; Get the DAR
1615 lwz r7,savedsisr(r30) ; and the DSISR
1616 bt vmmDoing64,sw64DSI ; Skip to 64-bit stuff...
1617
1618
1619 stw r10,return_params+0(r5) ; Save DAR as first return parm
1620 stw r7,return_params+4(r5) ; Save DSISR as second return parm
1621 blr ; Return...
1622
1623sw64DSI: std r10,return_paramsX+0(r5) ; Save DAR as first return parm
1624 std r7,return_paramsX+8(r5) ; Save DSISR as second return parm (note that this is expanded to 64 bits)
1625 blr ; Return...
1626
1627;
1628; Set exit returns for a ISI
1629;
1630
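;			An ISI has no DSISR, so a pseudo-DSISR is built from SRR1: the rlwinm
;			keeps bits 1-4 (the ISI status bits), which sit in the same positions
;			as the corresponding DSISR bits.  Informally:
;				params[0] = failing_pc;  params[1] = srr1 & 0x78000000;
;			(sketch only; 0x78000000 is just the mask for bits 1 through 4)
;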
1631swapISI64: bt vmmDoing64,sw64ISI ; Skip to 64-bit stuff...
1632 lwz r7,vmmppcmsr(r5) ; Get the SRR1 value
1633 lwz r10,vmmppcpc(r5) ; Get the PC as failing address
1634 rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR
1635 stw r10,return_params+0(r5) ; Save PC as first return parm
1636 stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm
1637 blr ; Return...
1638
1639sw64ISI: ld r7,vmmppcXmsr(r5) ; Get the SRR1 value
1640 ld r10,vmmppcXpc(r5) ; Get the PC as failing address
1641 rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR
1642 std r10,return_paramsX+0(r5) ; Save PC as first return parm
1643 std r7,return_paramsX+8(r5) ; Save the pseudo-DSISR as second return parm
1644 blr ; Return...
1645
1646;
1647; Set exit returns for a system call (note: we did the first 3 earlier)
1648; Do we really need to pass parameters back here????
1649;
1650
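;			The first three return parameters were set up earlier (see the note
;			above); only the fourth, taken from the guest's r6, is copied here.
;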
1651swapSC64: bt vmmDoing64,sw64SC ; Skip to 64-bit stuff...
1652 lwz r10,vmmppcr6(r5) ; Get the fourth parameter
1653 stw r10,return_params+12(r5) ; Save it
1654 blr ; Return...
1655
1656sw64SC: ld r10,vmmppcXr6(r5) ; Get the fourth paramter
1657 std r10,return_paramsX+24(r5) ; Save it
1658 blr ; Return...
1659
d7e50217
A
1660;
1661; vmmFamGuestResume:
1662; Restore Guest context from Fam mode.
1663;
1664
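;			vmmFamGuestResume rebuilds the savearea from the FAM guest area in the
;			comm area so that the normal exception exit path resumes the guest.  If
;			a mapping was made on this pass (vmmMapDone), the exit is instead turned
;			into a redriven DSI at the last mapped address.  The guest MSR is
;			sanitized before it goes into srr1; roughly, in C (sketch only, using
;			the masks applied below, FP_VEC_BITS is illustrative):
;
;				msr  = (famguest_msr & MSR_IMPORT_BITS) | MSR_EXPORT_MASK_SET;
;				msr |= host_srr1 & FP_VEC_BITS;      /* FP and Vector enables carried over */
;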
1665vmmFamGuestResume:
91447636
A
1666 mfsprg r10,1 ; Get the current activation
1667 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
d7e50217 1668 lwz r27,vmmCEntry(r3) ; Get the context that is active
55e303ae
A
1669 lwz r4,VMMXAFlgs(r10) ; Get the eXtended Architecture flags
1670 rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
d7e50217
A
1671 lwz r15,spcFlags(r10) ; Get per_proc special flags
1672 mr r26,r3 ; Save the activation pointer
1673 lwz r20,vmmContextKern(r27) ; Get the comm area
1674 rlwinm r15,r15,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
1675 stw r15,spcFlags(r10) ; Update the special flags
55e303ae 1676 bne fgrX
d7e50217 1677 lwz r7,famguestpc(r20) ; Load famguest ctx pc
55e303ae 1678 bf++ vmmMapDone,fgrNoMap ; No mapping done for this space.
d7e50217 1679 lwz r3,SAVflags(r30) ; Pick up the savearea flags
55e303ae
A
1680 lwz r2,vmmLastMap(r28) ; Get the last mapped address
1681 lwz r6,vmmLastMap+4(r28) ; Get the last mapped address
d7e50217
A
1682 li r4,T_DATA_ACCESS ; Change to DSI fault
1683 oris r3,r3,hi16(SAVredrive) ; Set exception redrive
1684 stw r2,savedar(r30) ; Set the DAR to the last thing we mapped
55e303ae 1685 stw r6,savedar+4(r30) ; Set the DAR to the last thing we mapped
d7e50217
A
1686 stw r3,SAVflags(r30) ; Turn on the redrive request
1687 lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
1688 stw r4,saveexception(r30) ; Say we need to emulate a DSI
55e303ae 1689 li r0,0 ; Clear
d7e50217 1690 stw r2,savedsisr(r30) ; Pretend we have a PTE miss
55e303ae
A
1691 stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of
1692fgrNoMap:
1693 lwz r4,savesrr1+4(r30) ; Get the saved MSR value
1694 stw r7,savesrr0+4(r30) ; Set savearea pc
d7e50217
A
1695 lwz r5,famguestmsr(r20) ; Load famguest ctx msr
1696 lis r6,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
1697 ori r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
1698 and r5,r5,r6 ; Keep only the controllable bits
1699 oris r5,r5,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1700 ori r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1701 rlwimi r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
1702 rlwimi r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
55e303ae 1703 stw r5,savesrr1+4(r30) ; Set savearea srr1
d7e50217
A
1704 lwz r4,famguestr0(r20) ; Load famguest ctx r0
1705 lwz r5,famguestr1(r20) ; Load famguest ctx r1
1706 lwz r6,famguestr2(r20) ; Load famguest ctx r2
1707 lwz r7,famguestr3(r20) ; Load famguest ctx r3
55e303ae
A
1708 stw r4,saver0+4(r30) ; Set savearea r0
1709 stw r5,saver1+4(r30) ; Set savearea r1
1710 stw r6,saver2+4(r30) ; Set savearea r2
1711 stw r7,saver3+4(r30) ; Set savearea r3
d7e50217
A
1712 lwz r4,famguestr4(r20) ; Load famguest ctx r4
1713 lwz r5,famguestr5(r20) ; Load famguest ctx r5
1714 lwz r6,famguestr6(r20) ; Load famguest ctx r6
1715 lwz r7,famguestr7(r20) ; Load famguest ctx r7
55e303ae
A
1716 stw r4,saver4+4(r30) ; Set savearea r4
1717 stw r5,saver5+4(r30) ; Set savearea r5
1718 stw r6,saver6+4(r30) ; Set savearea r6
1719 stw r7,saver7+4(r30) ; Set savearea r7
1720 b fgrret
1721fgrX:
1722 ld r7,famguestXpc(r20) ; Load famguest ctx pc
1723 bf++ vmmMapDone,fgrXNoMap ; No mapping done for this space.
1724 lwz r3,SAVflags(r30) ; Pick up the savearea flags
1725 ld r2,vmmLastMap(r28) ; Get the last mapped address
1726 li r4,T_DATA_ACCESS ; Change to DSI fault
1727 oris r3,r3,hi16(SAVredrive) ; Set exception redrive
1728 std r2,savedar(r30) ; Set the DAR to the last thing we mapped
1729 stw r3,SAVflags(r30) ; Turn on the redrive request
1730 lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
1731 stw r4,saveexception(r30) ; Say we need to emulate a DSI
1732 li r0,0 ; Clear
1733 stw r2,savedsisr(r30) ; Pretend we have a PTE miss
1734 stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of
1735fgrXNoMap:
1736 ld r4,savesrr1(r30) ; Get the saved MSR value
1737 std r7,savesrr0(r30) ; Set savearea pc
1738 ld r5,famguestXmsr(r20) ; Load famguest ctx msr
1739 lis r6,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
1740 ori r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
1741 and r5,r5,r6 ; Keep only the controllable bits
1742 oris r5,r5,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1743 ori r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1744 rlwimi r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
1745 rlwimi r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
1746 std r5,savesrr1(r30) ; Set savearea srr1
1747 ld r4,famguestXr0(r20) ; Load famguest ctx r0
1748 ld r5,famguestXr1(r20) ; Load famguest ctx r1
1749 ld r6,famguestXr2(r20) ; Load famguest ctx r2
1750 ld r7,famguestXr3(r20) ; Load famguest ctx r3
1751 std r4,saver0(r30) ; Set savearea r0
1752 std r5,saver1(r30) ; Set savearea r1
1753 std r6,saver2(r30) ; Set savearea r2
1754 std r7,saver3(r30) ; Set savearea r3
1755 ld r4,famguestXr4(r20) ; Load famguest ctx r4
1756 ld r5,famguestXr5(r20) ; Load famguest ctx r5
1757 ld r6,famguestXr6(r20) ; Load famguest ctx r6
1758 ld r7,famguestXr7(r20) ; Load famguest ctx r7
1759 std r4,saver4(r30) ; Set savearea r4
1760 std r5,saver5(r30) ; Set savearea r5
1761 std r6,saver6(r30) ; Set savearea r6
1762 std r7,saver7(r30) ; Set savearea r7
1763fgrret:
d7e50217 1764 li r3,1 ; Show normal exit with check for AST
91447636 1765 mr r16,r26 ; Restore the thread pointer
d7e50217
A
1766 b EXT(ppcscret) ; Go back to handler...
1767
1768;
55e303ae 1769; FAM Intercept exception handler
d7e50217
A
1770;
1771
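;			vmm_fam_exc is entered from the exception vectors while a FAM guest is
;			running.  It saves the guest's r0-r7, pc and msr into the FAM guest
;			area, sets FamVMmode in spcFlags, converts the exception code to a
;			dispatch code (exception >> 2, as above) and returns straight to the
;			user-level FAM handler via rfi/rfid with the dispatch code in r0 and
;			the refcon in r1.  For an alignment exception the DAR and DSISR go into
;			famparam 1 and 2; for a program exception, srr1 goes into famparam 1.
;			The SE, BE, FE0 and FE1 bits are cleared in the srr1 used to return.
;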
1772 .align 5
55e303ae
A
1773 .globl EXT(vmm_fam_exc)
1774
1775LEXT(vmm_fam_exc)
1776 lwz r4,VMMXAFlgs(r2) ; Get the eXtended Architecture flags
1777 lwz r1,pfAvailable(r2) ; Get the CPU features flags
1778 rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
1779 bne fexcX
1780 lwz r4,saver4+4(r13) ; Load savearea r4
d7e50217
A
1781 cmplwi r11,T_ALIGNMENT ; Alignment exception?
1782 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
55e303ae 1783 mtcrf 0x02,r1 ; Move pf64Bit to its normal place in CR6
d7e50217 1784 cmplwi cr1,r11,T_PROGRAM ; Exiting because of a program exception?
55e303ae
A
1785 bt++ pf64Bitb,fexcVMareaPhys64 ; Go do this on a 64-bit machine...
1786 slwi r3,r3,12 ; Change ppnum to physical address
1787 b fexcVMareaPhysres
1788fexcVMareaPhys64:
1789 mtxer r5 ; Restore xer
1790 lwz r5,saver5+4(r13) ; Load savearea r5
1791 lwz r6,saver6+4(r13) ; Load savearea r6
1792 sldi r3,r3,12 ; Change ppnum to physical address
1793fexcVMareaPhysres:
d7e50217
A
1794 stw r4,famguestr4(r3) ; Save r4 in famguest ctx
1795 stw r5,famguestr5(r3) ; Save r5 in famguest ctx
1796 stw r6,famguestr6(r3) ; Save r6 in famguest ctx
1797 stw r7,famguestr7(r3) ; Save r7 in famguest ctx
55e303ae
A
1798 lwz r4,saver0+4(r13) ; Load savearea r0
1799 lwz r5,saver1+4(r13) ; Load savearea r1
1800 lwz r6,saver2+4(r13) ; Load savearea r2
1801 lwz r7,saver3+4(r13) ; Load savearea r3
d7e50217
A
1802 stw r4,famguestr0(r3) ; Save r0 in famguest ctx
1803 stw r5,famguestr1(r3) ; Save r1 in famguest ctx
1804 stw r6,famguestr2(r3) ; Save r2 in famguest ctx
1805 stw r7,famguestr3(r3) ; Save r3 in famguest ctx
1806 lwz r4,spcFlags(r2) ; Load per_proc spcFlags
1807 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1808 stw r4,spcFlags(r2) ; Update per_proc spcFlags
1809 mfsrr0 r2 ; Get the interrupt srr0
1810 mfsrr1 r4 ; Get the interrupt srr1
1811 stw r2,famguestpc(r3) ; Save srr0 in famguest ctx
1812 stw r4,famguestmsr(r3) ; Save srr1 in famguest ctx
1813 li r6,lo16(MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)|MASK(MSR_FE1))
1814 andc r6,r4,r6 ; Clear SE BE FE0 FE1
1815 mtsrr1 r6 ; Set srr1
1816 mr r6,r3 ; Set r6 with phys state page addr
1817 rlwinm r7,r11,30,24,31 ; Convert exception to return code
55e303ae
A
1818 beq+ cr1,fexcPRG ; We had a program exception...
1819 bne+ fexcret
d7e50217
A
1820 ; We had an Alignment...
1821 mfdar r3 ; Load dar
1822 mfdsisr r4 ; Load dsisr
1823 stw r3,famparam+0x4(r6) ; Set famparam 1 with dar
1824 stw r4,famparam+0x8(r6) ; Set famparam 2 with dsisr
55e303ae
A
1825 b fexcret ;
1826fexcPRG:
d7e50217
A
1827 stw r4,famparam+0x4(r6) ; Set famparam 1 with srr1
1828 mr r3,r4 ; Set r3 with srr1
1829 lwz r4,famguestr4(r6) ; Load r4 from famguest context
55e303ae 1830fexcret:
d7e50217
A
1831 lwz r5,famguestr5(r6) ; Load r5 from famguest context
1832 lwz r13,famhandler(r6) ; Load user address to resume
1833 stw r2,famparam(r6) ; Set famparam 0 with srr0
1834 stw r7,famdispcode(r6) ; Save the exit code
1835 lwz r1,famrefcon(r6) ; load refcon
55e303ae 1836 bt++ pf64Bitb,fexcrfi64 ; Go do this on a 64-bit machine...
d7e50217
A
1837 mtcr r0 ; Restore cr
1838 mtsrr0 r13 ; Load srr0
1839 mr r0,r7 ; Set dispatch code
1840 lwz r7,famguestr7(r6) ; Load r7 from famguest context
1841 lwz r6,famguestr6(r6) ; Load r6 from famguest context
1842 mfsprg r13,2 ; Restore r13
1843 mfsprg r11,3 ; Restore r11
1844 rfi
55e303ae
A
1845fexcrfi64:
1846 mtcr r0 ; Restore cr
1847 mtsrr0 r13 ; Load srr0
1848 mr r0,r7 ; Set dispatch code
1849 lwz r7,famguestr7(r6) ; Load r7 from famguest context
1850 lwz r6,famguestr6(r6) ; Load r6 from famguest context
1851 mfsprg r13,2 ; Restore r13
1852 mfsprg r11,3 ; Restore r11
1853 rfid
1854fexcX:
1855 mtxer r5 ; Restore xer
1856 ld r4,saver4(r13) ; Load savearea r4
1857 ld r5,saver5(r13) ; Load savearea r5
1858 ld r6,saver6(r13) ; Load savearea r6
1859 cmplwi r11,T_ALIGNMENT ; Alignment exception?
1860 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
1861 mtcrf 0x02,r1 ; Move pf64Bit to its normal place in CR6
1862 cmplwi cr1,r11,T_PROGRAM ; Exiting because of a program exception?
1863 sldi r3,r3,12 ; Change ppnum to physical address
1864 std r4,famguestXr4(r3) ; Save r4 in famguest ctx
1865 std r5,famguestXr5(r3) ; Save r5 in famguest ctx
1866 std r6,famguestXr6(r3) ; Save r6 in famguest ctx
1867 std r7,famguestXr7(r3) ; Save r7 in famguest ctx
1868 ld r4,saver0(r13) ; Load savearea r0
1869 ld r5,saver1(r13) ; Load savearea r1
1870 ld r6,saver2(r13) ; Load savearea r2
1871 ld r7,saver3(r13) ; Load savearea r3
1872 std r4,famguestXr0(r3) ; Save r0 in famguest ctx
1873 std r5,famguestXr1(r3) ; Save r1 in famguest ctx
1874 std r6,famguestXr2(r3) ; Save r2 in famguest ctx
1875 std r7,famguestXr3(r3) ; Save r3 in famguest ctx
1876 lwz r4,spcFlags(r2) ; Load per_proc spcFlags
1877 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1878 stw r4,spcFlags(r2) ; Update per_proc spcFlags
1879 mfsrr0 r2 ; Get the interrupt srr0
1880 mfsrr1 r4 ; Get the interrupt srr1
1881 std r2,famguestXpc(r3) ; Save srr0 in famguest ctx
1882 std r4,famguestXmsr(r3) ; Save srr1 in famguest ctx
1883 li r6,lo16(MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)|MASK(MSR_FE1))
1884 andc r6,r4,r6 ; Clear SE BE FE0 FE1
1885 mtsrr1 r6 ; Set srr1
1886 mr r6,r3 ; Set r6 with phys state page addr
1887 rlwinm r7,r11,30,24,31 ; Convert exception to return code
1888 beq+ cr1,fexcXPRG ; We had a program exception...
1889 bne+ fexcXret
1890 ; We had an Alignment...
1891 mfdar r3 ; Load dar
1892 mfdsisr r4 ; Load dsisr
1893 std r3,famparamX+0x8(r6) ; Set famparam 1 with dar
1894 std r4,famparamX+0x10(r6) ; Set famparam 2 with dsisr
1895 b fexcXret
1896fexcXPRG:
1897 std r4,famparamX+0x8(r6) ; Set famparam 1 with srr1
1898 mr r3,r4 ; Set r3 with srr1
1899 ld r4,famguestXr4(r6) ; Load r4 from famguest context
1900fexcXret:
1901 ld r5,famguestXr5(r6) ; Load r5 from famguest context
1902 ld r13,famhandlerX(r6) ; Load user address to resume
1903 std r2,famparamX(r6) ; Set famparam 0 with srr0
1904 std r7,famdispcodeX(r6) ; Save the exit code
1905 ld r1,famrefconX(r6) ; load refcon
1906 mtcr r0 ; Restore cr
1907 mtsrr0 r13 ; Load srr0
1908 mr r0,r7 ; Set dispatch code
1909 ld r7,famguestXr7(r6) ; Load r7 from famguest context
1910 ld r6,famguestXr6(r6) ; Load r6 from famguest context
1911 mfsprg r13,2 ; Restore r13
1912 mfsprg r11,3 ; Restore r11
1913 rfid
d7e50217
A
1914
1915;
1916; FAM Intercept DSI/ISI fault handler
1917;
1918
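;			vmm_fam_pf handles DSI/ISI faults taken while a FAM guest is running.
;			The guest registers, pc and msr are saved into the FAM guest area and
;			the savearea is rewritten so that the thread resumes at the user-level
;			famhandler with: r0 = dispatch code, r1 = refcon, r2 = faulting pc,
;			r3 = DAR (DSI) or masked srr1 (ISI), r4 = DSISR (DSI only).
;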
1919 .align 5
55e303ae 1920 .globl EXT(vmm_fam_pf)
d7e50217 1921
55e303ae
A
1922LEXT(vmm_fam_pf)
1923 lwz r4,VMMXAFlgs(r2) ; Get the eXtended Architecture flags
d7e50217 1924 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
55e303ae
A
1925 rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
1926 bne fpfX
1927 lwz r4,saver0+4(r13) ; Load savearea r0
1928 lwz r5,saver1+4(r13) ; Load savearea r1
1929 lwz r6,saver2+4(r13) ; Load savearea r2
1930 lwz r7,saver3+4(r13) ; Load savearea r3
1931 bt++ pf64Bitb,fpfVMareaPhys64 ; Go do this on a 64-bit machine...
1932 slwi r3,r3,12 ; Change ppnum to physical address
1933 b fpfVMareaPhysret
1934fpfVMareaPhys64:
1935 sldi r3,r3,12 ; Change ppnum to physical address
1936fpfVMareaPhysret:
d7e50217
A
1937 stw r4,famguestr0(r3) ; Save r0 in famguest
1938 stw r5,famguestr1(r3) ; Save r1 in famguest
1939 stw r6,famguestr2(r3) ; Save r2 in famguest
1940 stw r7,famguestr3(r3) ; Save r3 in famguest
55e303ae
A
1941 lwz r4,saver4+4(r13) ; Load savearea r4
1942 lwz r5,saver5+4(r13) ; Load savearea r5
1943 lwz r6,saver6+4(r13) ; Load savearea r6
1944 lwz r7,saver7+4(r13) ; Load savearea r7
d7e50217
A
1945 stw r4,famguestr4(r3) ; Save r4 in famguest
1946 lwz r4,spcFlags(r2) ; Load spcFlags
1947 stw r5,famguestr5(r3) ; Save r5 in famguest
55e303ae 1948 lwz r5,savesrr0+4(r13) ; Get the interrupt srr0
d7e50217 1949 stw r6,famguestr6(r3) ; Save r6 in famguest
55e303ae 1950 lwz r6,savesrr1+4(r13) ; Load srr1
d7e50217
A
1951 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1952 stw r7,famguestr7(r3) ; Save r7 in famguest
1953 stw r4,spcFlags(r2) ; Update spcFlags
1954 lwz r1,famrefcon(r3) ; Load refcon
1955 lwz r2,famhandler(r3) ; Load famhandler to resume
1956 stw r5,famguestpc(r3) ; Save srr0
55e303ae 1957 stw r5,saver2+4(r13) ; Store srr0 in savearea r2
d7e50217
A
1958 stw r5,famparam(r3) ; Store srr0 in fam param 0
1959 stw r6,famguestmsr(r3) ; Save srr1 in famguestmsr
1960 cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this an ISI?
1961 rlwinm r7,r11,30,24,31 ; Convert exception to return code
55e303ae
A
1962 beq+ cr1,fpfISI ; We had an ISI...
1963; fpfDSI
1964 lwz r6,savedar+4(r13) ; Load dar from savearea
d7e50217
A
1965 lwz r4,savedsisr(r13) ; Load dsisr from savearea
1966 stw r6,famparam+0x4(r3) ; Store dar in fam param 1
55e303ae 1967 stw r6,saver3+4(r13) ; Store dar in savearea r3
d7e50217 1968 stw r4,famparam+0x8(r3) ; Store dsisr in fam param 2
55e303ae
A
1969 stw r4,saver4+4(r13) ; Store dsisr in savearea r4
1970 b fpfret
1971fpfISI:
d7e50217
A
1972 rlwinm r6,r6,0,1,4 ; Save the bits that match the DSISR
1973 stw r6,famparam+0x4(r3) ; Store srr1 in fam param 1
55e303ae
A
1974 stw r6,saver3+4(r13) ; Store srr1 in savearea r3
1975fpfret:
1976 stw r7,saver0+4(r13) ; Set dispatch code
d7e50217 1977 stw r7,famdispcode(r3) ; Set dispatch code
55e303ae
A
1978 stw r1,saver1+4(r13) ; Store refcon in savearea r1
1979 stw r2,savesrr0+4(r13) ; Store famhandler in srr0
1980 blr
1981fpfX:
1982 ld r4,saver0(r13) ; Load savearea r0
1983 ld r5,saver1(r13) ; Load savearea r1
1984 ld r6,saver2(r13) ; Load savearea r2
1985 ld r7,saver3(r13) ; Load savearea r3
1986 sldi r3,r3,12 ; Change ppnum to physical address
1987 std r4,famguestXr0(r3) ; Save r0 in famguest
1988 std r5,famguestXr1(r3) ; Save r1 in famguest
1989 std r6,famguestXr2(r3) ; Save r2 in famguest
1990 std r7,famguestXr3(r3) ; Save r3 in famguest
1991 ld r4,saver4(r13) ; Load savearea r4
1992 ld r5,saver5(r13) ; Load savearea r5
1993 ld r6,saver6(r13) ; Load savearea r6
1994 ld r7,saver7(r13) ; Load savearea r7
1995 std r4,famguestXr4(r3) ; Save r4 in famguest
1996 lwz r4,spcFlags(r2) ; Load spcFlags
1997 std r5,famguestXr5(r3) ; Save r5 in famguest
1998 ld r5,savesrr0(r13) ; Get the interrupt srr0
1999 std r6,famguestXr6(r3) ; Save r6 in famguest
2000 ld r6,savesrr1(r13) ; Load srr1
2001 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
2002 std r7,famguestXr7(r3) ; Save r7 in famguest
2003 stw r4,spcFlags(r2) ; Update spcFlags
2004 ld r1,famrefconX(r3) ; Load refcon
2005 ld r2,famhandlerX(r3) ; Load famhandler to resume
2006 std r5,famguestXpc(r3) ; Save srr0
2007 std r5,saver2(r13) ; Store srr0 in savearea r2
2008 std r5,famparamX(r3) ; Store srr0 in fam param 0
2009 std r6,famguestXmsr(r3) ; Save srr1 in famguestmsr
2010 cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this an ISI?
2011 rlwinm r7,r11,30,24,31 ; Convert exception to return code
2012 beq+ cr1,fpfXISI ; We had an ISI...
2013; fpfXDSI
2014 ld r6,savedar(r13) ; Load dar from savearea
2015 lwz r4,savedsisr(r13) ; Load dsisr from savearea
2016 std r6,famparamX+0x8(r3) ; Store dar in fam param 1
2017 std r6,saver3(r13) ; Store dar in savearea r3
2018 std r4,famparamX+0x10(r3) ; Store dsisr in fam param 2
2019 std r4,saver4(r13) ; Store dsisr in savearea r4
2020 b fpfXret
2021fpfXISI:
2022 rlwinm r6,r6,0,1,4 ; Save the bits that match the DSISR
2023 std r6,famparamX+0x8(r3) ; Store srr1 in fam param 1
2024 std r6,saver3(r13) ; Store srr1 in savearea r3
2025fpfXret:
2026 std r7,saver0(r13) ; Set dispatch code
2027 std r7,famdispcodeX(r3) ; Set dispatch code
2028 std r1,saver1(r13) ; Store refcon in savearea r1
2029 std r2,savesrr0(r13) ; Store famhandler in srr0
d7e50217
A
2030 blr
2031
91447636
A
2032/*
2033 * Ultra Fast Path FAM syscalls
2034 *
2035 * The UFT FAMs are those from kvmmResumeGuest to kvmmSetGuestRegister, inclusive.
2036 * We get here directly from the syscall vector, with interrupts and translation off,
2037 * 64-bit mode on if supported, and all registers live except:
2038 *
2039 * r13 = holds caller's cr
2040 * sprg2 = holds caller's r13
2041 * sprg3 = holds caller's r11
2042 * cr2 = set on (r3==kvmmSetGuestRegister)
2043 * cr5 = set on (r3==kvmmResumeGuest)
2044 */
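;			Dispatch sketch for the UFT path, in informal C (names follow the code;
;			kvmmGetGuestRegister is implied by the cr2/cr5 tests above):
;
;				if (selector == kvmmResumeGuest)           resume_guest(pc, maskCntrl);
;				else if (reg_index > 7)                    return;    /* only r0-r7 live here */
;				else if (selector == kvmmSetGuestRegister) famguest_r[reg_index] = value;
;				else /* get */                             result = famguest_r[reg_index];
;
;			The 64-bit virtual machine variant (ufpX) does the same using the
;			8-byte famguestXrN slots.
;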
d7e50217
A
2045
2046 .align 5
2047 .globl EXT(vmm_ufp)
2048
2049LEXT(vmm_ufp)
2050 mfsprg r3,0 ; Get the per_proc area
91447636 2051 mr r11,r13 ; Move saved cr to r11
55e303ae 2052 lwz r13,VMMXAFlgs(r3) ; Get the eXtended Architecture flags
91447636
A
2053 rlwinm. r13,r13,0,0,0 ; Are we doing a 64-bit virtual machine?
2054
55e303ae
A
2055 lwz r13,pfAvailable(r3) ; Get feature flags
2056 mtcrf 0x02,r13 ; Put pf64Bitb etc in cr6
2057 lwz r13,VMMareaPhys(r3) ; Load fast assist area
2058 bt++ pf64Bitb,ufpVMareaPhys64 ; Go do this on a 64-bit machine...
2059 slwi r13,r13,12 ; Change ppnum to physical address
2060 b ufpVMareaPhysret
2061ufpVMareaPhys64:
2062 sldi r13,r13,12 ; Change ppnum to physical address
2063ufpVMareaPhysret:
91447636
A
2064 bne ufpX ; go handle a 64-bit virtual machine
2065
55e303ae 2066 bt cr5_eq,ufpResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest
91447636
A
2067 cmplwi cr5,r4,7 ; First argument in range? (ie, 0-7)
2068 bgt cr5,ufpVMret ; Return if not in the range
d7e50217 2069 slwi r4,r4,2 ; multiply index by 4
55e303ae
A
2070 la r3,famguestr0(r13) ; Load the base address
2071 bt cr2_eq,ufpSetGuestReg ; Set/get selector
2072; ufpGetGuestReg
d7e50217
A
2073 lwzx r3,r4,r3 ; Load the guest register
2074 b ufpVMret ; Return
55e303ae 2075ufpSetGuestReg:
d7e50217
A
2076 stwx r5,r4,r3 ; Update the guest register
2077 li r3,0 ; Set return value
2078 b ufpVMret ; Return
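;			ufpResumeGuest sanitizes the guest MSR (import/export masks, with the
;			FP and Vector enables carried over from the current MSR) and drops back
;			into the guest.  If maskCntrl requests a protection key change
;			(vmmKeyb), the live registers are first spilled to the next savearea,
;			switchSegs reloads the segment registers/STB for the new key, and the
;			registers are restored before the guest's r0-r7 are picked up from the
;			FAM area.
;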
55e303ae 2079ufpResumeGuest:
d7e50217
A
2080 lwz r7,spcFlags(r3) ; Pick up the special flags
2081 mtsrr0 r4 ; Set srr0
2082 rlwinm. r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl
2083 rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
55e303ae
A
2084 stw r7,spcFlags(r3) ; Update the special flags
2085 mfsrr1 r6 ; Get the current MSR value
2086
2087 lwz r4,famguestmsr(r13) ; Load guest srr1
2088 lis r1,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
2089 ori r1,r1,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
2090 and r4,r4,r1 ; Keep only the controllable bits
2091 oris r4,r4,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
2092 ori r4,r4,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
2093 rlwimi r4,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
2094 rlwimi r4,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
2095 beq ufpnokey ; Branch if not key switch
d7e50217
A
2096 mr r2,r7 ; Save r7
2097 rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
2098 cmpw cr0,r7,r2 ; Is userProtKeybit changed?
55e303ae
A
2099 beq ufpnokey ; No, go to ResumeGuest_nokey
2100 mr r5,r3 ; Get the per_proc area
2101 stw r7,spcFlags(r3) ; Update the special flags
2102
2103 bt++ pf64Bitb,ufpsave64 ; Go do this on a 64-bit machine...
2104
2105 lwz r3,next_savearea+4(r5) ; Get the exception save area
2106 stw r8,saver8+4(r3) ; Save r8
2107 stw r9,saver9+4(r3) ; Save r9
2108 stw r10,saver10+4(r3) ; Save r10
2109 stw r11,saver11+4(r3) ; Save r11
2110 stw r12,saver12+4(r3) ; Save r12
2111 stw r13,saver13+4(r3) ; Save r13
2112 stw r14,saver14+4(r3) ; Save r14
2113 stw r15,saver15+4(r3) ; Save r15
2114 stw r16,saver16+4(r3) ; Save r16
2115 stw r17,saver17+4(r3) ; Save r17
2116 stw r18,saver18+4(r3) ; Save r18
2117 stw r19,saver19+4(r3) ; Save r19
2118 stw r20,saver20+4(r3) ; Save r20
2119 stw r21,saver21+4(r3) ; Save r21
2120 stw r22,saver22+4(r3) ; Save r22
2121 stw r23,saver23+4(r3) ; Save r23
2122 stw r24,saver24+4(r3) ; Save r24
2123 stw r25,saver25+4(r3) ; Save r25
2124 stw r26,saver26+4(r3) ; Save r26
2125 stw r27,saver27+4(r3) ; Save r27
2126 stw r28,saver28+4(r3) ; Save r28
2127 stw r29,saver29+4(r3) ; Save r29
2128 stw r30,saver30+4(r3) ; Save r30
2129 stw r31,saver31+4(r3) ; Save r31
2130 b ufpsaveres ; Continue
2131
2132ufpsave64:
2133 ld r3,next_savearea(r5) ; Get the exception save area
2134 std r8,saver8(r3) ; Save r8
2135 std r9,saver9(r3) ; Save r9
2136 std r10,saver10(r3) ; Save r10
2137 std r11,saver11(r3) ; Save r11
2138 std r12,saver12(r3) ; Save r12
2139 std r13,saver13(r3) ; Save r13
2140 std r14,saver14(r3) ; Save r14
2141 std r15,saver15(r3) ; Save r15
2142 std r16,saver16(r3) ; Save r16
2143 std r17,saver17(r3) ; Save r17
2144 std r18,saver18(r3) ; Save r18
2145 std r19,saver19(r3) ; Save r19
2146 std r20,saver20(r3) ; Save r20
2147 std r21,saver21(r3) ; Save r21
2148 std r22,saver22(r3) ; Save r22
2149 std r23,saver23(r3) ; Save r23
2150 std r24,saver24(r3) ; Save r24
2151 std r25,saver25(r3) ; Save r25
2152 std r26,saver26(r3) ; Save r26
2153 std r27,saver27(r3) ; Save r27
2154 std r28,saver28(r3) ; Save r28
2155 std r29,saver29(r3) ; Save r29
2156 mfxer r2 ; Get xer
2157 std r30,saver30(r3) ; Save r30
2158 std r31,saver31(r3) ; Save r31
2159 std r2,savexer(r3) ; Save xer
2160
2161ufpsaveres:
2162 mflr r20 ; Get lr
2163 li r2,1 ; Set to 1
2164 stw r7,spcFlags(r5) ; Update the special flags
2165 mr r13,r3 ; Set current savearea
2166 mr r21,r4 ; Save r4
2167 sth r2,ppInvSeg(r5) ; Force a reload of the SRs
2168 mr r29,r5 ; Get the per_proc area
2169 mr r3,r4 ; Set the MSR value we are switching to
2170 bl EXT(switchSegs) ; Go handle the segment registers/STB
2171 mr r3,r13 ; Set current savearea
2172 mr r4,r21 ; Restore r4
2173 mtlr r20 ; Set lr
2174
2175 bt++ pf64Bitb,ufprestore64 ; Go do this on a 64-bit machine...
2176 lwz r8,saver8+4(r3) ; Load r8
2177 lwz r9,saver9+4(r3) ; Load r9
2178 lwz r10,saver10+4(r3) ; Load r10
2179 lwz r11,saver11+4(r3) ; Load r11
2180 lwz r12,saver12+4(r3) ; Load r12
2181 lwz r13,saver13+4(r3) ; Load r13
2182 lwz r14,saver14+4(r3) ; Load r14
2183 lwz r15,saver15+4(r3) ; Load r15
2184 lwz r16,saver16+4(r3) ; Load r16
2185 lwz r17,saver17+4(r3) ; Load r17
2186 lwz r18,saver18+4(r3) ; Load r18
2187 lwz r19,saver19+4(r3) ; Load r19
2188 lwz r20,saver20+4(r3) ; Load r20
2189 lwz r21,saver21+4(r3) ; Load r21
2190 lwz r22,saver22+4(r3) ; Load r22
2191 lwz r23,saver23+4(r3) ; Load r23
2192 lwz r24,saver24+4(r3) ; Load r24
2193 lwz r25,saver25+4(r3) ; Load r25
2194 lwz r26,saver26+4(r3) ; Load r26
2195 lwz r27,saver27+4(r3) ; Load r27
2196 lwz r28,saver28+4(r3) ; Load r28
2197 lwz r29,saver29+4(r3) ; Load r29
2198 lwz r30,saver30+4(r3) ; Load r30
2199 lwz r31,saver31+4(r3) ; Load r31
2200 b ufpnokey ; Continue
2201ufprestore64:
2202 ld r2,savexer(r3) ; Load xer
2203 ld r8,saver8(r3) ; Load r8
2204 ld r9,saver9(r3) ; Load r9
2205 ld r10,saver10(r3) ; Load r10
2206 mtxer r2 ; Restore xer
2207 ld r11,saver11(r3) ; Load r11
2208 ld r12,saver12(r3) ; Load r12
2209 ld r13,saver13(r3) ; Load r13
2210 ld r14,saver14(r3) ; Load r14
2211 ld r15,saver15(r3) ; Load r15
2212 ld r16,saver16(r3) ; Load r16
2213 ld r17,saver17(r3) ; Load r17
2214 ld r18,saver18(r3) ; Load r18
2215 ld r19,saver19(r3) ; Load r19
2216 ld r20,saver20(r3) ; Load r20
2217 ld r21,saver21(r3) ; Load r21
2218 ld r22,saver22(r3) ; Load r22
2219 ld r23,saver23(r3) ; Load r23
2220 ld r24,saver24(r3) ; Load r24
2221 ld r25,saver25(r3) ; Load r25
2222 ld r26,saver26(r3) ; Load r26
2223 ld r27,saver27(r3) ; Load r27
2224 ld r28,saver28(r3) ; Load r28
2225 ld r29,saver29(r3) ; Load r29
2226 ld r30,saver30(r3) ; Load r30
2227 ld r31,saver31(r3) ; Load r31
2228ufpnokey:
2229 mfsprg r3,0 ; Get the per_proc area
2230 mtsrr1 r4 ; Set srr1
d7e50217
A
2231 lwz r0,famguestr0(r13) ; Load r0
2232 lwz r1,famguestr1(r13) ; Load r1
2233 lwz r2,famguestr2(r13) ; Load r2
2234 lwz r3,famguestr3(r13) ; Load r3
2235 lwz r4,famguestr4(r13) ; Load r4
2236 lwz r5,famguestr5(r13) ; Load r5
2237 lwz r6,famguestr6(r13) ; Load r6
2238 lwz r7,famguestr7(r13) ; Load r7
2239ufpVMret:
55e303ae
A
2240 mfsprg r13,2 ; Restore R13
2241 bt++ pf64Bitb,ufpVMrfi64 ; Go do this on a 64-bit machine...
d7e50217
A
2242 mtcrf 0xFF,r11 ; Restore CR
2243 mfsprg r11,3 ; Restore R11
de355530 2244 rfi ; All done, go back...
55e303ae
A
2245ufpVMrfi64:
2246 mtcrf 0xFF,r11 ; Restore CR
2247 mfsprg r11,3 ; Restore R11
2248 rfid
2249
91447636 2250ufpX: ; here if virtual machine is 64-bit
55e303ae 2251 bt cr5_eq,ufpXResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest
91447636
A
2252 cmplwi cr5,r4,7 ; Is first arg in range 0-7?
2253 bgt cr5,ufpXVMret ; Return if not in the range
55e303ae
A
2254 slwi r4,r4,3 ; multiply index by 8
2255 la r3,famguestXr0(r13) ; Load the base address
2256 bt cr2_eq,ufpXSetGuestReg ; Set/get selector
2257; ufpXGetGuestReg
2258 ldx r3,r4,r3 ; Load the guest register
2259 b ufpXVMret ; Return
2260ufpXSetGuestReg:
2261 stdx r5,r4,r3 ; Update the guest register
2262 li r3,0 ; Set return value
2263 b ufpXVMret ; Return
2264ufpXResumeGuest:
2265 lwz r7,spcFlags(r3) ; Pick up the special flags
2266 mtsrr0 r4 ; Set srr0
2267 rlwinm. r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl
2268 rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
2269 stw r7,spcFlags(r3) ; Update the special flags
2270 mfsrr1 r6 ; Get the current MSR value
2271
2272 ld r4,famguestXmsr(r13) ; Load guest srr1
2273 lis r1,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
2274 ori r1,r1,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
2275 and r4,r4,r1 ; Keep only the controllable bits
2276 oris r4,r4,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
2277 ori r4,r4,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
2278 rlwimi r4,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
2279 rlwimi r4,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
2280 beq ufpXnokey ; Branch if not key switch
2281 mr r2,r7 ; Save r7
2282 rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
2283 cmpw cr0,r7,r2 ; Is userProtKeybit changed?
2284 beq ufpXnokey ; No, go to ResumeGuest_nokey
2285 mr r5,r3 ; Get the per_proc area
2286 stw r7,spcFlags(r3) ; Update the special flags
2287
2288 ld r3,next_savearea(r5) ; Get the exception save area
2289 std r8,saver8(r3) ; Save r8
2290 std r9,saver9(r3) ; Save r9
2291 std r10,saver10(r3) ; Save r10
2292 std r11,saver11(r3) ; Save r11
2293 std r12,saver12(r3) ; Save r12
2294 std r13,saver13(r3) ; Save r13
2295 std r14,saver14(r3) ; Save r14
2296 std r15,saver15(r3) ; Save r15
2297 std r16,saver16(r3) ; Save r16
2298 std r17,saver17(r3) ; Save r17
2299 std r18,saver18(r3) ; Save r18
2300 std r19,saver19(r3) ; Save r19
2301 std r20,saver20(r3) ; Save r20
2302 std r21,saver21(r3) ; Save r21
2303 std r22,saver22(r3) ; Save r22
2304 std r23,saver23(r3) ; Save r23
2305 std r24,saver24(r3) ; Save r24
2306 std r25,saver25(r3) ; Save r25
2307 std r26,saver26(r3) ; Save r26
2308 std r27,saver27(r3) ; Save r27
2309 std r28,saver28(r3) ; Save r28
2310 std r29,saver29(r3) ; Save r29
2311 mfxer r2 ; Get xer
2312 std r30,saver30(r3) ; Save r30
2313 std r31,saver31(r3) ; Save r31
2314 std r2,savexer(r3) ; Save xer
2315
2316 mflr r20 ; Get lr
2317 li r2,1 ; Set to 1
2318 stw r7,spcFlags(r5) ; Update the special flags
2319 mr r13,r3 ; Set current savearea
2320 mr r21,r4 ; Save r4
2321 sth r2,ppInvSeg(r5) ; Force a reload of the SRs
2322 mr r29,r5 ; Get the per_proc area
2323 mr r3,r4 ; Set the MSR value we are switching to
2324 bl EXT(switchSegs) ; Go handle the segment registers/STB
2325 mr r3,r13 ; Set current savearea
2326 mr r4,r21 ; Restore r4
2327 mtlr r20 ; Set lr
2328
2329 ld r2,savexer(r3) ; Load xer
2330 ld r8,saver8(r3) ; Load r8
2331 ld r9,saver9(r3) ; Load r9
2332 ld r10,saver10(r3) ; Load r10
2333 mtxer r2 ; Restore xer
2334 ld r11,saver11(r3) ; Load r11
2335 ld r12,saver12(r3) ; Load r12
2336 ld r13,saver13(r3) ; Load r13
2337 ld r14,saver14(r3) ; Load r14
2338 ld r15,saver15(r3) ; Load r15
2339 ld r16,saver16(r3) ; Load r16
2340 ld r17,saver17(r3) ; Load r17
2341 ld r18,saver18(r3) ; Load r18
2342 ld r19,saver19(r3) ; Load r19
2343 ld r20,saver20(r3) ; Load r20
2344 ld r21,saver21(r3) ; Load r21
2345 ld r22,saver22(r3) ; Load r22
2346 ld r23,saver23(r3) ; Load r23
2347 ld r24,saver24(r3) ; Load r24
2348 ld r25,saver25(r3) ; Load r25
2349 ld r26,saver26(r3) ; Load r26
2350 ld r27,saver27(r3) ; Load r27
2351 ld r28,saver28(r3) ; Load r28
2352 ld r29,saver29(r3) ; Load r29
2353 ld r30,saver30(r3) ; Load r30
2354 ld r31,saver31(r3) ; Load r31
2355ufpXnokey:
2356 mtsrr1 r4 ; Set srr1
2357 ld r0,famguestXr0(r13) ; Load r0
2358 ld r1,famguestXr1(r13) ; Load r1
2359 ld r2,famguestXr2(r13) ; Load r2
2360 ld r3,famguestXr3(r13) ; Load r3
2361 ld r4,famguestXr4(r13) ; Load r4
2362 ld r5,famguestXr5(r13) ; Load r5
2363 ld r6,famguestXr6(r13) ; Load r6
2364 ld r7,famguestXr7(r13) ; Load r7
2365ufpXVMret:
2366 mfsprg r13,2 ; Restore R13
2367 mtcrf 0xFF,r11 ; Restore CR
2368 mfsprg r11,3 ; Restore R11
2369 rfid
2370