/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <assym.s>
#include <debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>

/*
 * This file contains implementations for the Virtual Machine Monitor
 * facility.
 */

#define vmmMapDone 31
#define vmmDoing64 30


/*
 * int vmm_dispatch(savearea, act);
 *
 * vmm_dispatch is a PPC only system call. It is used with a selector (first
 * parameter) to determine what function to enter. This is treated as an extension
 * of hw_exceptions.
 *
 * Inputs:
 * R4 = current activation
 * R16 = current thread
 * R30 = current savearea
 */
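
;
; Table layout note: each dispatch entry below is a pair of words -- the routine
; address followed by a FAM-validity flag (1 = selectable while in FAM mode,
; 0 = not). That is why the selector is scaled by 8 before indexing (the
; rlwinm r11,r11,3,0,28 in vmm_dispatch) and why vmm_count divides the table
; size by 8. For example, selector 3 resolves to byte offset 24, the
; vmm_tear_down_context entry.
;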

        .align 5 ; Line up on cache line
        .globl EXT(vmm_dispatch_table)

LEXT(vmm_dispatch_table)

/* Don't change the order of these routines in the table. It's */
/* OK to add new routines, but they must be added at the bottom. */

        .long EXT(vmm_get_version_sel) ; Get the version of the VMM interface
        .long 0 ; Not valid in Fam
        .long EXT(vmm_get_features_sel) ; Get the features of the VMM interface
        .long 0 ; Not valid in Fam
        .long EXT(vmm_init_context_sel) ; Initializes a new VMM context
        .long 0 ; Not valid in Fam
        .long EXT(vmm_tear_down_context) ; Tears down a previously-allocated VMM context
        .long 0 ; Not valid in Fam
        .long EXT(vmm_tear_down_all) ; Tears down all VMMs
        .long 0 ; Not valid in Fam
        .long EXT(vmm_map_page32) ; Maps a page from the main address space into the VM space - supports 32-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_get_page_mapping32) ; Returns client va associated with VM va - supports 32-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_unmap_page32) ; Unmaps a page from the VM space - supports 32-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_unmap_all_pages) ; Unmaps all pages from the VM space
        .long 1 ; Valid in Fam
        .long EXT(vmm_get_page_dirty_flag32) ; Gets the change bit for a page and optionally clears it - supports 32-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_get_float_state) ; Gets current floating point state
        .long 0 ; Not valid in Fam
        .long EXT(vmm_get_vector_state) ; Gets current vector state
        .long 0 ; Not valid in Fam
        .long EXT(vmm_set_timer) ; Sets a timer value
        .long 1 ; Valid in Fam
        .long EXT(vmm_get_timer) ; Gets a timer value
        .long 1 ; Valid in Fam
        .long EXT(switchIntoVM) ; Switches to the VM context
        .long 1 ; Valid in Fam
        .long EXT(vmm_protect_page32) ; Sets protection values for a page - supports 32-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_map_execute32) ; Maps a page and launches VM - supports 32-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_protect_execute32) ; Sets protection values for a page and launches VM - supports 32-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_map_list32) ; Maps a list of pages - supports 32-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_unmap_list32) ; Unmaps a list of pages - supports 32-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_fam_reserved) ; Exit from Fam to host
        .long 1 ; Valid in Fam
        .long EXT(vmm_fam_reserved) ; Resume guest from Fam
        .long 1 ; Valid in Fam
        .long EXT(vmm_fam_reserved) ; Get guest register from Fam
        .long 1 ; Valid in Fam
        .long EXT(vmm_fam_reserved) ; Set guest register from Fam
        .long 1 ; Valid in Fam
        .long EXT(vmm_set_XA) ; Set extended architecture features for a VM
        .long 0 ; Not valid in Fam
        .long EXT(vmm_get_XA) ; Get extended architecture features from a VM
        .long 1 ; Valid in Fam
        .long EXT(vmm_map_page) ; Map a host to guest address space - supports 64-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_get_page_mapping) ; Get host address of a guest page - supports 64-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_unmap_page) ; Unmap a guest page - supports 64-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_get_page_dirty_flag) ; Check if guest page modified - supports 64-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_protect_page) ; Sets protection values for a page - supports 64-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_map_execute) ; Map guest page and launch - supports 64-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_protect_execute) ; Set prot attributes and launch - supports 64-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_map_list64) ; Map a list of pages into guest address spaces - supports 64-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_unmap_list64) ; Unmap a list of pages from guest address spaces - supports 64-bit
        .long 1 ; Valid in Fam
        .long EXT(vmm_max_addr) ; Returns the maximum virtual address
        .long 1 ; Valid in Fam


        .set vmm_count,(.-EXT(vmm_dispatch_table))/8 ; Get the top number


        .align 5
        .globl EXT(vmm_dispatch)

LEXT(vmm_dispatch)

        lwz r11,saver3+4(r30) ; Get the selector
        mr r3,r4 ; All of our functions want the activation as the first parm
        lis r10,hi16(EXT(vmm_dispatch_table)) ; Get top half of table
        cmplwi r11,kVmmExecuteVM ; Should we switch to the VM now?
        cmplwi cr1,r11,vmm_count ; See if we have a valid selector
        ori r10,r10,lo16(EXT(vmm_dispatch_table)) ; Get low half of table
        lwz r4,saver4+4(r30) ; Get 1st parameter after selector
        beq+ EXT(switchIntoVM) ; Yes, go switch to it....
        rlwinm r11,r11,3,0,28 ; Index into table
        bge- cr1,vmmBogus ; It is a bogus entry
        add r12,r10,r11 ; Get the vmm dispatch syscall entry
        mfsprg r10,0 ; Get the per_proc
        lwz r13,0(r12) ; Get address of routine
        lwz r12,4(r12) ; Get validity flag
        lwz r5,spcFlags(r10) ; Get per_proc special flags
        cmpwi cr1,r12,0 ; Check Fam valid
        rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
        crand cr0_eq,cr1_eq,cr0_gt ; In Fam and invalid syscall
        beq vmmBogus ; Intercept to host
        lwz r5,saver5+4(r30) ; Get 2nd parameter after selector - note that some of these parameters may actually be long longs
        lwz r6,saver6+4(r30) ; Get 3rd parameter after selector
        mtlr r13 ; Set the routine address
        lwz r7,saver7+4(r30) ; Get 4th parameter after selector
        lwz r8,saver8+4(r30) ; Get 5th parameter after selector
        lwz r9,saver9+4(r30) ; Get 6th parameter after selector
;
; NOTE: some of the above parameters are actually long longs. We have glue code that transforms
; all needed parameters and/or adds 32-/64-bit flavors to the needed functions.
;

        blrl ; Call function

vmmRetPt:   li r0,0 ; Clear this out
        stw r0,saver3(r30) ; Make sure top of RC is clear
        stw r3,saver3+4(r30) ; Pass back the return code
        stw r0,saver4(r30) ; Make sure bottom of RC is clear (just in case)
        stw r4,saver4+4(r30) ; Pass back the bottom return code (just in case)
        li r3,1 ; Set normal return with check for AST
        b EXT(ppcscret) ; Go back to handler...

vmmBogus:
        mfsprg r10,0 ; Get the per_proc
        mfsprg r3,1 ; Load current activation
        lwz r5,spcFlags(r10) ; Get per_proc special flags
        rlwinm. r5,r5,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
        bne vmmexitcall ; Do it to it
        li r3,0 ; Bogus selector, treat like a bogus system call
        b EXT(ppcscret) ; Go back to handler...


        .align 5
        .globl EXT(vmm_get_version_sel)

LEXT(vmm_get_version_sel) ; Selector based version of get version

        lis r3,hi16(EXT(vmm_get_version))
        ori r3,r3,lo16(EXT(vmm_get_version))
        b selcomm


        .align 5
        .globl EXT(vmm_get_features_sel)

LEXT(vmm_get_features_sel) ; Selector based version of get features

        lis r3,hi16(EXT(vmm_get_features))
        ori r3,r3,lo16(EXT(vmm_get_features))
        b selcomm


        .align 5
        .globl EXT(vmm_init_context_sel)

LEXT(vmm_init_context_sel) ; Selector based version of init context

        lwz r4,saver4+4(r30) ; Get the passed in version
        lwz r5,saver5+4(r30) ; Get the passed in comm area
        lis r3,hi16(EXT(vmm_init_context))
        stw r4,saver3+4(r30) ; Cheat and move this parameter over
        ori r3,r3,lo16(EXT(vmm_init_context))
        stw r5,saver4+4(r30) ; Cheat and move this parameter over

selcomm:    mtlr r3 ; Set the real routine address
        mr r3,r30 ; Pass in the savearea
        blrl ; Call the function
        b EXT(ppcscret) ; Go back to handler...

        .align 5
        .globl EXT(vmm_map_page32)

LEXT(vmm_map_page32)
        mr r9,r7 ; Move prot to correct parm
        mr r8,r6 ; Move guest address to low half of long long
        li r7,0 ; Clear high half of guest address
        mr r6,r5 ; Move host address to low half of long long
        li r5,0 ; Clear high half of host address
        b EXT(vmm_map_page) ; Transition to real function...
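
;
; The 32-bit thunks above and below are the glue mentioned earlier: a 32-bit
; caller passes word-sized addresses, which get widened into the (high, low)
; register pairs the 64-bit-capable routines expect. In vmm_map_page32, for
; instance, the host address moves from r5 into the r5/r6 pair (high word
; zeroed) and the guest address from r6 into r7/r8 before falling into
; vmm_map_page.
;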

        .align 5
        .globl EXT(vmm_get_page_mapping32)

LEXT(vmm_get_page_mapping32)
        mr r6,r5 ; Move guest address to low half of long long
        li r5,0 ; Clear high half of guest address
        bl EXT(vmm_get_page_mapping) ; Transition to real function...
        mr r3,r4 ; Convert addr64_t to vm_offset_t, dropping top half
        b vmmRetPt ; Join normal return...

        .align 5
        .globl EXT(vmm_unmap_page32)

LEXT(vmm_unmap_page32)
        mr r6,r5 ; Move guest address to low half of long long
        li r5,0 ; Clear high half of guest address
        b EXT(vmm_unmap_page) ; Transition to real function...

        .align 5
        .globl EXT(vmm_get_page_dirty_flag32)

LEXT(vmm_get_page_dirty_flag32)
        mr r7,r6 ; Move reset flag
        mr r6,r5 ; Move guest address to low half of long long
        li r5,0 ; Clear high half of guest address
        b EXT(vmm_get_page_dirty_flag) ; Transition to real function...

        .align 5
        .globl EXT(vmm_protect_page32)

LEXT(vmm_protect_page32)
        mr r7,r6 ; Move protection bits
        mr r6,r5 ; Move guest address to low half of long long
        li r5,0 ; Clear high half of guest address
        b EXT(vmm_protect_page) ; Transition to real function...

        .align 5
        .globl EXT(vmm_map_execute32)

LEXT(vmm_map_execute32)
        mr r9,r7 ; Move prot to correct parm
        mr r8,r6 ; Move guest address to low half of long long
        li r7,0 ; Clear high half of guest address
        mr r6,r5 ; Move host address to low half of long long
        li r5,0 ; Clear high half of host address
        b EXT(vmm_map_execute) ; Transition to real function...

        .align 5
        .globl EXT(vmm_protect_execute32)

LEXT(vmm_protect_execute32)
        mr r7,r6 ; Move protection bits
        mr r6,r5 ; Move guest address to low half of long long
        li r5,0 ; Clear high half of guest address
        b EXT(vmm_protect_execute) ; Transition to real function...

        .align 5
        .globl EXT(vmm_map_list32)

LEXT(vmm_map_list32)
        li r6,0 ; Set 32-bit flavor
        b EXT(vmm_map_list) ; Go to common routine...

        .align 5
        .globl EXT(vmm_map_list64)

LEXT(vmm_map_list64)
        li r6,1 ; Set 64-bit flavor
        b EXT(vmm_map_list) ; Go to common routine...

        .align 5
        .globl EXT(vmm_unmap_list32)

LEXT(vmm_unmap_list32)
        li r6,0 ; Set 32-bit flavor
        b EXT(vmm_unmap_list) ; Go to common routine...

        .align 5
        .globl EXT(vmm_unmap_list64)

LEXT(vmm_unmap_list64)
        li r6,1 ; Set 64-bit flavor
        b EXT(vmm_unmap_list) ; Go to common routine...

/*
 * Here is where we transition to the virtual machine.
 *
 * We will swap the register context in the savearea with that which is saved in our shared
 * context area. We will do some validity checking, clear any nasty bits in the MSR, and force
 * the mandatory ones on.
 *
 * Then we will set up the new address space to run with, and anything else that is normally part
 * of a context switch.
 *
 * The vmm_execute_vm entry point is for the fused vmm_map_execute and vmm_protect_execute
 * calls. This is called, but never returned from. We always go directly back to the
 * user from here.
 */
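
;
; In outline, the switch below: validates the context index and address space
; ID, checks for pending intercepts (external stop or timer pop), swaps the
; general register state between the savearea and the context area (swapCtxt),
; switches the address space, and finally exits through the main exception
; handler so saveareas and ASTs get handled normally.
;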


        .align 5
        .globl EXT(vmm_execute_vm)

LEXT(vmm_execute_vm)
        lwz r30,ACT_MACT_PCB(r3) ; Restore the savearea pointer because it could be trash here
        b EXT(switchIntoVM) ; Join common...


        .align 5
        .globl EXT(switchIntoVM)

LEXT(switchIntoVM)
        mfsprg r10,0 ; Get the per_proc
        rlwinm r31,r4,24,24,31 ; Get the address space
        rlwinm r4,r4,0,24,31 ; Isolate the context id
        lwz r28,vmmControl(r3) ; Pick up the control table address
        subi r4,r4,1 ; Switch to zero offset
        rlwinm. r2,r28,0,0,30 ; Is there a context there? (Note: we ignore bit 31 so that we
        ; do not try this while we are transitioning it from off to on.)
        cmplwi cr1,r4,kVmmMaxContexts ; Is the index valid?
        beq- vmmBogus ; Not started, treat like a bogus system call
        subic. r31,r31,1 ; Make address space 0 based and test if we use default
        mulli r2,r4,vmmCEntrySize ; Get displacement from index
        bge- cr1,swvmmBogus ; Index is bogus...
        add r2,r2,r28 ; Point to the entry
        bge-- swvmmDAdsp ; There was an explicit address space request
        mr r31,r4 ; Default the address space to the context ID

swvmmDAdsp: la r2,vmmc(r2) ; Get the offset to the context array
        lwz r8,vmmGFlags(r28) ; Get the general flags
        lwz r4,vmmFlags(r2) ; Get the flags for the selected entry
        crset vmmMapDone ; Assume we will be mapping something
        lwz r5,vmmContextKern(r2) ; Get the context area address
        rlwinm. r26,r4,0,vmmInUseb,vmmInUseb ; See if the slot is in use
        cmplwi cr1,r31,kVmmMaxContexts ; See if we have a valid address space ID
        rlwinm r8,r8,0,24,31 ; Clean up address space
        beq-- swvmmBogus ; This context is no good...

        la r26,vmmAdsp(r28) ; Point to the pmaps
        sub r8,r8,r31 ; Get difference between the launching address space - 1 and the last one mapped into (1 if they are the same)
        rlwinm r31,r31,2,0,29 ; Index to the pmap
        cmplwi r8,1 ; See if we have the same address space
        bge-- cr1,swvmmBogAdsp ; Address space is no good...
        lwzx r31,r26,r31 ; Get the requested address space pmap
        li r0,0 ; Get a 0 in case we need to trash redrive
        lwz r15,spcFlags(r10) ; Get per_proc special flags
        beq swvmmAdspOk ; Do not invalidate address space if we are launching the same
        crclr vmmMapDone ; Clear map done flag
        stb r0,vmmGFlags+3(r28) ; Clear the last mapped address space ID so we will not redrive later
;
; Here we check for any immediate intercepts. So far, the only
; two of these are a timer pop and an external stop. We will not dispatch if
; either is true. The host needs to either reset the timer (i.e., set it
; to 0) or set a future pop time, or, for an external stop, set the vmmXStopRst flag.
;

swvmmAdspOk:
        rlwinm. r0,r15,0,FamVMmodebit,FamVMmodebit ; Test FamVMmodebit
        stw r31,vmmPmap(r2) ; Save the last dispatched address space
        bne vmmFamGuestResume
        lwz r6,vmmCntrl(r5) ; Get the control field
        rlwinm. r7,r6,0,vmmXStartb,vmmXStartb ; Clear all but start bit
        beq+ swvmChkStop ; Do not reset stop
        andc r6,r6,r7 ; Clear it
        li r8,vmmFlags ; Point to the flags
        stw r6,vmmCntrl(r5) ; Set the control field

swvmtryx:   lwarx r4,r8,r2 ; Pick up the flags
        rlwinm r4,r4,0,vmmXStopb+1,vmmXStopb-1 ; Clear the stop bit
        stwcx. r4,r8,r2 ; Save the updated field
        bne- swvmtryx ; Try again...

swvmChkStop:
        rlwinm. r26,r4,0,vmmXStopb,vmmXStopb ; Is this VM stopped?
        bne-- swvmSetStop ; Yes...

        rlwinm. r26,r4,0,vmmTimerPopb,vmmTimerPopb ; Did the timer go pop?
        cmplwi cr1,r31,0 ; Is there actually an address space defined?
        bne-- swvmTimerPop ; Yes...

;
; Special note: we need to intercept any attempt to launch a guest into a non-existent address space.
; We will just go emulate an ISI if there is not one.
;

        beq-- cr1,swvmEmulateISI ; We are trying to launch into an undefined address space. This is not so good...

;
; Here is where we actually swap into the VM (alternate) context.
; We will bulk do a wholesale swap of the registers in the context area (the VMs)
; with the ones in the savearea (our main code). During the copy, we will fix up the
; MSR, forcing on a few bits and turning off a few others. Then we will deal with the
; PMAP and other per_proc stuff. Finally, we will exit back through the main exception
; handler to deal with unstacking saveareas and ASTs, etc.
;

swvmDoSwitch:

;
; First, we save the volatile registers we care about. Remember, all register
; handling here is pretty funky anyway, so we just pick the ones that are ok.
;
        mr r26,r3 ; Save the activation pointer

        la r11,vmmFacCtx(r2) ; Point to the virtual machine facility context
        mr r27,r2 ; Save the context entry
        stw r11,deferctx(r3) ; Start using the virtual machine facility context when we exit

        lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
        mr r3,r31 ; Get the pointer to the PMAP
        oris r15,r11,hi16(runningVM) ; Show that we are swapped to the VM right now
        bl EXT(hw_set_user_space_dis) ; Swap the address spaces
        lwz r17,vmmFlags(r27) ; Get the status flags
        lwz r20,vmmContextKern(r27) ; Get the state page kernel addr
        lwz r21,vmmCntrl(r20) ; Get vmmCntrl
        rlwinm. r22,r21,0,vmmFamEnab,vmmFamEnab ; Is vmmFamEnab set?
        lwz r22,vmmXAFlgs(r27) ; Get the eXtended Architecture flags
        stw r22,VMMXAFlgs(r10) ; Store vmmXAFlgs in per_proc VMMXAFlgs
        beq swvmNoFam ; No Fam intercept
        rlwinm. r22,r22,0,0,0 ; Are we doing a 64-bit virtual machine?
        rlwimi r15,r21,32+vmmFamSetb-FamVMmodebit,FamVMmodebit,FamVMmodebit ; Set FamVMmode bit
        rlwinm r21,r21,0,vmmFamSetb+1,vmmFamSetb-1 ; Clear FamSet bit
        bne swvmXfamintercpt
        lwz r22,famintercepts(r20) ; Load intercept bit field
        b swvmfamintercptres
swvmXfamintercpt:
        lwz r22,faminterceptsX(r20) ; Load intercept bit field
swvmfamintercptres:
        stw r21,vmmCntrl(r20) ; Update vmmCntrl
        lwz r19,vmmContextPhys(r27) ; Get vmmFAMarea address
        stw r22,vmmFAMintercept(r27) ; Set vmmFAMintercept
        stw r22,FAMintercept(r10) ; Store vmmFAMintercept in per_proc FAMintercept
        stw r19,VMMareaPhys(r10) ; Store VMMareaPhys
        oris r15,r15,hi16(FamVMena) ; Set FamVMenabit
swvmNoFam:
        stw r27,vmmCEntry(r26) ; Remember what context we are running
        bf++ vmmMapDone,swvmNoMap ; We have not mapped anything or it was not for this address space

;
; This little bit of hoopala here (triggered by vmmMapDone) is
; a performance enhancement. This will change the returning savearea
; to look like we had a DSI rather than a system call. Then, setting
; the redrive bit, the exception handler will redrive the exception as
; a DSI, entering the last mapped address into the hash table. This keeps
; double faults from happening. Note that there is only a gain if the VM
; takes a fault, the emulator merely resolves it, and then begins
; the VM execution again. It seems like this should be the normal case.
;
; Note that we need to revisit this when we move the virtual machines to the task because
; then it will be possible for more than one thread to access this stuff at the same time.
;

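;
; Concretely, the redrive below rewrites the savearea so the system call we
; arrived on looks like a DSI: the exception code becomes T_DATA_ACCESS, the
; DAR is set to the last address we mapped, the DSISR is set to a PTE-miss
; pattern, and the SAVredrive flag makes the exception handler re-dispatch
; the exception, entering the mapping into the hash table on the way out.
;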
        lwz r3,SAVflags(r30) ; Pick up the savearea flags
        lwz r2,vmmLastMap(r28) ; Get the last mapped address
        lwz r14,vmmLastMap+4(r28) ; Get the last mapped address low half
        li r20,T_DATA_ACCESS ; Change to DSI fault
        oris r3,r3,hi16(SAVredrive) ; Set exception redrive
        stw r2,savedar(r30) ; Set the DAR to the last thing we mapped
        stw r14,savedar+4(r30) ; Set the DAR to the last thing we mapped
        stw r3,SAVflags(r30) ; Turn on the redrive request
        lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
        li r0,0 ; Clear
        stw r20,saveexception(r30) ; Say we need to emulate a DSI
        stw r2,savedsisr(r30) ; Pretend we have a PTE miss
        stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of

swvmNoMap:  lwz r20,vmmContextKern(r27) ; Get the comm area
        rlwimi r15,r17,32-(floatCngbit-vmmFloatCngdb),floatCngbit,vectorCngbit ; Shift and insert changed bits
        lwz r20,vmmCntrl(r20) ; Get the control flags
        rlwimi r17,r11,8,24,31 ; Save the old spf flags
        rlwimi r15,r20,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
        stw r15,spcFlags(r10) ; Set per_proc copy of the special flags
        stw r15,ACT_MACT_SPF(r26) ; Set the special flags

        stw r17,vmmFlags(r27) ; Set the status flags

        bl swapCtxt ; First, swap the general register state

        lwz r17,vmmContextKern(r27) ; Get the comm area back
        la r25,vmmFacCtx(r27) ; Point to the facility context
        lwz r15,vmmCntrl(r17) ; Get the control flags again
        mfsprg r29,0 ; Get the per_proc

;
; Check if there is new floating point context to load
;

        rlwinm. r0,r15,0,vmmFloatLoadb,vmmFloatLoadb ; Are there new floating point values?
        lhz r29,PP_CPU_NUMBER(r29) ; Get our cpu number
        li r14,vmmppcFPRs ; Get displacement to the new values
        andc r15,r15,r0 ; Clear the bit
        beq+ swvmNoNewFloats ; Nope, good...

        lwz r19,FPUcpu(r25) ; Get the last CPU we ran on

        stw r29,FPUcpu(r25) ; Claim the context for ourselves

        eieio ; Make sure this stays in order

        lis r18,hi16(EXT(per_proc_info)) ; Set base per_proc
        mulli r19,r19,ppSize ; Find offset to the owner per_proc
        ori r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc
        li r16,FPUowner ; Displacement to float owner
        add r19,r18,r19 ; Point to the owner per_proc

swvminvfpu: lwarx r18,r16,r19 ; Get the owner

        sub r0,r18,r25 ; Subtract one from the other
        sub r3,r25,r18 ; Subtract the other from the one
        or r3,r3,r0 ; Combine them
        srawi r3,r3,31 ; Get a 0 if equal or -1 if not
        and r18,r18,r3 ; Make 0 if same, unchanged if not
        stwcx. r18,r16,r19 ; Try to invalidate it
        bne-- swvminvfpu ; Try again if there was a collision...
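
;
; The loop above is a branch-free compare-and-clear: for words a and b,
; (a-b)|(b-a) has its sign bit set exactly when a != b, so srawi by 31 yields
; 0 when the owner matches our context and -1 otherwise. ANDing that mask
; into the owner zeroes it only if we owned it, and the stwcx. retries on any
; reservation collision.
;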

        lwz r3,FPUsave(r25) ; Get the FPU savearea
        dcbt r14,r17 ; Touch in first line of new stuff
        mr. r3,r3 ; Is there one?
        bne+ swvmGotFloat ; Yes...

        bl EXT(save_get) ; Get a savearea

        li r7,SAVfloat ; Get floating point flag
        stw r26,SAVact(r3) ; Save our activation
        li r0,0 ; Get a zero
        stb r7,SAVflags+2(r3) ; Set that this is floating point
        stw r0,SAVprev+4(r3) ; Clear the back chain
        stw r0,SAVlevel(r3) ; We are always at level 0 (user state)

        stw r3,FPUsave(r25) ; Chain us to context

swvmGotFloat:
        la r4,savefp0(r3) ; Point to the destination
        mr r21,r3 ; Save the save area
        la r3,vmmppcFPRs(r17) ; Point to the source
        li r5,32*8 ; Get the size (32 FPRs at 8 bytes each)

        bl EXT(bcopy) ; Copy the new values

        lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
        stw r15,vmmCntrl(r17) ; Save the control flags sans vmmFloatLoad
        rlwinm r11,r11,0,floatCngbit+1,floatCngbit-1 ; Clear the changed bit here
        lwz r14,vmmStat(r17) ; Get the status flags
        mfsprg r10,0 ; Get the per_proc
        stw r11,ACT_MACT_SPF(r26) ; Set the special flags
        rlwinm r14,r14,0,vmmFloatCngdb+1,vmmFloatCngdb-1 ; Clear the changed flag
        stw r11,spcFlags(r10) ; Set per_proc copy of the special flags
        stw r14,vmmStat(r17) ; Set the status flags sans vmmFloatCngd

;
; Check if there is new vector context to load
;

swvmNoNewFloats:
        rlwinm. r0,r15,0,vmmVectLoadb,vmmVectLoadb ; Are there new vector values?
        li r14,vmmppcVRs ; Get displacement to the new values
        andc r15,r15,r0 ; Clear the bit
        beq+ swvmNoNewVects ; Nope, good...

        lwz r19,VMXcpu(r25) ; Get the last CPU we ran on

        stw r29,VMXcpu(r25) ; Claim the context for ourselves

        eieio ; Make sure this stays in order

        lis r18,hi16(EXT(per_proc_info)) ; Set base per_proc
        mulli r19,r19,ppSize ; Find offset to the owner per_proc
        ori r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc
        li r16,VMXowner ; Displacement to vector owner
        add r19,r18,r19 ; Point to the owner per_proc

swvminvvec: lwarx r18,r16,r19 ; Get the owner

        sub r0,r18,r25 ; Subtract one from the other
        sub r3,r25,r18 ; Subtract the other from the one
        or r3,r3,r0 ; Combine them
        srawi r3,r3,31 ; Get a 0 if equal or -1 if not
        and r18,r18,r3 ; Make 0 if same, unchanged if not
        stwcx. r18,r16,r19 ; Try to invalidate it
        bne-- swvminvvec ; Try again if there was a collision...

swvminvved: lwz r3,VMXsave(r25) ; Get the vector savearea
        dcbt r14,r17 ; Touch in first line of new stuff
        mr. r3,r3 ; Is there one?
        bne+ swvmGotVect ; Yes...

        bl EXT(save_get) ; Get a savearea

        li r7,SAVvector ; Get the vector type flag
        stw r26,SAVact(r3) ; Save our activation
        li r0,0 ; Get a zero
        stb r7,SAVflags+2(r3) ; Set that this is vector
        stw r0,SAVprev+4(r3) ; Clear the back chain
        stw r0,SAVlevel(r3) ; We are always at level 0 (user state)

        stw r3,VMXsave(r25) ; Chain us to context

swvmGotVect:
        mr r21,r3 ; Save the pointer to the savearea
        la r4,savevr0(r3) ; Point to the destination
        la r3,vmmppcVRs(r17) ; Point to the source
        li r5,32*16 ; Get the size (32 vectors at 16 bytes each)

        bl EXT(bcopy) ; Copy the new values

        lwz r8,savevrsave(r30) ; Get the current VRSave

        lwz r11,ACT_MACT_SPF(r26) ; Get the special flags
        stw r15,vmmCntrl(r17) ; Save the control flags sans vmmVectLoad
        rlwinm r11,r11,0,vectorCngbit+1,vectorCngbit-1 ; Clear the changed bit here
        stw r8,savevrvalid(r21) ; Set the current VRSave as valid saved
        lwz r14,vmmStat(r17) ; Get the status flags
        mfsprg r10,0 ; Get the per_proc
        stw r11,ACT_MACT_SPF(r26) ; Set the special flags
        rlwinm r14,r14,0,vmmVectCngdb+1,vmmVectCngdb-1 ; Clear the changed flag
        stw r11,spcFlags(r10) ; Set per_proc copy of the special flags
        stw r14,vmmStat(r17) ; Set the status flags sans vmmVectCngd

swvmNoNewVects:
        li r3,1 ; Show normal exit with check for AST
        lwz r16,ACT_THREAD(r26) ; Restore the thread pointer
        b EXT(ppcscret) ; Go back to handler...

        .align 5

swvmmBogus: li r2,kVmmBogusContext ; Set bogus index return
        li r0,0 ; Clear
        li r3,1 ; Set normal return with check for AST
        stw r0,saver3(r30) ; Clear upper half
        stw r2,saver3+4(r30) ; Pass back the return code
        b EXT(ppcscret) ; Go back to handler...

swvmmBogAdsp:
        li r2,kVmmInvalidAdSpace ; Set bogus address space return
        li r0,0 ; Clear
        li r3,1 ; Set normal return with check for AST
        stw r0,saver3(r30) ; Clear upper half
        stw r2,saver3+4(r30) ; Pass back the return code
        b EXT(ppcscret) ; Go back to handler...

swvmSetStop:
        li r2,kVmmStopped ; Set stopped return
        li r0,0 ; Clear
        li r3,1 ; Set normal return with check for AST
        stw r0,saver3(r30) ; Clear upper half
        stw r2,saver3+4(r30) ; Pass back the return code
        stw r2,return_code(r5) ; Save the exit code
        b EXT(ppcscret) ; Go back to handler...

swvmTimerPop:
        li r2,kVmmReturnNull ; Set null return
        li r0,0 ; Clear
        li r3,1 ; Set normal return with check for AST
        stw r0,saver3(r30) ; Clear upper half
        stw r2,saver3+4(r30) ; Pass back the return code
        stw r2,return_code(r5) ; Save the exit code
        b EXT(ppcscret) ; Go back to handler...

swvmEmulateISI:
        mfsprg r10,2 ; Get feature flags
        lwz r11,vmmXAFlgs(r28) ; Get the eXtended Architecture flags
        mtcrf 0x02,r10 ; Move pf64Bit to its normal place in CR6
        rlwinm. r11,r11,0,0,0 ; Are we doing a 64-bit virtual machine?
        li r2,kVmmReturnInstrPageFault ; Set ISI
        crnot vmmDoing64,cr0_eq ; Remember if this is a 64-bit VM
        li r0,0 ; Clear
        li r3,1 ; Set normal return with check for AST
        stw r0,saver3(r30) ; Clear upper half
        stw r2,saver3+4(r30) ; Pass back the return code
        stw r2,return_code(r5) ; Save the exit code
        lis r7,hi16(MASK(DSISR_HASH)) ; Pretend like we got a PTE miss
        bt vmmDoing64,vmISI64 ; Go do this for a 64-bit VM...

        lwz r10,vmmppcpc(r5) ; Get the PC as failing address
        stw r10,return_params+0(r5) ; Save PC as first return parm
        stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm
        b EXT(ppcscret) ; Go back to handler...

vmISI64:    ld r10,vmmppcXpc(r5) ; Get the PC as failing address
        std r10,return_paramsX+0(r5) ; Save PC as first return parm
        std r7,return_paramsX+8(r5) ; Save the pseudo-DSISR as second return parm
        b EXT(ppcscret) ; Go back to handler...

;
; These selectors are reserved for the FAM syscall fast path; reaching them here is invalid
;

        .align 5
        .globl EXT(vmm_fam_reserved)

LEXT(vmm_fam_reserved)
        li r3,0 ; Force exception
        b EXT(ppcscret) ; Go back to handler...

;
; Here is where we exit from vmm mode. We do this on any kind of exception.
; Interruptions (decrementer, external, etc.) are another story though;
; those we just pass through. We also switch back explicitly when requested.
; This will happen in response to a timer pop and some kinds of ASTs.
;
; Inputs:
; R3 = activation
; R4 = savearea
;

        .align 5
        .globl EXT(vmm_exit)

LEXT(vmm_exit)

vmmexitcall:
        lwz r2,vmmCEntry(r3) ; Get the context that is active
        lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy
        lwz r11,ACT_MACT_SPF(r3) ; Get the special flags
        lwz r19,vmmFlags(r2) ; Get the status flags
        mr r16,r3 ; R16 is safe to use for the activation address

        rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits
        li r0,0 ; Get a zero
        rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf
        lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation
        rlwinm r11,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag
        stw r0,vmmCEntry(r16) ; Clear pointer to active context
        stw r19,vmmFlags(r2) ; Set the status flags
        rlwinm r11,r11,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key
        mfsprg r10,0 ; Get the per_proc block
        rlwinm r11,r11,0,FamVMenabit+1,FamVMenabit-1 ; Clear FamVMEnable
        lwz r18,spcFlags(r10) ; Get per_proc copy of the special flags
        lwz r5,vmmContextKern(r2) ; Get the state page kernel addr
        rlwinm r11,r11,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMMode
        lwz r6,vmmCntrl(r5) ; Get the control field
        rlwimi r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits
        rlwimi r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits
        rlwimi r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits
        stw r11,ACT_MACT_SPF(r16) ; Set the special flags
        stw r6,vmmCntrl(r5) ; Store the control field
        stw r11,spcFlags(r10) ; Set per_proc copy of the special flags

        mr r26,r16 ; Save the activation pointer
        mr r27,r2 ; Save the context entry

        bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator

        la r5,facctx(r16) ; Point to the main facility context
        mr r2,r27 ; Restore
        stw r5,deferctx(r16) ; Start using the main facility context on the way out
        lwz r5,vmmContextKern(r27) ; Get the context area address
        mr r3,r16 ; Restore activation address
        stw r19,vmmStat(r5) ; Save the changed and popped flags
        bl swapCtxt ; Exchange the VM context for the emulator one
        stw r8,saver3+4(r30) ; Set the return code as the return value also
        b EXT(retFromVM) ; Go back to handler...


;
; Here is where we force exit from vmm mode. We do this as part of
; termination, to ensure that we are not executing in an alternate
; context. Because this is called from C, we need to save
; all non-volatile registers.
;
; Inputs:
; R3 = activation
; R4 = user savearea
; Interruptions disabled
;
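; Stack note: FM_ALIGN(20*4)+FM_SIZE reserves an aligned frame with room for
; the stmw of r13-r31 (19 words, rounded up to 20) on top of the standard
; frame area, which is why the LR save slot is addressed at
; FM_ALIGN(20*4)+FM_SIZE+FM_LR_SAVE below.
;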

        .align 5
        .globl EXT(vmm_force_exit)

LEXT(vmm_force_exit)

        stwu r1,-(FM_ALIGN(20*4)+FM_SIZE)(r1) ; Get enough space for the registers
        mflr r0 ; Save the return
        stmw r13,FM_ARG0(r1) ; Save all non-volatile registers
        stw r0,(FM_ALIGN(20*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

        lwz r2,vmmCEntry(r3) ; Get the context that is active
        lwz r11,ACT_MACT_SPF(r3) ; Get the special flags
        lwz r19,vmmFlags(r2) ; Get the status flags
        lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy

        rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits
        mr r26,r3 ; Save the activation pointer
        rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf
        li r0,0 ; Get a zero
        rlwinm r9,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag
        cmplw r9,r11 ; Check if we were in a vm
        lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation
        beq- vfeNotRun ; We were not in a vm....
        rlwinm r9,r9,0,userProtKeybit+1,userProtKeybit-1 ; Set back to normal protection key
        stw r0,vmmCEntry(r26) ; Clear pointer to active context
        mfsprg r10,0 ; Get the per_proc block
        lwz r18,spcFlags(r10) ; Get per_proc copy of the special flags
        rlwinm r9,r9,0,FamVMenabit+1,FamVMenabit-1 ; Clear Fam Enable
        rlwinm r9,r9,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear Fam Mode
        lwz r5,vmmContextKern(r2) ; Get the context area address
        lwz r6,vmmCntrl(r5) ; Get the control field
        rlwimi r19,r18,FamVMmodebit-vmmFAMmodeb,vmmFAMmodeb,vmmFAMmodeb ; Shift and insert changed bits
        rlwimi r6,r18,FamVMmodebit-vmmFamSetb,vmmFamSetb,vmmFamSetb ; Shift and insert changed bits
        rlwimi r6,r18,userProtKeybit-vmmKeyb,vmmKeyb,vmmKeyb ; Shift and insert changed bits
        stw r6,vmmCntrl(r5) ; Store the control field
        stw r9,ACT_MACT_SPF(r26) ; Set the special flags
        stw r9,spcFlags(r10) ; Set per_proc copy of the special flags

        mr r27,r2 ; Save the context entry
        mr r30,r4 ; Save the savearea

        bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator

        la r7,facctx(r26) ; Point to the main facility context

        lwz r5,vmmContextKern(r27) ; Get the context area address
        stw r19,vmmStat(r5) ; Save the changed and popped flags
        stw r7,deferctx(r26) ; Tell context launcher to switch facility context

        bl swapCtxt ; Exchange the VM context for the emulator one

        lwz r8,saveexception(r30) ; Pick up the exception code
        lwz r7,SAVflags(r30) ; Pick up the savearea flags
        lis r9,hi16(SAVredrive) ; Get exception redrive bit
        rlwinm r8,r8,30,24,31 ; Convert exception to return code
        andc r7,r7,r9 ; Make sure redrive is off because we are intercepting
        stw r8,saver3+4(r30) ; Set the return code as the return value also
        stw r7,SAVflags(r30) ; Set the savearea flags


vfeNotRun:  lmw r13,FM_ARG0(r1) ; Restore all non-volatile registers
        lwz r1,0(r1) ; Pop the stack
        lwz r0,FM_LR_SAVE(r1) ; Get the return address
        mtlr r0 ; Set return
        blr

;
; Note: we will not do any DCBTs to the savearea. It was just stored to a few cycles ago and should
; still be in the cache.
;
; NOTE NOTE: R16 is important to save!!!!
;
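; Layout reminder: savearea register slots (saverN) are 64 bits wide, with the
; low word at saverN+4, while the 32-bit guest context uses word-sized vmmppc*
; fields and the 64-bit guest context uses doubleword vmmppcX* fields. The
; dcbt instructions below just prefetch successive lines of the context area
; as the copy advances.
;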
        .align 5

swapCtxt:
        mfsprg r10,2 ; Get feature flags
        la r6,vmmppcpc(r5) ; Point to the first line
        mtcrf 0x02,r10 ; Move pf64Bit to its normal place in CR6

        lwz r14,saveexception(r30) ; Get the exception code
        dcbt 0,r6 ; Touch in the first line of the context area
        bt++ pf64Bitb,swap64 ; Go do this swap on a 64-bit machine...

        lwz r7,savesrr0+4(r30) ; Start moving context
        lwz r8,savesrr1+4(r30)
        lwz r9,saver0+4(r30)
        cmplwi cr1,r14,T_SYSTEM_CALL ; Are we switching because of a system call?
        lwz r10,saver1+4(r30)
        lwz r11,saver2+4(r30)
        lwz r12,saver3+4(r30)
        lwz r13,saver4+4(r30)
        la r6,vmmppcr6(r5) ; Point to second line
        lwz r14,saver5+4(r30)

        dcbt 0,r6 ; Touch second line of context area

        lwz r15,vmmppcpc(r5) ; First line of context
        lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
        lwz r23,vmmppcmsr(r5)
        ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
        lwz r17,vmmppcr0(r5)
        lwz r18,vmmppcr1(r5)
        and r23,r23,r22 ; Keep only the controllable bits
        lwz r19,vmmppcr2(r5)
        oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
        lwz r20,vmmppcr3(r5)
        ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
        lwz r21,vmmppcr4(r5)
        lwz r22,vmmppcr5(r5)

        dcbt 0,r6 ; Touch third line of context area

        stw r7,vmmppcpc(r5) ; Save emulator context into the context area
        stw r8,vmmppcmsr(r5)
        stw r9,vmmppcr0(r5)
        stw r10,vmmppcr1(r5)
        stw r11,vmmppcr2(r5)
        stw r12,vmmppcr3(r5)
        stw r13,vmmppcr4(r5)
        stw r14,vmmppcr5(r5)

;
; Save the first 3 parameters if we are an SC (we will take care of the last later)
;
        bne+ cr1,swapnotsc ; Skip next if not an SC exception...
        stw r12,return_params+0(r5) ; Save the first return
        stw r13,return_params+4(r5) ; Save the second return
        stw r14,return_params+8(r5) ; Save the third return

swapnotsc:  li r6,0 ; Clear this out
        stw r6,savesrr0(r30) ; Insure that high order is clear
        stw r15,savesrr0+4(r30) ; Save vm context into the savearea
        stw r6,savesrr1(r30) ; Insure that high order is clear
        stw r23,savesrr1+4(r30)
        stw r17,saver0+4(r30)
        stw r18,saver1+4(r30)
        stw r19,saver2+4(r30)
        stw r20,saver3+4(r30)
        stw r21,saver4+4(r30)
        la r6,vmmppcr14(r5) ; Point to fourth line
        stw r22,saver5+4(r30)

        dcbt 0,r6 ; Touch fourth line

; Swap 8 registers

        lwz r7,saver6+4(r30) ; Read savearea
        lwz r8,saver7+4(r30)
        lwz r9,saver8+4(r30)
        lwz r10,saver9+4(r30)
        lwz r11,saver10+4(r30)
        lwz r12,saver11+4(r30)
        lwz r13,saver12+4(r30)
        lwz r14,saver13+4(r30)

        lwz r15,vmmppcr6(r5) ; Read vm context
        lwz r24,vmmppcr7(r5)
        lwz r17,vmmppcr8(r5)
        lwz r18,vmmppcr9(r5)
        lwz r19,vmmppcr10(r5)
        lwz r20,vmmppcr11(r5)
        lwz r21,vmmppcr12(r5)
        lwz r22,vmmppcr13(r5)

        stw r7,vmmppcr6(r5) ; Write context
        stw r8,vmmppcr7(r5)
        stw r9,vmmppcr8(r5)
        stw r10,vmmppcr9(r5)
        stw r11,vmmppcr10(r5)
        stw r12,vmmppcr11(r5)
        stw r13,vmmppcr12(r5)
        la r6,vmmppcr22(r5) ; Point to fifth line
        stw r14,vmmppcr13(r5)

        dcbt 0,r6 ; Touch fifth line

        stw r15,saver6+4(r30) ; Write vm context
        stw r24,saver7+4(r30)
        stw r17,saver8+4(r30)
        stw r18,saver9+4(r30)
        stw r19,saver10+4(r30)
        stw r20,saver11+4(r30)
        stw r21,saver12+4(r30)
        stw r22,saver13+4(r30)

; Swap 8 registers

        lwz r7,saver14+4(r30) ; Read savearea
        lwz r8,saver15+4(r30)
        lwz r9,saver16+4(r30)
        lwz r10,saver17+4(r30)
        lwz r11,saver18+4(r30)
        lwz r12,saver19+4(r30)
        lwz r13,saver20+4(r30)
        lwz r14,saver21+4(r30)

        lwz r15,vmmppcr14(r5) ; Read vm context
        lwz r24,vmmppcr15(r5)
        lwz r17,vmmppcr16(r5)
        lwz r18,vmmppcr17(r5)
        lwz r19,vmmppcr18(r5)
        lwz r20,vmmppcr19(r5)
        lwz r21,vmmppcr20(r5)
        lwz r22,vmmppcr21(r5)

        stw r7,vmmppcr14(r5) ; Write context
        stw r8,vmmppcr15(r5)
        stw r9,vmmppcr16(r5)
        stw r10,vmmppcr17(r5)
        stw r11,vmmppcr18(r5)
        stw r12,vmmppcr19(r5)
        stw r13,vmmppcr20(r5)
        la r6,vmmppcr30(r5) ; Point to sixth line
        stw r14,vmmppcr21(r5)

        dcbt 0,r6 ; Touch sixth line

        stw r15,saver14+4(r30) ; Write vm context
        stw r24,saver15+4(r30)
        stw r17,saver16+4(r30)
        stw r18,saver17+4(r30)
        stw r19,saver18+4(r30)
        stw r20,saver19+4(r30)
        stw r21,saver20+4(r30)
        stw r22,saver21+4(r30)

; Swap 8 registers

        lwz r7,saver22+4(r30) ; Read savearea
        lwz r8,saver23+4(r30)
        lwz r9,saver24+4(r30)
        lwz r10,saver25+4(r30)
        lwz r11,saver26+4(r30)
        lwz r12,saver27+4(r30)
        lwz r13,saver28+4(r30)
        lwz r14,saver29+4(r30)

        lwz r15,vmmppcr22(r5) ; Read vm context
        lwz r24,vmmppcr23(r5)
        lwz r17,vmmppcr24(r5)
        lwz r18,vmmppcr25(r5)
        lwz r19,vmmppcr26(r5)
        lwz r20,vmmppcr27(r5)
        lwz r21,vmmppcr28(r5)
        lwz r22,vmmppcr29(r5)

        stw r7,vmmppcr22(r5) ; Write context
        stw r8,vmmppcr23(r5)
        stw r9,vmmppcr24(r5)
        stw r10,vmmppcr25(r5)
        stw r11,vmmppcr26(r5)
        stw r12,vmmppcr27(r5)
        stw r13,vmmppcr28(r5)
        la r6,vmmppcvscr(r5) ; Point to seventh line
        stw r14,vmmppcr29(r5)

        dcbt 0,r6 ; Touch seventh line

        stw r15,saver22+4(r30) ; Write vm context
        stw r24,saver23+4(r30)
        stw r17,saver24+4(r30)
        stw r18,saver25+4(r30)
        stw r19,saver26+4(r30)
        stw r20,saver27+4(r30)
        stw r21,saver28+4(r30)
        stw r22,saver29+4(r30)

; Swap the last 2 GPRs plus CR, XER, LR, CTR, and VRSAVE

        lwz r7,saver30+4(r30) ; Read savearea
        lwz r8,saver31+4(r30)
        lwz r9,savecr(r30)
        lwz r10,savexer+4(r30)
        lwz r11,savelr+4(r30)
        lwz r12,savectr+4(r30)
        lwz r14,savevrsave(r30)

        lwz r15,vmmppcr30(r5) ; Read vm context
        lwz r24,vmmppcr31(r5)
        lwz r17,vmmppccr(r5)
        lwz r18,vmmppcxer(r5)
        lwz r19,vmmppclr(r5)
        lwz r20,vmmppcctr(r5)
        lwz r22,vmmppcvrsave(r5)

        stw r7,vmmppcr30(r5) ; Write context
        stw r8,vmmppcr31(r5)
        stw r9,vmmppccr(r5)
        stw r10,vmmppcxer(r5)
        stw r11,vmmppclr(r5)
        stw r12,vmmppcctr(r5)
        stw r14,vmmppcvrsave(r5)

        stw r15,saver30+4(r30) ; Write vm context
        stw r24,saver31+4(r30)
        stw r17,savecr(r30)
        stw r18,savexer+4(r30)
        stw r19,savelr+4(r30)
        stw r20,savectr+4(r30)
        stw r22,savevrsave(r30)

; Swap the VSCR and FPSCR

        lwz r7,savevscr+0(r30) ; Read savearea
        lwz r8,savevscr+4(r30)
        lwz r9,savevscr+8(r30)
        lwz r10,savevscr+12(r30)
        lwz r11,savefpscrpad(r30)
        lwz r12,savefpscr(r30)

        lwz r15,vmmppcvscr+0(r5) ; Read vm context
        lwz r24,vmmppcvscr+4(r5)
        lwz r17,vmmppcvscr+8(r5)
        lwz r18,vmmppcvscr+12(r5)
        lwz r19,vmmppcfpscrpad(r5)
        lwz r20,vmmppcfpscr(r5)

        stw r7,vmmppcvscr+0(r5) ; Write context
        stw r8,vmmppcvscr+4(r5)
        stw r9,vmmppcvscr+8(r5)
        stw r10,vmmppcvscr+12(r5)
        stw r11,vmmppcfpscrpad(r5)
        stw r12,vmmppcfpscr(r5)

        stw r15,savevscr+0(r30) ; Write vm context
        stw r24,savevscr+4(r30)
        stw r17,savevscr+8(r30)
        stw r18,savevscr+12(r30)
        stw r19,savefpscrpad(r30)
        stw r20,savefpscr(r30)


;
; Cobble up the exception return code and save any specific return values
;

        lwz r7,saveexception(r30) ; Pick up the exception code
        rlwinm r8,r7,30,24,31 ; Convert exception to return code
        cmplwi r7,T_DATA_ACCESS ; Was this a DSI?
        stw r8,return_code(r5) ; Save the exit code
        cmplwi cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI?
        beq+ swapDSI ; Yeah...
        cmplwi r7,T_ALIGNMENT ; Alignment exception?
        beq+ cr1,swapISI ; We had an ISI...
        cmplwi cr1,r7,T_SYSTEM_CALL ; Exiting because of a system call?
        beq+ swapDSI ; An alignment exception looks like a DSI...
        beq+ cr1,swapSC ; We had a system call...

        blr ; Return...
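
;
; The dispatch above folds four cases into two compares per CR field: cr0
; tests DSI then alignment, cr1 tests ISI then system call, and an alignment
; exception deliberately takes the DSI path since it returns the same
; DAR/DSISR pair. The return code itself is just the exception code shifted
; right by 2 and masked to a byte (rlwinm r8,r7,30,24,31).
;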

;
; Set exit returns for a DSI or alignment exception
;

swapDSI:    lwz r10,savedar+4(r30) ; Get the DAR
        lwz r7,savedsisr(r30) ; and the DSISR
        stw r10,return_params+0(r5) ; Save DAR as first return parm
        stw r7,return_params+4(r5) ; Save DSISR as second return parm
        blr ; Return...

;
; Set exit returns for an ISI
;

swapISI:    lwz r7,vmmppcmsr(r5) ; Get the SRR1 value
        lwz r10,vmmppcpc(r5) ; Get the PC as failing address
        rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR
        stw r10,return_params+0(r5) ; Save PC as first return parm
        stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm
        blr ; Return...
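
;
; For an ISI there is no DSISR, so the status bits are extracted from the
; guest MSR image (the SRR1 value at the time of the fault): rlwinm r7,r7,0,1,4
; keeps bits 1 through 4, which occupy the same positions as the corresponding
; DSISR bits, giving the host a pseudo-DSISR to go with the failing PC.
;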

;
; Set exit returns for a system call (note: we did the first 3 earlier)
; Do we really need to pass parameters back here????
;

swapSC:     lwz r10,vmmppcr6(r5) ; Get the fourth parameter
        stw r10,return_params+12(r5) ; Save it
        blr ; Return...

;
; Here is the swap for 64-bit machines
;

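;
; On 64-bit hardware the savearea is always accessed with doubleword loads and
; stores, but the guest context still comes in two flavors: the vmmDoing64 CR
; bit (set from vmmXAFlgs) selects between the word-sized vmmppc* fields of a
; 32-bit guest and the doubleword vmmppcX* fields of a 64-bit one at each step
; of the swap below.
;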
swap64:     lwz r22,vmmXAFlgs(r27) ; Get the eXtended Architecture flags
        ld r7,savesrr0(r30) ; Start moving context
        ld r8,savesrr1(r30)
        ld r9,saver0(r30)
        cmplwi cr1,r14,T_SYSTEM_CALL ; Are we switching because of a system call?
        ld r10,saver1(r30)
        ld r11,saver2(r30)
        rlwinm. r22,r22,0,0,0 ; Are we doing a 64-bit virtual machine?
        ld r12,saver3(r30)
        crnot vmmDoing64,cr0_eq ; Remember if this is a 64-bit VM
        ld r13,saver4(r30)
        la r6,vmmppcr6(r5) ; Point to second line
        ld r14,saver5(r30)

        dcbt 0,r6 ; Touch second line of context area

        bt vmmDoing64,sw64x1 ; Skip to 64-bit stuff

        lwz r15,vmmppcpc(r5) ; First line of context
        lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
        lwz r23,vmmppcmsr(r5)
        ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
        lwz r17,vmmppcr0(r5)
        lwz r18,vmmppcr1(r5)
        and r23,r23,r22 ; Keep only the controllable bits
        lwz r19,vmmppcr2(r5)
        oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
        lwz r20,vmmppcr3(r5)
        ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
        lwz r21,vmmppcr4(r5)
        lwz r22,vmmppcr5(r5)

        dcbt 0,r6 ; Touch third line of context area

        stw r7,vmmppcpc(r5) ; Save emulator context into the context area
        stw r8,vmmppcmsr(r5)
        stw r9,vmmppcr0(r5)
        stw r10,vmmppcr1(r5)
        stw r11,vmmppcr2(r5)
        stw r12,vmmppcr3(r5)
        stw r13,vmmppcr4(r5)
        stw r14,vmmppcr5(r5)

;
; Save the first 3 parameters if we are an SC (we will take care of the last later)
;
        bne+ cr1,sw64x1done ; Skip next if not an SC exception...
        stw r12,return_params+0(r5) ; Save the first return
        stw r13,return_params+4(r5) ; Save the second return
        stw r14,return_params+8(r5) ; Save the third return
        b sw64x1done ; We are done with this section...

sw64x1:     ld r15,vmmppcXpc(r5) ; First line of context
        li r0,1 ; Get a 1 to turn on 64-bit
        lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user (we will also allow 64-bit here)
        sldi r0,r0,63 ; Get 64-bit bit
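        ; (1 << 63 is the MSR SF bit; including it in the import mask
        ; lets a 64-bit guest keep 64-bit mode enabled)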
        ld r23,vmmppcXmsr(r5)
        ori r22,r22,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
        ld r17,vmmppcXr0(r5)
        or r22,r22,r0 ; Add the 64-bit bit
        ld r18,vmmppcXr1(r5)
        and r23,r23,r22 ; Keep only the controllable bits
        ld r19,vmmppcXr2(r5)
        oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
        ld r20,vmmppcXr3(r5)
        ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
        ld r21,vmmppcXr4(r5)
        ld r22,vmmppcXr5(r5)

        dcbt 0,r6 ; Touch third line of context area

        std r7,vmmppcXpc(r5) ; Save emulator context into the context area
        std r8,vmmppcXmsr(r5)
        std r9,vmmppcXr0(r5)
        std r10,vmmppcXr1(r5)
        std r11,vmmppcXr2(r5)
        std r12,vmmppcXr3(r5)
        std r13,vmmppcXr4(r5)
        std r14,vmmppcXr5(r5)

;
; Save the first 3 parameters if we are an SC (we will take care of the last later)
;
        bne+ cr1,sw64x1done ; Skip next if not an SC exception...
        std r12,return_paramsX+0(r5) ; Save the first return
        std r13,return_paramsX+8(r5) ; Save the second return
        std r14,return_paramsX+16(r5) ; Save the third return

sw64x1done:
        std r15,savesrr0(r30) ; Save vm context into the savearea
        std r23,savesrr1(r30)
        std r17,saver0(r30)
        std r18,saver1(r30)
        std r19,saver2(r30)
        std r20,saver3(r30)
        std r21,saver4(r30)
        la r6,vmmppcr14(r5) ; Point to fourth line
        std r22,saver5(r30)

        dcbt 0,r6 ; Touch fourth line

; Swap 8 registers

        ld r7,saver6(r30) ; Read savearea
        ld r8,saver7(r30)
        ld r9,saver8(r30)
        ld r10,saver9(r30)
        ld r11,saver10(r30)
        ld r12,saver11(r30)
        ld r13,saver12(r30)
        ld r14,saver13(r30)

        bt vmmDoing64,sw64x2 ; Skip to 64-bit stuff

        lwz r15,vmmppcr6(r5) ; Read vm context
        lwz r24,vmmppcr7(r5)
        lwz r17,vmmppcr8(r5)
        lwz r18,vmmppcr9(r5)
        lwz r19,vmmppcr10(r5)
        lwz r20,vmmppcr11(r5)
        lwz r21,vmmppcr12(r5)
        lwz r22,vmmppcr13(r5)

        stw r7,vmmppcr6(r5) ; Write context
        stw r8,vmmppcr7(r5)
        stw r9,vmmppcr8(r5)
        stw r10,vmmppcr9(r5)
        stw r11,vmmppcr10(r5)
        stw r12,vmmppcr11(r5)
        stw r13,vmmppcr12(r5)
        la r6,vmmppcr22(r5) ; Point to fifth line
        stw r14,vmmppcr13(r5)

        dcbt 0,r6 ; Touch fifth line
        b sw64x2done ; We are done with this section...

sw64x2:     ld r15,vmmppcXr6(r5) ; Read vm context
        ld r24,vmmppcXr7(r5)
        ld r17,vmmppcXr8(r5)
        ld r18,vmmppcXr9(r5)
        ld r19,vmmppcXr10(r5)
        ld r20,vmmppcXr11(r5)
        ld r21,vmmppcXr12(r5)
        ld r22,vmmppcXr13(r5)

        std r7,vmmppcXr6(r5) ; Write context
        std r8,vmmppcXr7(r5)
        std r9,vmmppcXr8(r5)
        std r10,vmmppcXr9(r5)
        std r11,vmmppcXr10(r5)
        std r12,vmmppcXr11(r5)
        std r13,vmmppcXr12(r5)
        la r6,vmmppcXr22(r5) ; Point to fifth line
        std r14,vmmppcXr13(r5)

        dcbt 0,r6 ; Touch fifth line

sw64x2done: std r15,saver6(r30) ; Write vm context
        std r24,saver7(r30)
        std r17,saver8(r30)
        std r18,saver9(r30)
        std r19,saver10(r30)
        std r20,saver11(r30)
        std r21,saver12(r30)
        std r22,saver13(r30)

; Swap 8 registers

        ld r7,saver14(r30) ; Read savearea
        ld r8,saver15(r30)
        ld r9,saver16(r30)
        ld r10,saver17(r30)
        ld r11,saver18(r30)
        ld r12,saver19(r30)
        ld r13,saver20(r30)
        ld r14,saver21(r30)

        bt vmmDoing64,sw64x3 ; Skip to 64-bit stuff

        lwz r15,vmmppcr14(r5) ; Read vm context
        lwz r24,vmmppcr15(r5)
        lwz r17,vmmppcr16(r5)
        lwz r18,vmmppcr17(r5)
        lwz r19,vmmppcr18(r5)
        lwz r20,vmmppcr19(r5)
        lwz r21,vmmppcr20(r5)
        lwz r22,vmmppcr21(r5)

        stw r7,vmmppcr14(r5) ; Write context
        stw r8,vmmppcr15(r5)
        stw r9,vmmppcr16(r5)
        stw r10,vmmppcr17(r5)
        stw r11,vmmppcr18(r5)
        stw r12,vmmppcr19(r5)
        stw r13,vmmppcr20(r5)
        la r6,vmmppcr30(r5) ; Point to sixth line
        stw r14,vmmppcr21(r5)

        dcbt 0,r6 ; Touch sixth line
        b sw64x3done ; Done with this section...

sw64x3:     ld r15,vmmppcXr14(r5) ; Read vm context
        ld r24,vmmppcXr15(r5)
        ld r17,vmmppcXr16(r5)
        ld r18,vmmppcXr17(r5)
        ld r19,vmmppcXr18(r5)
        ld r20,vmmppcXr19(r5)
        ld r21,vmmppcXr20(r5)
        ld r22,vmmppcXr21(r5)

        std r7,vmmppcXr14(r5) ; Write context
        std r8,vmmppcXr15(r5)
        std r9,vmmppcXr16(r5)
        std r10,vmmppcXr17(r5)
        std r11,vmmppcXr18(r5)
        std r12,vmmppcXr19(r5)
        std r13,vmmppcXr20(r5)
        la r6,vmmppcXr30(r5) ; Point to sixth line
        std r14,vmmppcXr21(r5)

        dcbt 0,r6 ; Touch sixth line

sw64x3done: std r15,saver14(r30) ; Write vm context
        std r24,saver15(r30)
        std r17,saver16(r30)
        std r18,saver17(r30)
        std r19,saver18(r30)
        std r20,saver19(r30)
        std r21,saver20(r30)
        std r22,saver21(r30)

; Swap 8 registers

        ld r7,saver22(r30) ; Read savearea
        ld r8,saver23(r30)
        ld r9,saver24(r30)
        ld r10,saver25(r30)
        ld r11,saver26(r30)
        ld r12,saver27(r30)
        ld r13,saver28(r30)
        ld r14,saver29(r30)

        bt vmmDoing64,sw64x4 ; Skip to 64-bit stuff

        lwz r15,vmmppcr22(r5) ; Read vm context
        lwz r24,vmmppcr23(r5)
        lwz r17,vmmppcr24(r5)
        lwz r18,vmmppcr25(r5)
        lwz r19,vmmppcr26(r5)
        lwz r20,vmmppcr27(r5)
        lwz r21,vmmppcr28(r5)
        lwz r22,vmmppcr29(r5)

        stw r7,vmmppcr22(r5) ; Write context
        stw r8,vmmppcr23(r5)
        stw r9,vmmppcr24(r5)
        stw r10,vmmppcr25(r5)
        stw r11,vmmppcr26(r5)
        stw r12,vmmppcr27(r5)
        stw r13,vmmppcr28(r5)
        la r6,vmmppcvscr(r5) ; Point to seventh line
        stw r14,vmmppcr29(r5)
        dcbt 0,r6 ; Touch seventh line
        b sw64x4done ; Done with this section...

sw64x4:     ld r15,vmmppcXr22(r5) ; Read vm context
        ld r24,vmmppcXr23(r5)
        ld r17,vmmppcXr24(r5)
        ld r18,vmmppcXr25(r5)
        ld r19,vmmppcXr26(r5)
        ld r20,vmmppcXr27(r5)
        ld r21,vmmppcXr28(r5)
        ld r22,vmmppcXr29(r5)

        std r7,vmmppcXr22(r5) ; Write context
        std r8,vmmppcXr23(r5)
        std r9,vmmppcXr24(r5)
        std r10,vmmppcXr25(r5)
        std r11,vmmppcXr26(r5)
        std r12,vmmppcXr27(r5)
        std r13,vmmppcXr28(r5)
        la r6,vmmppcvscr(r5) ; Point to seventh line
        std r14,vmmppcXr29(r5)

        dcbt 0,r6 ; Touch seventh line

sw64x4done: std r15,saver22(r30) ; Write vm context
        std r24,saver23(r30)
        std r17,saver24(r30)
        std r18,saver25(r30)
        std r19,saver26(r30)
        std r20,saver27(r30)
        std r21,saver28(r30)
        std r22,saver29(r30)

; Swap the last 2 GPRs plus CR, XER, LR, CTR, and VRSAVE

        ld r7,saver30(r30) ; Read savearea
        ld r8,saver31(r30)
        lwz r9,savecr(r30)
        ld r10,savexer(r30)
        ld r11,savelr(r30)
        ld r12,savectr(r30)
        lwz r14,savevrsave(r30)

        bt vmmDoing64,sw64x5 ; Skip to 64-bit stuff

        lwz r15,vmmppcr30(r5) ; Read vm context
        lwz r24,vmmppcr31(r5)
        lwz r17,vmmppccr(r5)
        lwz r18,vmmppcxer(r5)
        lwz r19,vmmppclr(r5)
        lwz r20,vmmppcctr(r5)
        lwz r22,vmmppcvrsave(r5)

        stw r7,vmmppcr30(r5) ; Write context
        stw r8,vmmppcr31(r5)
        stw r9,vmmppccr(r5)
        stw r10,vmmppcxer(r5)
        stw r11,vmmppclr(r5)
        stw r12,vmmppcctr(r5)
        stw r14,vmmppcvrsave(r5)
        b sw64x5done ; Done here...

sw64x5:     ld r15,vmmppcXr30(r5) ; Read vm context
        ld r24,vmmppcXr31(r5)
        lwz r17,vmmppcXcr(r5)
        ld r18,vmmppcXxer(r5)
        ld r19,vmmppcXlr(r5)
        ld r20,vmmppcXctr(r5)
        lwz r22,vmmppcXvrsave(r5)

        std r7,vmmppcXr30(r5) ; Write context
        std r8,vmmppcXr31(r5)
        stw r9,vmmppcXcr(r5)
        std r10,vmmppcXxer(r5)
        std r11,vmmppcXlr(r5)
        std r12,vmmppcXctr(r5)
        stw r14,vmmppcXvrsave(r5)

sw64x5done: std r15,saver30(r30) ; Write vm context
        std r24,saver31(r30)
        stw r17,savecr(r30)
        std r18,savexer(r30)
        std r19,savelr(r30)
        std r20,savectr(r30)
        stw r22,savevrsave(r30)

; Swap the VSCR and FPSCR

        lwz r7,savevscr+0(r30) ; Read savearea
        lwz r8,savevscr+4(r30)
        lwz r9,savevscr+8(r30)
        lwz r10,savevscr+12(r30)
        lwz r11,savefpscrpad(r30)
        lwz r12,savefpscr(r30)

        lwz r15,vmmppcvscr+0(r5) ; Read vm context
        lwz r24,vmmppcvscr+4(r5)
        lwz r17,vmmppcvscr+8(r5)
        lwz r18,vmmppcvscr+12(r5)
        lwz r19,vmmppcfpscrpad(r5)
        lwz r20,vmmppcfpscr(r5)

        stw r7,vmmppcvscr+0(r5) ; Write context
        stw r8,vmmppcvscr+4(r5)
        stw r9,vmmppcvscr+8(r5)
        stw r10,vmmppcvscr+12(r5)
        stw r11,vmmppcfpscrpad(r5)
        stw r12,vmmppcfpscr(r5)

        stw r15,savevscr+0(r30) ; Write vm context
        stw r24,savevscr+4(r30)
        stw r17,savevscr+8(r30)
        stw r18,savevscr+12(r30)
        stw r19,savefpscrpad(r30)
        stw r20,savefpscr(r30)


;
; Cobble up the exception return code and save any specific return values
;

        lwz r7,saveexception(r30) ; Pick up the exception code
        rlwinm r8,r7,30,24,31 ; Convert exception to return code
        cmplwi r7,T_DATA_ACCESS ; Was this a DSI?
        stw r8,return_code(r5) ; Save the exit code
        cmplwi cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI?
        beq+ swapDSI64 ; Yeah...
        cmplwi r7,T_ALIGNMENT ; Alignment exception?
        beq+ cr1,swapISI64 ; We had an ISI...
1581 cmplwi cr1,r7,T_SYSTEM_CALL ; Exiting because of a system call?
1582 beq+ swapDSI64 ; An alignment exception looks like a DSI...
1583 beq+ cr1,swapSC64 ; We had a system call...
1584
1585 blr ; Return...
1586
1587 ;
1588 ; Set exit returns for a DSI or alignment exception
1589 ;
1590
1591 swapDSI64: ld r10,savedar(r30) ; Get the DAR
1592 lwz r7,savedsisr(r30) ; and the DSISR
1593 bt vmmDoing64,sw64DSI ; Skip to 64-bit stuff...
1594
1595
1596 stw r10,return_params+0(r5) ; Save DAR as first return parm
1597 stw r7,return_params+4(r5) ; Save DSISR as second return parm
1598 blr ; Return...
1599
1600 sw64DSI: std r10,return_paramsX+0(r5) ; Save DAR as first return parm
1601 std r7,return_paramsX+8(r5) ; Save DSISR as second return parm (note that this is expanded to 64 bits)
1602 blr ; Return...
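; Note the two layouts used above and below: return_params is an array
; of 32-bit slots (parameter n at return_params+4*n), return_paramsX an
; array of 64-bit slots (parameter n at return_paramsX+8*n).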
1603
1604 ;
1605 ; Set exit returns for an ISI
1606 ;
1607
1608 swapISI64: bt vmmDoing64,sw64ISI ; Skip to 64-bit stuff...
1609 lwz r7,vmmppcmsr(r5) ; Get the SRR1 value
1610 lwz r10,vmmppcpc(r5) ; Get the PC as failing address
1611 rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR
1612 stw r10,return_params+0(r5) ; Save PC as first return parm
1613 stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm
1614 blr ; Return...
1615
1616 sw64ISI: ld r7,vmmppcXmsr(r5) ; Get the SRR1 value
1617 ld r10,vmmppcXpc(r5) ; Get the PC as failing address
1618 rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR
1619 std r10,return_paramsX+0(r5) ; Save PC as first return parm
1620 std r7,return_paramsX+8(r5) ; Save the pseudo-DSISR as second return parm
1621 blr ; Return...
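; In rough C, the pseudo-DSISR built above is (a sketch):
;
;     pseudo_dsisr = srr1 & 0x78000000;    /* SRR1 bits 1..4 */
;
; keeping only the ISI status bits that have DSISR counterparts.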
1622
1623 ;
1624 ; Set exit returns for a system call (note: we did the first 3 earlier)
1625 ; Do we really need to pass parameters back here????
1626 ;
1627
1628 swapSC64: bt vmmDoing64,sw64SC ; Skip to 64-bit stuff...
1629 lwz r10,vmmppcr6(r5) ; Get the fourth parameter
1630 stw r10,return_params+12(r5) ; Save it
1631 blr ; Return...
1632
1633 sw64SC: ld r10,vmmppcXr6(r5) ; Get the fourth parameter
1634 std r10,return_paramsX+24(r5) ; Save it
1635 blr ; Return...
1636
1637 ;
1638 ; vmmFamGuestResume:
1639 ; Restore the guest context when resuming from FAM mode.
1640 ;
1641
1642 vmmFamGuestResume:
1643 mfsprg r10,0 ; Get the per_proc
1644 lwz r27,vmmCEntry(r3) ; Get the context that is active
1645 lwz r4,VMMXAFlgs(r10) ; Get the eXtended Architecture flags
1646 rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
1647 lwz r15,spcFlags(r10) ; Get per_proc special flags
1648 mr r26,r3 ; Save the activation pointer
1649 lwz r20,vmmContextKern(r27) ; Get the comm area
1650 rlwinm r15,r15,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
1651 stw r15,spcFlags(r10) ; Update the special flags
1652 bne fgrX ; Go handle a 64-bit virtual machine...
1653 lwz r7,famguestpc(r20) ; Load famguest ctx pc
1654 bf++ vmmMapDone,fgrNoMap ; No mapping done for this space.
1655 lwz r3,SAVflags(r30) ; Pick up the savearea flags
1656 lwz r2,vmmLastMap(r28) ; Get the last mapped address (top half)
1657 lwz r6,vmmLastMap+4(r28) ; Get the last mapped address (bottom half)
1658 li r4,T_DATA_ACCESS ; Change to DSI fault
1659 oris r3,r3,hi16(SAVredrive) ; Set exception redrive
1660 stw r2,savedar(r30) ; Set the DAR to the last thing we mapped (top half)
1661 stw r6,savedar+4(r30) ; Set the DAR to the last thing we mapped (bottom half)
1662 stw r3,SAVflags(r30) ; Turn on the redrive request
1663 lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
1664 stw r4,saveexception(r30) ; Say we need to emulate a DSI
1665 li r0,0 ; Clear
1666 stw r2,savedsisr(r30) ; Pretend we have a PTE miss
1667 stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of
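; The faked-up DSI above amounts to, in rough C (a sketch; field names
; as in the savearea):
;
;     save->savedar       = vmmLastMap;        /* fault address  */
;     save->savedsisr     = MASK(DSISR_HASH);  /* claim PTE miss */
;     save->saveexception = T_DATA_ACCESS;
;     save->SAVflags     |= SAVredrive;        /* redrive it     */
;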
1668 fgrNoMap:
1669 lwz r4,savesrr1+4(r30) ; Get the saved MSR value
1670 stw r7,savesrr0+4(r30) ; Set savearea pc
1671 lwz r5,famguestmsr(r20) ; Load famguest ctx msr
1672 lis r6,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
1673 ori r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
1674 and r5,r5,r6 ; Keep only the controllable bits
1675 oris r5,r5,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1676 ori r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1677 rlwimi r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
1678 rlwimi r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
1679 stw r5,savesrr1+4(r30) ; Set savearea srr1
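; The MSR merge above is, in rough C (a sketch; macro names as in the
; source):
;
;     msr  = (guest_msr & MSR_IMPORT_BITS) | MSR_EXPORT_MASK_SET;
;     msr |= saved_msr & (MSR_FP | MSR_VEC);   /* keep live FP/VEC */
;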
1680 lwz r4,famguestr0(r20) ; Load famguest ctx r0
1681 lwz r5,famguestr1(r20) ; Load famguest ctx r1
1682 lwz r6,famguestr2(r20) ; Load famguest ctx r2
1683 lwz r7,famguestr3(r20) ; Load famguest ctx r3
1684 stw r4,saver0+4(r30) ; Set savearea r0
1685 stw r5,saver1+4(r30) ; Set savearea r1
1686 stw r6,saver2+4(r30) ; Set savearea r2
1687 stw r7,saver3+4(r30) ; Set savearea r3
1688 lwz r4,famguestr4(r20) ; Load famguest ctx r4
1689 lwz r5,famguestr5(r20) ; Load famguest ctx r5
1690 lwz r6,famguestr6(r20) ; Load famguest ctx r6
1691 lwz r7,famguestr7(r20) ; Load famguest ctx r7
1692 stw r4,saver4+4(r30) ; Set savearea r4
1693 stw r5,saver5+4(r30) ; Set savearea r5
1694 stw r6,saver6+4(r30) ; Set savearea r6
1695 stw r7,saver7+4(r30) ; Set savearea r7
1696 b fgrret
1697 fgrX:
1698 ld r7,famguestXpc(r20) ; Load famguest ctx pc
1699 bf++ vmmMapDone,fgrXNoMap ; No mapping done for this space.
1700 lwz r3,SAVflags(r30) ; Pick up the savearea flags
1701 ld r2,vmmLastMap(r28) ; Get the last mapped address
1702 li r4,T_DATA_ACCESS ; Change to DSI fault
1703 oris r3,r3,hi16(SAVredrive) ; Set exception redrive
1704 std r2,savedar(r30) ; Set the DAR to the last thing we mapped
1705 stw r3,SAVflags(r30) ; Turn on the redrive request
1706 lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss
1707 stw r4,saveexception(r30) ; Say we need to emulate a DSI
1708 li r0,0 ; Clear
1709 stw r2,savedsisr(r30) ; Pretend we have a PTE miss
1710 stb r0,vmmGFlags+3(r28) ; Show that the redrive has been taken care of
1711 fgrXNoMap:
1712 ld r4,savesrr1(r30) ; Get the saved MSR value
1713 std r7,savesrr0(r30) ; Set savearea pc
1714 ld r5,famguestXmsr(r20) ; Load famguest ctx msr
1715 lis r6,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
1716 ori r6,r6,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
1717 and r5,r5,r6 ; Keep only the controllable bits
1718 oris r5,r5,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
1719 ori r5,r5,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
1720 rlwimi r5,r4,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
1721 rlwimi r5,r4,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
1722 std r5,savesrr1(r30) ; Set savearea srr1
1723 ld r4,famguestXr0(r20) ; Load famguest ctx r0
1724 ld r5,famguestXr1(r20) ; Load famguest ctx r1
1725 ld r6,famguestXr2(r20) ; Load famguest ctx r2
1726 ld r7,famguestXr3(r20) ; Load famguest ctx r3
1727 std r4,saver0(r30) ; Set savearea r0
1728 std r5,saver1(r30) ; Set savearea r1
1729 std r6,saver2(r30) ; Set savearea r2
1730 std r7,saver3(r30) ; Set savearea r3
1731 ld r4,famguestXr4(r20) ; Load famguest ctx r4
1732 ld r5,famguestXr5(r20) ; Load famguest ctx r5
1733 ld r6,famguestXr6(r20) ; Load famguest ctx r6
1734 ld r7,famguestXr7(r20) ; Load famguest ctx r7
1735 std r4,saver4(r30) ; Set savearea r4
1736 std r5,saver5(r30) ; Set savearea r5
1737 std r6,saver6(r30) ; Set savearea r6
1738 std r7,saver7(r30) ; Set savearea r7
1739 fgrret:
1740 li r3,1 ; Show normal exit with check for AST
1741 lwz r16,ACT_THREAD(r26) ; Restore the thread pointer
1742 b EXT(ppcscret) ; Go back to handler...
1743
1744 ;
1745 ; FAM Intercept exception handler
1746 ;
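; Entry conventions, as used below (inferred from the code, not a
; documented contract): r2 = per_proc, r11 = exception type, r13 =
; savearea, r0 = caller's CR, and on 64-bit machines r5 = caller's XER;
; sprg2/sprg3 hold the original r13/r11.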
1747
1748 .align 5
1749 .globl EXT(vmm_fam_exc)
1750
1751 LEXT(vmm_fam_exc)
1752 lwz r4,VMMXAFlgs(r2) ; Get the eXtended Architecture flags
1753 lwz r1,pfAvailable(r2) ; Get the CPU features flags
1754 rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
1755 bne fexcX ; Go handle a 64-bit virtual machine...
1756 lwz r4,saver4+4(r13) ; Load savearea r4
1757 cmplwi r11,T_ALIGNMENT ; Alignment exception?
1758 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
1759 mtcrf 0x02,r1 ; Move pf64Bit to its normal place in CR6
1760 cmplwi cr1,r11,T_PROGRAM ; Exiting because of a program exception?
1761 bt++ pf64Bitb,fexcVMareaPhys64 ; Go do this on a 64-bit machine...
1762 slwi r3,r3,12 ; Change ppnum to physical address
1763 b fexcVMareaPhysres
1764 fexcVMareaPhys64:
1765 mtxer r5 ; Restore xer
1766 lwz r5,saver5+4(r13) ; Load savearea r5
1767 lwz r6,saver6+4(r13) ; Load savearea r6
1768 sldi r3,r3,12 ; Change ppnum to physical address
1769 fexcVMareaPhysres:
1770 stw r4,famguestr4(r3) ; Save r4 in famguest ctx
1771 stw r5,famguestr5(r3) ; Save r5 in famguest ctx
1772 stw r6,famguestr6(r3) ; Save r6 in famguest ctx
1773 stw r7,famguestr7(r3) ; Save r7 in famguest ctx
1774 lwz r4,saver0+4(r13) ; Load savearea r0
1775 lwz r5,saver1+4(r13) ; Load savearea r1
1776 lwz r6,saver2+4(r13) ; Load savearea r2
1777 lwz r7,saver3+4(r13) ; Load savearea r3
1778 stw r4,famguestr0(r3) ; Save r0 in famguest ctx
1779 stw r5,famguestr1(r3) ; Save r1 in famguest ctx
1780 stw r6,famguestr2(r3) ; Save r2 in famguest ctx
1781 stw r7,famguestr3(r3) ; Save r3 in famguest ctx
1782 lwz r4,spcFlags(r2) ; Load per_proc spcFlags
1783 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1784 stw r4,spcFlags(r2) ; Update per_proc spcFlags
1785 mfsrr0 r2 ; Get the interrupt srr0
1786 mfsrr1 r4 ; Get the interrupt srr1
1787 stw r2,famguestpc(r3) ; Save srr0 in famguest ctx
1788 stw r4,famguestmsr(r3) ; Save srr1 in famguest ctx
1789 li r6,lo16(MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)|MASK(MSR_FE1))
1790 andc r6,r4,r6 ; Clear SE BE FE0 FE1
1791 mtsrr1 r6 ; Set srr1
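; SE, BE, FE0 and FE1 are cleared so that single-step, branch trace and
; FP exceptions are off while the user FAM handler runs (presumably to
; keep the handler itself from faulting).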
1792 mr r6,r3 ; Set r6 with phys state page addr
1793 rlwinm r7,r11,30,24,31 ; Convert exception to return code
1794 beq+ cr1,fexcPRG ; We had a program exception...
1795 bne+ fexcret ; Not an alignment exception, go finish up...
1796 ; We had an Alignment...
1797 mfdar r3 ; Load dar
1798 mfdsisr r4 ; Load dsisr
1799 stw r3,famparam+0x4(r6) ; Set famparam 1 with dar
1800 stw r4,famparam+0x8(r6) ; Set famparam 2 with dsisr
1801 b fexcret ; Join the common exit...
1802 fexcPRG:
1803 stw r4,famparam+0x4(r6) ; Set famparam 1 with srr1
1804 mr r3,r4 ; Set r3 with srr1 (stands in for the DSISR)
1805 lwz r4,famguestr4(r6) ; Load r4 from famguest context
1806 fexcret:
1807 lwz r5,famguestr5(r6) ; Load r5 from famguest context
1808 lwz r13,famhandler(r6) ; Load user address to resume
1809 stw r2,famparam(r6) ; Set famparam 0 with srr0
1810 stw r7,famdispcode(r6) ; Save the exit code
1811 lwz r1,famrefcon(r6) ; load refcon
1812 bt++ pf64Bitb,fexcrfi64 ; Go do this on a 64-bit machine...
1813 mtcr r0 ; Restore cr
1814 mtsrr0 r13 ; Load srr0
1815 mr r0,r7 ; Set dispatch code
1816 lwz r7,famguestr7(r6) ; Load r7 from famguest context
1817 lwz r6,famguestr6(r6) ; Load r6 from famguest context
1818 mfsprg r13,2 ; Restore r13
1819 mfsprg r11,3 ; Restore r11
1820 rfi
1821 fexcrfi64:
1822 mtcr r0 ; Restore cr
1823 mtsrr0 r13 ; Load srr0
1824 mr r0,r7 ; Set dispatch code
1825 lwz r7,famguestr7(r6) ; Load r7 from famguest context
1826 lwz r6,famguestr6(r6) ; Load r6 from famguest context
1827 mfsprg r13,2 ; Restore r13
1828 mfsprg r11,3 ; Restore r11
1829 rfid
1830 fexcX:
1831 mtxer r5 ; Restore xer
1832 ld r4,saver4(r13) ; Load savearea r4
1833 ld r5,saver5(r13) ; Load savearea r5
1834 ld r6,saver6(r13) ; Load savearea r6
1835 cmplwi r11,T_ALIGNMENT ; Alignment exception?
1836 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
1837 mtcrf 0x02,r1 ; Move pf64Bit to its normal place in CR6
1838 cmplwi cr1,r11,T_PROGRAM ; Exiting because of a program exception?
1839 sldi r3,r3,12 ; Change ppnum to physical address
1840 std r4,famguestXr4(r3) ; Save r4 in famguest ctx
1841 std r5,famguestXr5(r3) ; Save r5 in famguest ctx
1842 std r6,famguestXr6(r3) ; Save r6 in famguest ctx
1843 std r7,famguestXr7(r3) ; Save r7 in famguest ctx
1844 ld r4,saver0(r13) ; Load savearea r0
1845 ld r5,saver1(r13) ; Load savearea r1
1846 ld r6,saver2(r13) ; Load savearea r2
1847 ld r7,saver3(r13) ; Load savearea r3
1848 std r4,famguestXr0(r3) ; Save r0 in famguest ctx
1849 std r5,famguestXr1(r3) ; Save r1 in famguest ctx
1850 std r6,famguestXr2(r3) ; Save r2 in famguest ctx
1851 std r7,famguestXr3(r3) ; Save r3 in famguest ctx
1852 lwz r4,spcFlags(r2) ; Load per_proc spcFlags
1853 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1854 stw r4,spcFlags(r2) ; Update per_proc spcFlags
1855 mfsrr0 r2 ; Get the interrupt srr0
1856 mfsrr1 r4 ; Get the interrupt srr1
1857 std r2,famguestXpc(r3) ; Save srr0 in famguest ctx
1858 std r4,famguestXmsr(r3) ; Save srr1 in famguest ctx
1859 li r6,lo16(MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)|MASK(MSR_FE1))
1860 andc r6,r4,r6 ; Clear SE BE FE0 FE1
1861 mtsrr1 r6 ; Set srr1
1862 mr r6,r3 ; Set r6 with phys state page addr
1863 rlwinm r7,r11,30,24,31 ; Convert exception to return code
1864 beq+ cr1,fexcXPRG ; We had a program exception...
1865 bne+ fexcXret ; Not an alignment exception, go finish up...
1866 ; We had an Alignment...
1867 mfdar r3 ; Load dar
1868 mfdsisr r4 ; Load dsisr
1869 std r3,famparamX+0x8(r6) ; Set famparam 1 with dar
1870 std r4,famparamX+0x10(r6) ; Set famparam 2 with dsisr
1871 b fexcXret
1872 fexcXPRG:
1873 std r4,famparamX+0x8(r6) ; Set famparam 1 with srr1
1874 mr r3,r4 ; Set r3 with srr1 (stands in for the DSISR)
1875 ld r4,famguestXr4(r6) ; Load r4 from famguest context
1876 fexcXret:
1877 ld r5,famguestXr5(r6) ; Load r5 from famguest context
1878 ld r13,famhandlerX(r6) ; Load user address to resume
1879 std r2,famparamX(r6) ; Set famparam 0 with srr0
1880 std r7,famdispcodeX(r6) ; Save the exit code
1881 ld r1,famrefconX(r6) ; load refcon
1882 mtcr r0 ; Restore cr
1883 mtsrr0 r13 ; Load srr0
1884 mr r0,r7 ; Set dispatch code
1885 ld r7,famguestXr7(r6) ; Load r7 from famguest context
1886 ld r6,famguestXr6(r6) ; Load r6 from famguest context
1887 mfsprg r13,2 ; Restore r13
1888 mfsprg r11,3 ; Restore r11
1889 rfid
1890
1891 ;
1892 ; FAM Intercept DSI/ISI fault handler
1893 ;
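; On entry (as used below): r2 = per_proc, r11 = exception type, r13 =
; savearea.  On exit the savearea has been rewritten so the normal
; exception return resumes at the user famhandler with the dispatch
; code in r0, refcon in r1, srr0 in r2, and fault info in r3/r4.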
1894
1895 .align 5
1896 .globl EXT(vmm_fam_pf)
1897
1898 LEXT(vmm_fam_pf)
1899 lwz r4,VMMXAFlgs(r2) ; Get the eXtended Architecture flags
1900 lwz r3,VMMareaPhys(r2) ; Load phys state page addr
1901 rlwinm. r4,r4,0,0,0 ; Are we doing a 64-bit virtual machine?
1902 bne fpfX ; Go handle a 64-bit virtual machine...
1903 lwz r4,saver0+4(r13) ; Load savearea r0
1904 lwz r5,saver1+4(r13) ; Load savearea r1
1905 lwz r6,saver2+4(r13) ; Load savearea r2
1906 lwz r7,saver3+4(r13) ; Load savearea r3
1907 bt++ pf64Bitb,fpfVMareaPhys64 ; Go do this on a 64-bit machine...
1908 slwi r3,r3,12 ; Change ppnum to physical address
1909 b fpfVMareaPhysret
1910 fpfVMareaPhys64:
1911 sldi r3,r3,12 ; Change ppnum to physical address
1912 fpfVMareaPhysret:
1913 stw r4,famguestr0(r3) ; Save r0 in famguest
1914 stw r5,famguestr1(r3) ; Save r1 in famguest
1915 stw r6,famguestr2(r3) ; Save r2 in famguest
1916 stw r7,famguestr3(r3) ; Save r3 in famguest
1917 lwz r4,saver4+4(r13) ; Load savearea r4
1918 lwz r5,saver5+4(r13) ; Load savearea r5
1919 lwz r6,saver6+4(r13) ; Load savearea r6
1920 lwz r7,saver7+4(r13) ; Load savearea r7
1921 stw r4,famguestr4(r3) ; Save r4 in famguest
1922 lwz r4,spcFlags(r2) ; Load spcFlags
1923 stw r5,famguestr5(r3) ; Save r5 in famguest
1924 lwz r5,savesrr0+4(r13) ; Get the interrupt srr0
1925 stw r6,famguestr6(r3) ; Save r6 in famguest
1926 lwz r6,savesrr1+4(r13) ; Load srr1
1927 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1928 stw r7,famguestr7(r3) ; Save r7 in famguest
1929 stw r4,spcFlags(r2) ; Update spcFlags
1930 lwz r1,famrefcon(r3) ; Load refcon
1931 lwz r2,famhandler(r3) ; Load famhandler to resume
1932 stw r5,famguestpc(r3) ; Save srr0
1933 stw r5,saver2+4(r13) ; Store srr0 in savearea r2
1934 stw r5,famparam(r3) ; Store srr0 in fam param 0
1935 stw r6,famguestmsr(r3) ; Save srr1 in famguestmsr
1936 cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this an ISI?
1937 rlwinm r7,r11,30,24,31 ; Convert exception to return code
1938 beq+ cr1,fpfISI ; We had an ISI...
1939 ; fpfDSI
1940 lwz r6,savedar+4(r13) ; Load dar from savearea
1941 lwz r4,savedsisr(r13) ; Load dsisr from savearea
1942 stw r6,famparam+0x4(r3) ; Store dar in fam param 1
1943 stw r6,saver3+4(r13) ; Store dar in savearea r3
1944 stw r4,famparam+0x8(r3) ; Store dsisr in fam param 2
1945 stw r4,saver4+4(r13) ; Store dsisr in savearea r4
1946 b fpfret
1947 fpfISI:
1948 rlwinm r6,r6,0,1,4 ; Save the bits that match the DSISR
1949 stw r6,famparam+0x4(r3) ; Store srr1 in fam param 1
1950 stw r6,saver3+4(r13) ; Store srr1 in savearea r3
1951 fpfret:
1952 stw r7,saver0+4(r13) ; Set dispatch code
1953 stw r7,famdispcode(r3) ; Set dispatch code
1954 stw r1,saver1+4(r13) ; Store refcon in savearea r1
1955 stw r2,savesrr0+4(r13) ; Store famhandler in srr0
1956 blr
1957 fpfX:
1958 ld r4,saver0(r13) ; Load savearea r0
1959 ld r5,saver1(r13) ; Load savearea r1
1960 ld r6,saver2(r13) ; Load savearea r2
1961 ld r7,saver3(r13) ; Load savearea r3
1962 sldi r3,r3,12 ; Change ppnum to physical address
1963 std r4,famguestXr0(r3) ; Save r0 in famguest
1964 std r5,famguestXr1(r3) ; Save r1 in famguest
1965 std r6,famguestXr2(r3) ; Save r2 in famguest
1966 std r7,famguestXr3(r3) ; Save r3 in famguest
1967 ld r4,saver4(r13) ; Load savearea r4
1968 ld r5,saver5(r13) ; Load savearea r5
1969 ld r6,saver6(r13) ; Load savearea r6
1970 ld r7,saver7(r13) ; Load savearea r7
1971 std r4,famguestXr4(r3) ; Save r4 in famguest
1972 lwz r4,spcFlags(r2) ; Load spcFlags
1973 std r5,famguestXr5(r3) ; Save r5 in famguest
1974 ld r5,savesrr0(r13) ; Get the interrupt srr0
1975 std r6,famguestXr6(r3) ; Save r6 in famguest
1976 ld r6,savesrr1(r13) ; Load srr1
1977 oris r4,r4,hi16(FamVMmode) ; Set FAM mode
1978 std r7,famguestXr7(r3) ; Save r7 in famguest
1979 stw r4,spcFlags(r2) ; Update spcFlags
1980 ld r1,famrefconX(r3) ; Load refcon
1981 ld r2,famhandlerX(r3) ; Load famhandler to resume
1982 std r5,famguestXpc(r3) ; Save srr0
1983 std r5,saver2(r13) ; Store srr0 in savearea r2
1984 std r5,famparamX(r3) ; Store srr0 in fam param 0
1985 std r6,famguestXmsr(r3) ; Save srr1 in famguestmsr
1986 cmplwi cr1,r11,T_INSTRUCTION_ACCESS ; Was this an ISI?
1987 rlwinm r7,r11,30,24,31 ; Convert exception to return code
1988 beq+ cr1,fpfXISI ; We had an ISI...
1989 ; fpfXDSI
1990 ld r6,savedar(r13) ; Load dar from savearea
1991 lwz r4,savedsisr(r13) ; Load dsisr from savearea
1992 std r6,famparamX+0x8(r3) ; Store dar in fam param 1
1993 std r6,saver3(r13) ; Store dar in savearea r3
1994 std r4,famparamX+0x10(r3) ; Store dsisr in fam param 2
1995 std r4,saver4(r13) ; Store dsisr in savearea r4
1996 b fpfXret
1997 fpfXISI:
1998 rlwinm r6,r6,0,1,4 ; Save the bits that match the DSISR
1999 std r6,famparamX+0x8(r3) ; Store srr1 in fam param 1
2000 std r6,saver3(r13) ; Store srr1 in savearea r3
2001 fpfXret:
2002 std r7,saver0(r13) ; Set dispatch code
2003 std r7,famdispcodeX(r3) ; Set dispatch code
2004 std r1,saver1(r13) ; Store refcon in savearea r1
2005 std r2,savesrr0(r13) ; Store famhandler in srr0
2006 blr
2007
2008 ;
2009 ; Ultra Fast Path FAM syscalls
2010 ;
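; Selector conventions, as decoded below: cr5_eq means kvmmResumeGuest;
; otherwise r4 indexes guest registers 0-7, with cr2_eq choosing set
; (new value in r5) versus get.  In rough C, the accessors are
; (a sketch):
;
;     if (set)  famguest_regs[index] = value;
;     else      result = famguest_regs[index];
;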
2011
2012 .align 5
2013 .globl EXT(vmm_ufp)
2014
2015 LEXT(vmm_ufp)
2016 mfsprg r3,0 ; Get the per_proc area
2017 mr r11,r13 ; Save the entry CR (passed in r13) in r11
2018 lwz r13,VMMXAFlgs(r3) ; Get the eXtended Architecture flags
2019 rlwinm. r13,r13,0,0,0 ; Are we doing a 64-bit virtual machine?
2020 lwz r13,pfAvailable(r3) ; Get feature flags
2021 mtcrf 0x02,r13 ; Put pf64Bitb etc in cr6
2022 lwz r13,VMMareaPhys(r3) ; Load fast assist area
2023 bt++ pf64Bitb,ufpVMareaPhys64 ; Go do this on a 64-bit machine...
2024 slwi r13,r13,12 ; Change ppnum to physical address
2025 b ufpVMareaPhysret
2026 ufpVMareaPhys64:
2027 sldi r13,r13,12 ; Change ppnum to physical address
2028 ufpVMareaPhysret:
2029 bne ufpX ; Go handle a 64-bit virtual machine...
2030 bt cr5_eq,ufpResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest
2031 cmpwi cr7,r4,0 ; Compare first arg with 0
2032 cmpwi cr5,r4,7 ; Compare first arg with 7
2033 cror cr1_eq,cr7_lt,cr5_gt ; Set cr1_eq if index is outside the 0 to 7 range
2034 beq cr1,ufpVMret ; Return if not in the range
2035 slwi r4,r4,2 ; Multiply index by 4
2036 la r3,famguestr0(r13) ; Load the base address
2037 bt cr2_eq,ufpSetGuestReg ; Set/get selector
2038 ; ufpGetGuestReg
2039 lwzx r3,r4,r3 ; Load the guest register
2040 b ufpVMret ; Return
2041 ufpSetGuestReg:
2042 stwx r5,r4,r3 ; Update the guest register
2043 li r3,0 ; Set return value
2044 b ufpVMret ; Return
2045 ufpResumeGuest:
2046 lwz r7,spcFlags(r3) ; Pick up the special flags
2047 mtsrr0 r4 ; Set srr0
2048 rlwinm. r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl
2049 rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
2050 stw r7,spcFlags(r3) ; Update the special flags
2051 mfsrr1 r6 ; Get the current MSR value
2052
2053 lwz r4,famguestmsr(r13) ; Load guest srr1
2054 lis r1,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
2055 ori r1,r1,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
2056 and r4,r4,r1 ; Keep only the controllable bits
2057 oris r4,r4,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
2058 ori r4,r4,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
2059 rlwimi r4,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
2060 rlwimi r4,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
2061 beq ufpnokey ; Branch if not key switch
2062 mr r2,r7 ; Save r7
2063 rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
2064 cmpw cr0,r7,r2 ; Is userProtKeybit changed?
2065 beq ufpnokey ; No change, go to ufpnokey...
2066 mr r5,r3 ; Get the per_proc area
2067 stw r7,spcFlags(r3) ; Update the special flags
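; The key switch above is, in rough C (a sketch):
;
;     new = (old & ~userProtKey) | (guest_vmmKey ? userProtKey : 0);
;     if (new != old) { /* spill state and reload the segment regs */ }
;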
2068
2069 bt++ pf64Bitb,ufpsave64 ; Go do this on a 64-bit machine...
2070
2071 lwz r3,next_savearea+4(r5) ; Get the exception save area
2072 stw r8,saver8+4(r3) ; Save r8
2073 stw r9,saver9+4(r3) ; Save r9
2074 stw r10,saver10+4(r3) ; Save r10
2075 stw r11,saver11+4(r3) ; Save r11
2076 stw r12,saver12+4(r3) ; Save r12
2077 stw r13,saver13+4(r3) ; Save r13
2078 stw r14,saver14+4(r3) ; Save r14
2079 stw r15,saver15+4(r3) ; Save r15
2080 stw r16,saver16+4(r3) ; Save r16
2081 stw r17,saver17+4(r3) ; Save r17
2082 stw r18,saver18+4(r3) ; Save r18
2083 stw r19,saver19+4(r3) ; Save r19
2084 stw r20,saver20+4(r3) ; Save r20
2085 stw r21,saver21+4(r3) ; Save r21
2086 stw r22,saver22+4(r3) ; Save r22
2087 stw r23,saver23+4(r3) ; Save r23
2088 stw r24,saver24+4(r3) ; Save r24
2089 stw r25,saver25+4(r3) ; Save r25
2090 stw r26,saver26+4(r3) ; Save r26
2091 stw r27,saver27+4(r3) ; Save r27
2092 stw r28,saver28+4(r3) ; Save r28
2093 stw r29,saver29+4(r3) ; Save r29
2094 stw r30,saver30+4(r3) ; Save r30
2095 stw r31,saver31+4(r3) ; Save r31
2096 b ufpsaveres ; Continue
2097
2098 ufpsave64:
2099 ld r3,next_savearea(r5) ; Get the exception save area
2100 std r8,saver8(r3) ; Save r8
2101 std r9,saver9(r3) ; Save r9
2102 std r10,saver10(r3) ; Save r10
2103 std r11,saver11(r3) ; Save r11
2104 std r12,saver12(r3) ; Save r12
2105 std r13,saver13(r3) ; Save r13
2106 std r14,saver14(r3) ; Save r14
2107 std r15,saver15(r3) ; Save r15
2108 std r16,saver16(r3) ; Save r16
2109 std r17,saver17(r3) ; Save r17
2110 std r18,saver18(r3) ; Save r18
2111 std r19,saver19(r3) ; Save r19
2112 std r20,saver20(r3) ; Save r20
2113 std r21,saver21(r3) ; Save r21
2114 std r22,saver22(r3) ; Save r22
2115 std r23,saver23(r3) ; Save r23
2116 std r24,saver24(r3) ; Save r24
2117 std r25,saver25(r3) ; Save r25
2118 std r26,saver26(r3) ; Save r26
2119 std r27,saver27(r3) ; Save r27
2120 std r28,saver28(r3) ; Save r28
2121 std r29,saver29(r3) ; Save r29
2122 mfxer r2 ; Get xer
2123 std r30,saver30(r3) ; Save r30
2124 std r31,saver31(r3) ; Save r31
2125 std r2,savexer(r3) ; Save xer
2126
2127 ufpsaveres:
2128 mflr r20 ; Get lr
2129 li r2,1 ; Set to 1
2130 stw r7,spcFlags(r5) ; Update the special flags
2131 mr r13,r3 ; Set current savearea
2132 mr r21,r4 ; Save r4
2133 sth r2,ppInvSeg(r5) ; Force a reload of the SRs
2134 mr r29,r5 ; Get the per_proc area
2135 mr r3,r4 ; Set the MSR value we are switching to
2136 bl EXT(switchSegs) ; Go handle the segment registers/STB
2137 mr r3,r13 ; Set current savearea
2138 mr r4,r21 ; Restore r4
2139 mtlr r20 ; Set lr
2140
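; switchSegs is a genuine call, so the volatile state was spilled to
; the next savearea above and is reloaded below; ppInvSeg was set to
; force the segment registers/STB to be rebuilt under the new key
; (presumably why the full r8-r31 spill is needed here).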
2141 bt++ pf64Bitb,ufprestore64 ; Go do this on a 64-bit machine...
2142 lwz r8,saver8+4(r3) ; Load r8
2143 lwz r9,saver9+4(r3) ; Load r9
2144 lwz r10,saver10+4(r3) ; Load r10
2145 lwz r11,saver11+4(r3) ; Load r11
2146 lwz r12,saver12+4(r3) ; Load r12
2147 lwz r13,saver13+4(r3) ; Load r13
2148 lwz r14,saver14+4(r3) ; Load r14
2149 lwz r15,saver15+4(r3) ; Load r15
2150 lwz r16,saver16+4(r3) ; Load r16
2151 lwz r17,saver17+4(r3) ; Load r17
2152 lwz r18,saver18+4(r3) ; Load r18
2153 lwz r19,saver19+4(r3) ; Load r19
2154 lwz r20,saver20+4(r3) ; Load r20
2155 lwz r21,saver21+4(r3) ; Load r21
2156 lwz r22,saver22+4(r3) ; Load r22
2157 lwz r23,saver23+4(r3) ; Load r23
2158 lwz r24,saver24+4(r3) ; Load r24
2159 lwz r25,saver25+4(r3) ; Load r25
2160 lwz r26,saver26+4(r3) ; Load r26
2161 lwz r27,saver27+4(r3) ; Load r27
2162 lwz r28,saver28+4(r3) ; Load r28
2163 lwz r29,saver29+4(r3) ; Load r29
2164 lwz r30,saver30+4(r3) ; Load r30
2165 lwz r31,saver31+4(r3) ; Load r31
2166 b ufpnokey ; Continue
2167 ufprestore64:
2168 ld r2,savexer(r3) ; Load xer
2169 ld r8,saver8(r3) ; Load r8
2170 ld r9,saver9(r3) ; Load r9
2171 ld r10,saver10(r3) ; Load r10
2172 mtxer r2 ; Restore xer
2173 ld r11,saver11(r3) ; Load r11
2174 ld r12,saver12(r3) ; Load r12
2175 ld r13,saver13(r3) ; Load r13
2176 ld r14,saver14(r3) ; Load r14
2177 ld r15,saver15(r3) ; Load r15
2178 ld r16,saver16(r3) ; Load r16
2179 ld r17,saver17(r3) ; Load r17
2180 ld r18,saver18(r3) ; Load r18
2181 ld r19,saver19(r3) ; Load r19
2182 ld r20,saver20(r3) ; Load r20
2183 ld r21,saver21(r3) ; Load r21
2184 ld r22,saver22(r3) ; Load r22
2185 ld r23,saver23(r3) ; Load r23
2186 ld r24,saver24(r3) ; Load r24
2187 ld r25,saver25(r3) ; Load r25
2188 ld r26,saver26(r3) ; Load r26
2189 ld r27,saver27(r3) ; Load r27
2190 ld r28,saver28(r3) ; Load r28
2191 ld r29,saver29(r3) ; Load r29
2192 ld r30,saver30(r3) ; Load r30
2193 ld r31,saver31(r3) ; Load r31
2194 ufpnokey:
2195 mfsprg r3,0 ; Get the per_proc area
2196 mtsrr1 r4 ; Set srr1
2197 lwz r0,famguestr0(r13) ; Load r0
2198 lwz r1,famguestr1(r13) ; Load r1
2199 lwz r2,famguestr2(r13) ; Load r2
2200 lwz r3,famguestr3(r13) ; Load r3
2201 lwz r4,famguestr4(r13) ; Load r4
2202 lwz r5,famguestr5(r13) ; Load r5
2203 lwz r6,famguestr6(r13) ; Load r6
2204 lwz r7,famguestr7(r13) ; Load r7
2205 ufpVMret:
2206 mfsprg r13,2 ; Restore R13
2207 bt++ pf64Bitb,ufpVMrfi64 ; Go do this on a 64-bit machine...
2208 mtcrf 0xFF,r11 ; Restore CR
2209 mfsprg r11,3 ; Restore R11
2210 rfi ; All done, go back...
2211 ufpVMrfi64:
2212 mtcrf 0xFF,r11 ; Restore CR
2213 mfsprg r11,3 ; Restore R11
2214 rfid
2215
2216 ufpX:
2217 bt cr5_eq,ufpXResumeGuest ; if kvmmResumeGuest, branch to ResumeGuest
2218 cmpwi cr7,r4,0 ; Compare first arg with 0
2219 cmpwi cr5,r4,7 ; Compare first arg with 7
2220 cror cr1_eq,cr7_lt,cr5_gt ; Set cr1_eq if index is outside the 0 to 7 range
2221 beq cr1,ufpXVMret ; Return if not in the range
2222 slwi r4,r4,3 ; Multiply index by 8
2223 la r3,famguestXr0(r13) ; Load the base address
2224 bt cr2_eq,ufpXSetGuestReg ; Set/get selector
2225 ; ufpXGetGuestReg
2226 ldx r3,r4,r3 ; Load the guest register
2227 b ufpXVMret ; Return
2228 ufpXSetGuestReg:
2229 stdx r5,r4,r3 ; Update the guest register
2230 li r3,0 ; Set return value
2231 b ufpXVMret ; Return
2232 ufpXResumeGuest:
2233 lwz r7,spcFlags(r3) ; Pick up the special flags
2234 mtsrr0 r4 ; Set srr0
2235 rlwinm. r6,r6,0,vmmKeyb,vmmKeyb ; Check vmmKeyb in maskCntrl
2236 rlwinm r7,r7,0,FamVMmodebit+1,FamVMmodebit-1 ; Clear FamVMmodebit
2237 stw r7,spcFlags(r3) ; Update the special flags
2238 mfsrr1 r6 ; Get the current MSR value
2239
2240 ld r4,famguestXmsr(r13) ; Load guest srr1
2241 lis r1,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user
2242 ori r1,r1,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user
2243 and r4,r4,r1 ; Keep only the controllable bits
2244 oris r4,r4,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits
2245 ori r4,r4,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits
2246 rlwimi r4,r6,0,MSR_FP_BIT,MSR_FP_BIT ; Propagate guest FP
2247 rlwimi r4,r6,0,MSR_VEC_BIT,MSR_VEC_BIT ; Propagate guest Vector
2248 beq ufpXnokey ; Branch if not key switch
2249 mr r2,r7 ; Save r7
2250 rlwimi r7,r5,32+vmmKeyb-userProtKeybit,userProtKeybit,userProtKeybit ; Set the protection key
2251 cmpw cr0,r7,r2 ; Is userProtKeybit changed?
2252 beq ufpXnokey ; No change, go to ufpXnokey...
2253 mr r5,r3 ; Get the per_proc area
2254 stw r7,spcFlags(r3) ; Update the special flags
2255
2256 ld r3,next_savearea(r5) ; Get the exception save area
2257 std r8,saver8(r3) ; Save r8
2258 std r9,saver9(r3) ; Save r9
2259 std r10,saver10(r3) ; Save r10
2260 std r11,saver11(r3) ; Save r11
2261 std r12,saver12(r3) ; Save r12
2262 std r13,saver13(r3) ; Save r13
2263 std r14,saver14(r3) ; Save r14
2264 std r15,saver15(r3) ; Save r15
2265 std r16,saver16(r3) ; Save r16
2266 std r17,saver17(r3) ; Save r17
2267 std r18,saver18(r3) ; Save r18
2268 std r19,saver19(r3) ; Save r19
2269 std r20,saver20(r3) ; Save r20
2270 std r21,saver21(r3) ; Save r21
2271 std r22,saver22(r3) ; Save r22
2272 std r23,saver23(r3) ; Save r23
2273 std r24,saver24(r3) ; Save r24
2274 std r25,saver25(r3) ; Save r25
2275 std r26,saver26(r3) ; Save r26
2276 std r27,saver27(r3) ; Save r27
2277 std r28,saver28(r3) ; Save r28
2278 std r29,saver29(r3) ; Save r29
2279 mfxer r2 ; Get xer
2280 std r30,saver30(r3) ; Save r30
2281 std r31,saver31(r3) ; Save r31
2282 std r2,savexer(r3) ; Save xer
2283
2284 mflr r20 ; Get lr
2285 li r2,1 ; Set to 1
2286 stw r7,spcFlags(r5) ; Update the special flags
2287 mr r13,r3 ; Set current savearea
2288 mr r21,r4 ; Save r4
2289 sth r2,ppInvSeg(r5) ; Force a reload of the SRs
2290 mr r29,r5 ; Get the per_proc area
2291 mr r3,r4 ; Set the MSR value we are switching to
2292 bl EXT(switchSegs) ; Go handle the segment registers/STB
2293 mr r3,r13 ; Set current savearea
2294 mr r4,r21 ; Restore r4
2295 mtlr r20 ; Set lr
2296
2297 ld r2,savexer(r3) ; Load xer
2298 ld r8,saver8(r3) ; Load r8
2299 ld r9,saver9(r3) ; Load r9
2300 ld r10,saver10(r3) ; Load r10
2301 mtxer r2 ; Restore xer
2302 ld r11,saver11(r3) ; Load r11
2303 ld r12,saver12(r3) ; Load r12
2304 ld r13,saver13(r3) ; Load r13
2305 ld r14,saver14(r3) ; Load r14
2306 ld r15,saver15(r3) ; Load r15
2307 ld r16,saver16(r3) ; Load r16
2308 ld r17,saver17(r3) ; Load r17
2309 ld r18,saver18(r3) ; Load r18
2310 ld r19,saver19(r3) ; Load r19
2311 ld r20,saver20(r3) ; Load r20
2312 ld r21,saver21(r3) ; Load r21
2313 ld r22,saver22(r3) ; Load r22
2314 ld r23,saver23(r3) ; Load r23
2315 ld r24,saver24(r3) ; Load r24
2316 ld r25,saver25(r3) ; Load r25
2317 ld r26,saver26(r3) ; Load r26
2318 ld r27,saver27(r3) ; Load r27
2319 ld r28,saver28(r3) ; Load r28
2320 ld r29,saver29(r3) ; Load r29
2321 ld r30,saver30(r3) ; Load r30
2322 ld r31,saver31(r3) ; Load r31
2323 ufpXnokey:
2324 mtsrr1 r4 ; Set srr1
2325 ld r0,famguestXr0(r13) ; Load r0
2326 ld r1,famguestXr1(r13) ; Load r1
2327 ld r2,famguestXr2(r13) ; Load r2
2328 ld r3,famguestXr3(r13) ; Load r3
2329 ld r4,famguestXr4(r13) ; Load r4
2330 ld r5,famguestXr5(r13) ; Load r5
2331 ld r6,famguestXr6(r13) ; Load r6
2332 ld r7,famguestXr7(r13) ; Load r7
2333 ufpXVMret:
2334 mfsprg r13,2 ; Restore R13
2335 mtcrf 0xFF,r11 ; Restore CR
2336 mfsprg r11,3 ; Restore R11
2337 rfid
2338