/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <db_machine_commands.h>
#include <mach_debug.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <mach/ppc/vm_param.h>
;
;           +--------+--------+--------+--------+--------+--------+--------+--------+
;           |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - EA
;           +--------+--------+--------+--------+--------+--------+--------+--------+
;
;                    +--------+--------+--------+
;                    |//////BB|BBBBBBBB|BBBB////| - SID - base
;                    +--------+--------+--------+
;
;                    +--------+--------+--------+
;                    |////////|11111111|111111//| - SID - copy 1
;                    +--------+--------+--------+
;
;                    +--------+--------+--------+
;                    |////////|//222222|22222222| - SID - copy 2
;                    +--------+--------+--------+
;
;                    +--------+--------+--------+
;                    |//////33|33333333|33//////| - SID - copy 3 - not needed
;                    +--------+--------+--------+   for 65 bit VPN
;
;           +--------+--------+--------+--------+--------+--------+--------+
;           |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////| - SID Hash - this is all
;           +--------+--------+--------+--------+--------+--------+--------+   SID copies ORed
;
;           +--------+--------+--------+--------+--------+--------+--------+
;           |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////| - Shifted high order EA
;           +--------+--------+--------+--------+--------+--------+--------+   left shifted "segment"
;
;           +--------+--------+--------+--------+--------+--------+--------+
;           |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////| - VSID - SID Hash XORed
;           +--------+--------+--------+--------+--------+--------+--------+   with shifted EA
;
;            0        0        1        2        3        4        4        5        6        7        7
;            0        8        6        4        2        0        8        6        4        2        9
;           +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;           |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - VPN
;           +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;
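;
;           In C terms the pictures above boil down to a few shifts and XORs.
;           A rough sketch only (field placements are read off the diagrams;
;           the names are illustrative, not kernel interfaces):
;
;               typedef unsigned long long u64;
;
;               u64 make_vsid(u64 sid, u64 ea)
;               {
;                   u64 hash = sid | (sid << 14) | (sid << 28);  /* all SID copies ORed       */
;                   u64 seg  = (ea >> 28) << 14;                 /* left-shifted "segment"    */
;                   return hash ^ seg;                           /* VSID = hash XOR shifted EA */
;               }
;
;           The VPN is then the VSID followed by the page bits of the EA; the
;           byte offset bits drop out.
;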
/*
 *  addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping
 *
 *  Maps a page or block into a pmap
 *
 *  Returns 0 if add worked or the vaddr of the first overlap if not
 *
 *  Make mapping - not block or I/O - note: this is low-level, upper should remove duplicates
 *
 *  1) bump mapping busy count
 *  2) lock pmap share
 *  3) find mapping full path - finds all possible list previous elements
 *  4) upgrade pmap to exclusive
 *  5) add mapping to search list
 *  6) find physent
 *  7) lock physent
 *  8) add to physent
 *  9) unlock physent
 *  10) unlock pmap
 *  11) drop mapping busy count
 *
 *  Make mapping - block or I/O - note: this is low-level, upper should remove duplicates
 *
 *  1) bump mapping busy count
 *  2) lock pmap share
 *  3) find mapping full path - finds all possible list previous elements
 *  4) upgrade pmap to exclusive
 *  5) add mapping to search list
 *  6) unlock pmap
 *  7) drop mapping busy count
 */
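;
;           The steps above, as a compact C sketch (the helper names are
;           hypothetical; the real work is the assembly below):
;
;               addr64_t hw_add_map_sketch(pmap_t *pmap, mapping_t *mp)
;               {
;                   addr64_t colliding = 0;
;                   mapBumpBusy(mp);                          /* 1) bump mapping busy count  */
;                   sxlkShared(&pmap->pmapSXlk);              /* 2) lock pmap share          */
;                   for (;;) {
;                       colliding = mapSearchFullVaddr(pmap, mp);       /* 3) full search    */
;                       if (colliding) break;                 /* overlap: report its vaddr   */
;                       if (sxlkPromote(&pmap->pmapSXlk) == 0) break;   /* 4) now exclusive  */
;                       sxlkConvert(&pmap->pmapSXlk);         /* lock was dropped: rescan    */
;                   }
;                   if (!colliding) {
;                       mapInsert(pmap, mp);                  /* 5) add mapping to search list */
;                       /* 6-9) find and lock physent, chain the mapping, unlock it */
;                   }
;                   sxlkUnlock(&pmap->pmapSXlk);              /* 10) unlock pmap             */
;                   mapDropBusy(mp);                          /* 11) drop mapping busy count */
;                   return colliding;                         /* 0 if the add worked         */
;               }
;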
            .align  5
            .globl  EXT(hw_add_map)

LEXT(hw_add_map)

            stwu    r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r17,FM_ARG0+0x00(r1)        ; Save a register
            stw     r18,FM_ARG0+0x04(r1)        ; Save a register
            stw     r19,FM_ARG0+0x08(r1)        ; Save a register
            mfsprg  r19,2                       ; Get feature flags
            stw     r20,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r21,FM_ARG0+0x10(r1)        ; Save a register
            mtcrf   0x02,r19                    ; move pf64Bit to cr6
            stw     r22,FM_ARG0+0x14(r1)        ; Save a register
            stw     r23,FM_ARG0+0x18(r1)        ; Save a register
            stw     r24,FM_ARG0+0x1C(r1)        ; Save a register
            stw     r25,FM_ARG0+0x20(r1)        ; Save a register
            stw     r26,FM_ARG0+0x24(r1)        ; Save a register
            stw     r27,FM_ARG0+0x28(r1)        ; Save a register
            stw     r28,FM_ARG0+0x2C(r1)        ; Save a register
            stw     r29,FM_ARG0+0x30(r1)        ; Save a register
            stw     r30,FM_ARG0+0x34(r1)        ; Save a register
            stw     r31,FM_ARG0+0x38(r1)        ; Save a register
            stw     r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Save the return
            lwz     r11,pmapFlags(r3)           ; Get pmaps flags
            rlwinm. r11,r11,0,pmapVMgsaa        ; Is guest shadow assist active?
            bne     hamPanic                    ; Call not valid for guest shadow assist pmap

            rlwinm  r11,r4,0,0,19               ; Round down to get mapping block address
            mr      r28,r3                      ; Save the pmap
            mr      r31,r4                      ; Save the mapping
            bt++    pf64Bitb,hamSF1             ; skip if 64-bit (only they take the hint)
            lwz     r20,pmapvr+4(r3)            ; Get conversion mask for pmap
            lwz     r21,mbvrswap+4(r11)         ; Get conversion mask for mapping
            b       hamSF1x                     ; Done...

hamSF1:     ld      r20,pmapvr(r3)              ; Get conversion mask for pmap
            ld      r21,mbvrswap(r11)           ; Get conversion mask for mapping

hamSF1x:    bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit
            mr      r17,r11                     ; Save the MSR
            xor     r28,r28,r20                 ; Convert the pmap to physical addressing
            xor     r31,r31,r21                 ; Convert the mapping to physical addressing

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            lwz     r24,mpFlags(r31)            ; Pick up the flags
            bne--   hamBadLock                  ; Nope...

            li      r21,0                       ; Remember that we have the shared lock
;
;           Note that we do a full search (i.e., no shortcut level skips, etc.)
;           here so that we will know the previous elements so we can dequeue them
;           later.
;
hamRescan:  lwz     r4,mpVAddr(r31)             ; Get the new vaddr top half
            lwz     r5,mpVAddr+4(r31)           ; Get the new vaddr bottom half
            mr      r3,r28                      ; Pass in pmap to search
            lhz     r23,mpBSize(r31)            ; Get the block size for later
            mr      r29,r4                      ; Save top half of vaddr for later
            mr      r30,r5                      ; Save bottom half of vaddr for later
#if INSTRUMENT
            mfspr   r0,pmc1                     ; INSTRUMENT - saveinstr[16] - Take stamp before mapSearchFull
            stw     r0,0x6100+(16*16)+0x0(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc2                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(16*16)+0x4(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc3                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(16*16)+0x8(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc4                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(16*16)+0xC(0)    ; INSTRUMENT - Save it
#endif
            bl      EXT(mapSearchFull)          ; Go see if we can find it
#if INSTRUMENT
            mfspr   r0,pmc1                     ; INSTRUMENT - saveinstr[17] - Take stamp after mapSearchFull
            stw     r0,0x6100+(17*16)+0x0(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc2                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(17*16)+0x4(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc3                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(17*16)+0x8(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc4                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(17*16)+0xC(0)    ; INSTRUMENT - Save it
#endif
            rlwinm  r0,r24,0,mpType             ; Isolate the mapping type
            rlwinm  r23,r23,12,0,19             ; Convert standard block size to bytes
            cmplwi  r0,mpNest                   ; Is this a nested type?
            cmplwi  cr1,r0,mpLinkage            ; Linkage type?
            cror    cr0_eq,cr1_eq,cr0_eq        ; Nested or linkage type?
            lis     r0,0x8000                   ; Get 0xFFFFFFFF80000000
            li      r22,0                       ; Assume high part of size is 0
            bne++   hamNoNest                   ; This is not a nested or linkage type

            rlwinm  r22,r23,16,16,31            ; Convert partially converted size to segments
            rlwinm  r23,r23,16,0,3              ; Finish shift

hamNoNest:  add     r0,r0,r0                    ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
            mr.     r3,r3                       ; Did we find a mapping here?
            or      r0,r0,r30                   ; Make sure a carry will propagate all the way in 64-bit
            crmove  cr5_eq,cr0_eq               ; Remember that if we found the mapping
            addc    r9,r0,r23                   ; Add size to get last page in new range
            or.     r0,r4,r5                    ; Are we beyond the end?
            adde    r8,r29,r22                  ; Add the rest of the length on
            bne--   cr5,hamOverlay              ; Yeah, this is no good, can not double map...
            rlwinm  r9,r9,0,0,31                ; Clean top half of sum
            beq++   hamFits                     ; We are at the end...

            cmplw   cr1,r9,r5                   ; Is the bottom part of our end less?
            cmplw   r8,r4                       ; Is our end before the next (top part)
            crand   cr0_eq,cr0_eq,cr1_lt        ; Is the second half less and the first half equal?
            cror    cr0_eq,cr0_eq,cr0_lt        ; Or is the top half less
            bf--    cr0_eq,hamOverlay           ; Nope, we do not fit, there is an overlay...
;
;           Here we try to convert to an exclusive lock. This will fail if someone else
;           holds it shared.
;
hamFits:    mr.     r21,r21                     ; Do we already have the exclusive lock?
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock

            bne--   hamGotX                     ; We already have the exclusive...

            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            beq++   hamGotX                     ; Yeah...
;
;           Since we could not promote our lock, we need to convert to it.
;           That means that we drop the shared lock and wait to get it
;           exclusive. Since we release the lock, we need to do the look up
;           again.
;
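;
;           (In C terms, the retry protocol is roughly:
;
;               if (sxlkPromote(lk) != 0) {     /* others still hold it shared       */
;                   sxlkConvert(lk);            /* drop shared, reacquire exclusive  */
;                   goto rescan;                /* list may have changed meanwhile   */
;               }
;
;           a sketch only; sxlkConvert can also time out, which we treat as a
;           bad lock below.)
;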
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hamBadLock                  ; Nope, we must have timed out...

            li      r21,1                       ; Remember that we have the exclusive lock
            b       hamRescan                   ; Go look again...
hamGotX:
#if INSTRUMENT
            mfspr   r3,pmc1                     ; INSTRUMENT - saveinstr[18] - Take stamp before mapInsert
            stw     r3,0x6100+(18*16)+0x0(0)    ; INSTRUMENT - Save it
            mfspr   r3,pmc2                     ; INSTRUMENT - Get stamp
            stw     r3,0x6100+(18*16)+0x4(0)    ; INSTRUMENT - Save it
            mfspr   r3,pmc3                     ; INSTRUMENT - Get stamp
            stw     r3,0x6100+(18*16)+0x8(0)    ; INSTRUMENT - Save it
            mfspr   r3,pmc4                     ; INSTRUMENT - Get stamp
            stw     r3,0x6100+(18*16)+0xC(0)    ; INSTRUMENT - Save it
#endif
            mr      r3,r28                      ; Get the pmap to insert into
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapInsert)              ; Insert the mapping into the list
#if INSTRUMENT
            mfspr   r4,pmc1                     ; INSTRUMENT - saveinstr[19] - Take stamp after mapInsert
            stw     r4,0x6100+(19*16)+0x0(0)    ; INSTRUMENT - Save it
            mfspr   r4,pmc2                     ; INSTRUMENT - Get stamp
            stw     r4,0x6100+(19*16)+0x4(0)    ; INSTRUMENT - Save it
            mfspr   r4,pmc3                     ; INSTRUMENT - Get stamp
            stw     r4,0x6100+(19*16)+0x8(0)    ; INSTRUMENT - Save it
            mfspr   r4,pmc4                     ; INSTRUMENT - Get stamp
            stw     r4,0x6100+(19*16)+0xC(0)    ; INSTRUMENT - Save it
#endif
            rlwinm  r11,r24,mpPcfgb+2,mpPcfg>>6 ; Get the index into the page config table
            lhz     r8,mpSpace(r31)             ; Get the address space
            lwz     r11,lgpPcfg(r11)            ; Get the page config
            mfsdr1  r7                          ; Get the hash table base/bounds
            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count

            andi.   r0,r24,mpType               ; Is this a normal mapping?

            rlwimi  r8,r8,14,4,17               ; Double address space
            rlwinm  r9,r30,0,4,31               ; Clear segment
            rlwinm  r10,r30,18,14,17            ; Shift EA[32:35] down to correct spot in VSID (actually shift up 14)
            rlwimi  r8,r8,28,0,3                ; Get the last nybble of the hash
            rlwimi  r10,r29,18,0,13             ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
            rlwinm  r7,r7,0,16,31               ; Isolate length mask (or count)
            addi    r4,r4,1                     ; Bump up the mapped page count
            srw     r9,r9,r11                   ; Isolate just the page index
            xor     r10,r10,r8                  ; Calculate the low 32 bits of the VSID
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            xor     r9,r9,r10                   ; Get the hash to the PTEG

            bne--   hamDoneNP                   ; Not a normal mapping, therefore, no physent...

            bl      mapPhysFindLock             ; Go find and lock the physent

            bt++    pf64Bitb,ham64              ; This is 64-bit...

            lwz     r11,ppLink+4(r3)            ; Get the alias chain pointer
            rlwinm  r7,r7,16,0,15               ; Get the PTEG wrap size
            slwi    r9,r9,6                     ; Make PTEG offset
            ori     r7,r7,0xFFC0                ; Stick in the bottom part
            rlwinm  r12,r11,0,~ppFlags          ; Clean it up
            and     r9,r9,r7                    ; Wrap offset into table
            mr      r4,r31                      ; Set the link to install
            stw     r9,mpPte(r31)               ; Point the mapping at the PTEG (exact offset is invalid)
            stw     r12,mpAlias+4(r31)          ; Move to the mapping
            bl      mapPhyCSet32                ; Install the link
            b       hamDone                     ; Go finish up...
ham64:      li      r0,ppLFAmask                ; Get mask to clean up alias pointer
            subfic  r7,r7,46                    ; Get number of leading zeros
            eqv     r4,r4,r4                    ; Get all ones
            ld      r11,ppLink(r3)              ; Get the alias chain pointer
            rotrdi  r0,r0,ppLFArrot             ; Rotate clean up mask to get 0xF00000000000000F
            srd     r4,r4,r7                    ; Get the wrap mask
            sldi    r9,r9,7                     ; Change hash to PTEG offset
            andc    r11,r11,r0                  ; Clean out the lock and flags
            and     r9,r9,r4                    ; Wrap to PTEG

            stw     r9,mpPte(r31)               ; Point the mapping at the PTEG (exact offset is invalid)
            std     r11,mpAlias(r31)            ; Set the alias pointer in the mapping

            bl      mapPhyCSet64                ; Install the link
hamDone:    bl      mapPhysUnlock               ; Unlock the physent chain

hamDoneNP:  la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            mr      r3,r31                      ; Get the mapping pointer
            bl      mapDropBusy                 ; Drop the busy count

            li      r3,0                        ; Set successful return
            li      r4,0                        ; Set successful return
hamReturn:  bt++    pf64Bitb,hamR64             ; Yes...

            mtmsr   r17                         ; Restore enables/translation/etc.
            isync
            b       hamReturnC                  ; Join common...

hamR64:     mtmsrd  r17                         ; Restore enables/translation/etc.
            isync
hamReturnC:
#if INSTRUMENT
            mfspr   r0,pmc1                     ; INSTRUMENT - saveinstr[20] - Take stamp before returning
            stw     r0,0x6100+(20*16)+0x0(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc2                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(20*16)+0x4(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc3                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(20*16)+0x8(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc4                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(20*16)+0xC(0)    ; INSTRUMENT - Save it
#endif
            lwz     r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Get the return
            lwz     r17,FM_ARG0+0x00(r1)        ; Restore a register
            lwz     r18,FM_ARG0+0x04(r1)        ; Restore a register
            lwz     r19,FM_ARG0+0x08(r1)        ; Restore a register
            lwz     r20,FM_ARG0+0x0C(r1)        ; Restore a register
            mtlr    r0                          ; Restore the return
            lwz     r21,FM_ARG0+0x10(r1)        ; Restore a register
            lwz     r22,FM_ARG0+0x14(r1)        ; Restore a register
            lwz     r23,FM_ARG0+0x18(r1)        ; Restore a register
            lwz     r24,FM_ARG0+0x1C(r1)        ; Restore a register
            lwz     r25,FM_ARG0+0x20(r1)        ; Restore a register
            lwz     r26,FM_ARG0+0x24(r1)        ; Restore a register
            lwz     r27,FM_ARG0+0x28(r1)        ; Restore a register
            lwz     r28,FM_ARG0+0x2C(r1)        ; Restore a register
            lwz     r29,FM_ARG0+0x30(r1)        ; Restore a register
            lwz     r30,FM_ARG0+0x34(r1)        ; Restore a register
            lwz     r31,FM_ARG0+0x38(r1)        ; Restore a register
            lwz     r1,0(r1)                    ; Pop the stack

            blr                                 ; Leave...
hamOverlay: lwz     r22,mpFlags(r3)             ; Get the overlay flags
            li      r0,mpC|mpR                  ; Get a mask to turn off RC bits
            lwz     r23,mpFlags(r31)            ; Get the requested flags
            lwz     r20,mpVAddr(r3)             ; Get the overlay address
            lwz     r8,mpVAddr(r31)             ; Get the requested address
            lwz     r21,mpVAddr+4(r3)           ; Get the overlay address
            lwz     r9,mpVAddr+4(r31)           ; Get the requested address
            lhz     r10,mpBSize(r3)             ; Get the overlay length
            lhz     r11,mpBSize(r31)            ; Get the requested length
            lwz     r24,mpPAddr(r3)             ; Get the overlay physical address
            lwz     r25,mpPAddr(r31)            ; Get the requested physical address
            andc    r21,r21,r0                  ; Clear RC bits
            andc    r9,r9,r0                    ; Clear RC bits

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list
            rlwinm. r0,r22,0,mpRIPb,mpRIPb      ; Are we in the process of removing this one?
            mr      r3,r20                      ; Save the top of the colliding address
            rlwinm  r4,r21,0,0,19               ; Save the bottom of the colliding address

            bne++   hamRemv                     ; Being removed, go say so, so we can help...

            cmplw   r20,r8                      ; High part of vaddr the same?
            cmplw   cr1,r21,r9                  ; Low part?
            crand   cr5_eq,cr0_eq,cr1_eq        ; Remember if same

            cmplw   r10,r11                     ; Size the same?
            cmplw   cr1,r24,r25                 ; Physical address?
            crand   cr5_eq,cr5_eq,cr0_eq        ; Remember
            crand   cr5_eq,cr5_eq,cr1_eq        ; Remember if same

            xor     r23,r23,r22                 ; Compare mapping flag words
            andi.   r23,r23,mpType|mpPerm       ; Are mapping types and attributes the same?
            crand   cr5_eq,cr5_eq,cr0_eq        ; Merge in final check
            bf--    cr5_eq,hamSmash             ; This is not the same, so we return a smash...
            ori     r4,r4,mapRtMapDup           ; Set duplicate
            b       hamReturn                   ; And leave...

hamRemv:    ori     r4,r4,mapRtRemove           ; We are in the process of removing the collision
            b       hamReturn                   ; Come back yall...

hamSmash:   ori     r4,r4,mapRtSmash            ; Tell caller that it has some clean up to do
            b       hamReturn                   ; Join common epilog code

hamBadLock: li      r3,0                        ; Set lock time out error code
            li      r4,mapRtBadLk               ; Set lock time out error code
            b       hamReturn                   ; Leave....
hamPanic:   lis     r0,hi16(Choke)              ; System abend
            ori     r0,r0,lo16(Choke)           ; System abend
            li      r3,failMapping              ; Show that we failed some kind of mapping thing
            sc
/*
 *  mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
 *
 *  Upon entry, R3 contains a pointer to a pmap. Since vaddr is
 *  a 64-bit quantity, it is a long long so it is in R4 and R5.
 *
 *  We return the virtual address of the removed mapping as a
 *  return code.
 *
 *  Note that this is designed to be called from 32-bit mode with a stack.
 *
 *  We disable translation and all interruptions here. This keeps us
 *  from having to worry about a deadlock due to having anything locked
 *  and needing it to process a fault.
 *
 *  Note that this must be done with both interruptions off and VM off
 *
 *  Remove mapping via pmap, regular page, no pte
 *
 *  1) lock pmap share
 *  2) find mapping full path - finds all possible list previous elements
 *  3) upgrade pmap to exclusive
 *  4) bump mapping busy count
 *  5) remove mapping from search list
 *  6) unlock pmap
 *  7) lock physent
 *  8) remove from physent
 *  9) unlock physent
 *  10) drop mapping busy count
 *  11) drain mapping busy count
 *
 *  Remove mapping via pmap, regular page, with pte
 *
 *  1) lock pmap share
 *  2) find mapping full path - finds all possible list previous elements
 *  3) upgrade lock to exclusive
 *  4) bump mapping busy count
 *  5) lock PTEG
 *  6) invalidate pte and tlbie
 *  7) atomic merge rc into physent
 *  8) unlock PTEG
 *  9) remove mapping from search list
 *  10) unlock pmap
 *  11) lock physent
 *  12) remove from physent
 *  13) unlock physent
 *  14) drop mapping busy count
 *  15) drain mapping busy count
 *
 *  Remove mapping via pmap, I/O or block
 *
 *  1) lock pmap share
 *  2) find mapping full path - finds all possible list previous elements
 *  3) upgrade lock to exclusive
 *  4) bump mapping busy count
 *  5) mark remove-in-progress
 *  6) check and bump remove chunk cursor if needed
 *  7) unlock pmap
 *  8) if something to invalidate, go to step 11
 *  9) drop mapping busy count
 *  10) return with mapRtRemove to force higher level to call again
 *  11) lock PTEG
 *  12) invalidate ptes, no tlbie
 *  13) unlock PTEG
 *  14) repeat 11 - 13 for all pages in chunk
 *  15) if not final chunk, go to step 9
 *  16) invalidate tlb entries for the whole block map but no more than the full tlb
 *  17) lock pmap share
 *  18) find mapping full path - finds all possible list previous elements
 *  19) upgrade lock to exclusive
 *  20) remove mapping from search list
 *  21) drop mapping busy count
 *  22) drain mapping busy count
 */
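;
;           The common shape of all three cases, as a C sketch (hypothetical
;           names; the per-case PTE and chunking details are in the assembly
;           below):
;
;               mapping_t *hw_rem_map_sketch(pmap_t *pmap, addr64_t va, addr64_t *next)
;               {
;                   sxlkShared(&pmap->pmapSXlk);            /* lock pmap share              */
;                   mapping_t *mp = mapSearchFullNext(pmap, va, next);
;                   if (mp == NULL) {
;                       sxlkUnlock(&pmap->pmapSXlk);
;                       return (mapping_t *)mapRtNotFnd;    /* return code, not a pointer   */
;                   }
;                   upgradeToExclusive(&pmap->pmapSXlk);    /* promote, or convert + rescan */
;                   mapBumpBusy(mp);
;                   invalidatePtes(mp);                     /* none, one, or a chunk        */
;                   mapRemove(pmap, mp);                    /* dequeue from the search list */
;                   sxlkUnlock(&pmap->pmapSXlk);
;                   removeFromPhysent(mp);                  /* regular pages only           */
;                   mapDropBusy(mp);
;                   mapDrainBusy(mp);                       /* wait until no one else uses it */
;                   return mp;                              /* caller gets the removed mapping */
;               }
;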
            .align  5
            .globl  EXT(hw_rem_map)

LEXT(hw_rem_map)

;           NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE
;           THE HW_PURGE_* ROUTINES ALSO
;

#define hrmStackSize ((31-15+1)*4)+4

            stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)    ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r15,FM_ARG0+0x00(r1)        ; Save a register
            stw     r16,FM_ARG0+0x04(r1)        ; Save a register
            stw     r17,FM_ARG0+0x08(r1)        ; Save a register
            stw     r18,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r19,FM_ARG0+0x10(r1)        ; Save a register
            mfsprg  r19,2                       ; Get feature flags
            stw     r20,FM_ARG0+0x14(r1)        ; Save a register
            stw     r21,FM_ARG0+0x18(r1)        ; Save a register
            mtcrf   0x02,r19                    ; move pf64Bit to cr6
            stw     r22,FM_ARG0+0x1C(r1)        ; Save a register
            stw     r23,FM_ARG0+0x20(r1)        ; Save a register
            stw     r24,FM_ARG0+0x24(r1)        ; Save a register
            stw     r25,FM_ARG0+0x28(r1)        ; Save a register
            stw     r26,FM_ARG0+0x2C(r1)        ; Save a register
            stw     r27,FM_ARG0+0x30(r1)        ; Save a register
            stw     r28,FM_ARG0+0x34(r1)        ; Save a register
            stw     r29,FM_ARG0+0x38(r1)        ; Save a register
            stw     r30,FM_ARG0+0x3C(r1)        ; Save a register
            stw     r31,FM_ARG0+0x40(r1)        ; Save a register
            stw     r6,FM_ARG0+0x44(r1)         ; Save address to save next mapped vaddr
            stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)  ; Save the return
            lwz     r11,pmapFlags(r3)           ; Get pmaps flags
            rlwinm. r11,r11,0,pmapVMgsaa        ; Is guest shadow assist active?
            bne     hrmPanic                    ; Call not valid for guest shadow assist pmap

            bt++    pf64Bitb,hrmSF1             ; skip if 64-bit (only they take the hint)
            lwz     r9,pmapvr+4(r3)             ; Get conversion mask
            b       hrmSF1x                     ; Done...

hrmSF1:     ld      r9,pmapvr(r3)               ; Get conversion mask

hrmSF1x:    bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            xor     r28,r3,r9                   ; Convert the pmap to physical addressing
;
;           Here is where we join in from the hw_purge_* routines
;

hrmJoin:    lwz     r3,pmapFlags(r28)           ; Get pmap's flags
            mfsprg  r19,2                       ; Get feature flags again (for alternate entries)

            mr      r17,r11                     ; Save the MSR
            mr      r29,r4                      ; Top half of vaddr
            mr      r30,r5                      ; Bottom half of vaddr

            rlwinm. r3,r3,0,pmapVMgsaa          ; Is guest shadow assist active?
            bne--   hrmGuest                    ; Yes, handle specially

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            bne--   hrmBadLock                  ; Nope...
;
;           Note that we do a full search (i.e., no shortcut level skips, etc.)
;           here so that we will know the previous elements so we can dequeue them
;           later. Note: we get back mpFlags in R7.
;

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Go see if we can find it

            andi.   r0,r7,mpPerm                ; Mapping marked permanent?
            crmove  cr5_eq,cr0_eq               ; Remember permanent marking
            mr      r20,r7                      ; Remember mpFlags
            mr.     r31,r3                      ; Did we? (And remember mapping address for later)
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq--   hrmNotFound                 ; Nope, not found...

            bf--    cr5_eq,hrmPerm              ; This one can't be removed...
;
;           Here we try to promote to an exclusive lock. This will fail if someone else
;           holds it shared.
;
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            beq++   hrmGotX                     ; Yeah...
;
;           Since we could not promote our lock, we need to convert to it.
;           That means that we drop the shared lock and wait to get it
;           exclusive. Since we release the lock, we need to do the look up
;           again.
;
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hrmBadLock                  ; Nope, we must have timed out...

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Rescan the list

            andi.   r0,r7,mpPerm                ; Mapping marked permanent?
            crmove  cr5_eq,cr0_eq               ; Remember permanent marking
            mr.     r31,r3                      ; Did we lose it when we converted?
            mr      r20,r7                      ; Remember mpFlags
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq--   hrmNotFound                 ; Yeah, we did, someone tossed it for us...

            bf--    cr5_eq,hrmPerm              ; This one can't be removed...
;
;           We have an exclusive lock on the mapping chain. And we
;           also have the busy count bumped in the mapping so it can
;           not vanish on us.
;
hrmGotX:    mr      r3,r31                      ; Get the mapping
            bl      mapBumpBusy                 ; Bump up the busy count
;
;           Invalidate any PTEs associated with this
;           mapping (more than one if a block) and accumulate the reference
;           and change bits.
;
;           Here is also where we need to split 32- and 64-bit processing
;
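;
;           (In outline, the disconnect below looks like this; a sketch with
;           hypothetical helpers, the assembly is authoritative:
;
;               lockPteg(pca);
;               pte->valid = 0;                 /* zap the PTE in memory              */
;               sync();
;               lock(tlbieLock);                /* only one tlbie at a time on the bus */
;               tlbie(va);
;               if (canDoSMP) { eieio(); tlbsync(); sync(); }
;               unlock(tlbieLock);
;               rc |= pte->rc;                  /* gather the reference and change bits */
;               freeSlot(pca, slot);
;               unlockPteg(pca);
;           )
;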
            lwz     r21,mpPte(r31)              ; Grab the offset to the PTE
            rlwinm  r23,r29,0,1,0               ; Copy high order vaddr to high if 64-bit machine
            mfsdr1  r29                         ; Get the hash table base and size

            rlwinm  r0,r20,0,mpType             ; Isolate mapping type
            cmplwi  cr5,r0,mpBlock              ; Remember whether this is a block mapping
            cmplwi  r0,mpMinSpecial             ; cr0_lt <- not a special mapping type

            rlwinm  r0,r21,0,mpHValidb,mpHValidb    ; See if we actually have a PTE
            ori     r2,r2,0xFFFF                ; Get mask to clean out hash table base (works for both 32- and 64-bit)
            cmpwi   cr1,r0,0                    ; Have we made a PTE for this yet?
            rlwinm  r21,r21,0,~mpHValid         ; Clear out valid bit
            crorc   cr0_eq,cr1_eq,cr0_lt        ; No need to look at PTE if none or a special mapping
            rlwimi  r23,r30,0,0,31              ; Insert low under high part of address
            andc    r29,r29,r2                  ; Clean up hash table base
            li      r22,0                       ; Clear this on out (also sets RC to 0 if we bail)
            mr      r30,r23                     ; Move the now merged vaddr to the correct register
            add     r26,r29,r21                 ; Point to the PTEG slot
            bt++    pf64Bitb,hrmSplit64         ; Go do 64-bit version...

            rlwinm  r9,r21,28,4,29              ; Convert PTEG to PCA entry
            beq-    cr5,hrmBlock32              ; Go treat block specially...
            subfic  r9,r9,-4                    ; Get the PCA entry offset
            bt-     cr0_eq,hrmPysDQ32           ; Skip next if no possible PTE...
            add     r7,r9,r29                   ; Point to the PCA slot

            bl      mapLockPteg                 ; Go lock up the PTEG (Note: we need to save R6 to set PCA)

            lwz     r21,mpPte(r31)              ; Get the quick pointer again
            lwz     r5,0(r26)                   ; Get the top of PTE

            rlwinm. r0,r21,0,mpHValidb,mpHValidb    ; See if we actually have a PTE
            rlwinm  r21,r21,0,~mpHValid         ; Clear out valid bit
            rlwinm  r5,r5,0,1,31                ; Turn off valid bit in PTE
            stw     r21,mpPte(r31)              ; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake)
            beq-    hrmUlckPCA32                ; Pte is gone, no need to invalidate...

            stw     r5,0(r26)                   ; Invalidate the PTE
            li      r9,tlbieLock                ; Get the TLBIE lock

            sync                                ; Make sure the invalid PTE is actually in memory

hrmPtlb32:  lwarx   r5,0,r9                     ; Get the TLBIE lock
            mr.     r5,r5                       ; Is it locked?
            li      r5,1                        ; Get locked indicator
            bne-    hrmPtlb32                   ; It is locked, go spin...
            stwcx.  r5,0,r9                     ; Try to get it
            bne-    hrmPtlb32                   ; We was beat...

            rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb    ; Can this processor do SMP?

            tlbie   r30                         ; Invalidate all corresponding TLB entries

            beq-    hrmNTlbs                    ; Jump if we can not do a TLBSYNC....

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up
            sync                                ; Make sure of it all
hrmNTlbs:   li      r0,0                        ; Clear this
            rlwinm  r2,r21,29,29,31             ; Get slot number (8 byte entries)
            stw     r0,tlbieLock(0)             ; Clear the tlbie lock
            lis     r0,0x8000                   ; Get bit for slot 0
            eieio                               ; Make sure those RC bits have been stashed in PTE

            srw     r0,r0,r2                    ; Get the allocation hash mask
            lwz     r22,4(r26)                  ; Get the latest reference and change bits
            or      r6,r6,r0                    ; Show that this slot is free

hrmUlckPCA32:
            eieio                               ; Make sure all updates come first
            stw     r6,0(r7)                    ; Unlock the PTEG
;
;           Now, it is time to remove the mapping and unlock the chain.
;           But first, we need to make sure no one else is using this
;           mapping so we drain the busy now
;
hrmPysDQ32: mr      r3,r31                      ; Point to the mapping
            bl      mapDrainBusy                ; Go wait until mapping is unused

            mr      r3,r28                      ; Get the pmap to remove from
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapRemove)              ; Remove the mapping from the list

            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            rlwinm  r0,r20,0,mpType             ; Isolate mapping type
            cmplwi  cr1,r0,mpMinSpecial         ; cr1_lt <- not a special mapping type
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            subi    r4,r4,1                     ; Drop down the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            bl      sxlkUnlock                  ; Unlock the search list

            bf--    cr1_lt,hrmRetn32            ; This one has no real memory associated with it so we are done...

            bl      mapPhysFindLock             ; Go find and lock the physent

            lwz     r9,ppLink+4(r3)             ; Get first mapping

            mr      r4,r22                      ; Get the RC bits we just got
            bl      mapPhysMerge                ; Go merge the RC bits

            rlwinm  r9,r9,0,~ppFlags            ; Clear the flags from the mapping pointer

            cmplw   r9,r31                      ; Are we the first on the list?
            bne-    hrmNot1st                   ; Nope...

            lwz     r4,mpAlias+4(r31)           ; Get our new forward pointer
            stw     r9,mpAlias+4(r31)           ; Make sure we are off the chain
            bl      mapPhyCSet32                ; Go set the physent link and preserve flags

            b       hrmPhyDQd                   ; Join up and unlock it all...
hrmPerm:    li      r8,-4096                    ; Get the value we need to round down to a page
            and     r8,r8,r31                   ; Get back to a page
            lwz     r8,mbvrswap+4(r8)           ; Get last half of virtual to real swap

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            xor     r3,r31,r8                   ; Flip mapping address to virtual
            ori     r3,r3,mapRtPerm             ; Set permanent mapping error
            b       hrmErRtn                    ; Leave...
hrmBadLock: li      r3,mapRtBadLk               ; Set bad lock
            b       hrmErRtn                    ; Leave...

hrmEndInSight:
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

hrmDoneChunk:
            mr      r3,r31                      ; Point to the mapping
            bl      mapDropBusy                 ; Drop the busy here since we need to come back
            li      r3,mapRtRemove              ; Say we are still removing this
            b       hrmErRtn                    ; Leave...

hrmNotFound:
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list
            li      r3,mapRtNotFnd              ; No mapping found
hrmErRtn:   bt++    pf64Bitb,hrmSF1z            ; skip if 64-bit (only they take the hint)

            mtmsr   r17                         ; Restore enables/translation/etc.
            isync
            b       hrmRetnCmn                  ; Join the common return code...

hrmSF1z:    mtmsrd  r17                         ; Restore enables/translation/etc.
            isync
            b       hrmRetnCmn                  ; Join the common return code...
hrmNot1st:  mr.     r8,r9                       ; Remember and test current node
            beq-    hrmPhyDQd                   ; Could not find our node, someone must have unmapped us...
            lwz     r9,mpAlias+4(r9)            ; Chain to the next
            cmplw   r9,r31                      ; Is this us?
            bne-    hrmNot1st                   ; Not us...

            lwz     r9,mpAlias+4(r9)            ; Get our forward pointer
            stw     r9,mpAlias+4(r8)            ; Unchain us

hrmPhyDQd:  bl      mapPhysUnlock               ; Unlock the physent chain
hrmRetn32:  rlwinm  r8,r31,0,0,19               ; Find start of page
            mr      r3,r31                      ; Copy the pointer to the mapping
            lwz     r8,mbvrswap+4(r8)           ; Get last half of virtual to real swap
            bl      mapDrainBusy                ; Go wait until mapping is unused

            xor     r3,r31,r8                   ; Flip mapping address to virtual

            mtmsr   r17                         ; Restore enables/translation/etc.
            isync
hrmRetnCmn: lwz     r6,FM_ARG0+0x44(r1)         ; Get address to save next mapped vaddr
            lwz     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)  ; Restore the return
            lwz     r17,FM_ARG0+0x08(r1)        ; Restore a register
            lwz     r18,FM_ARG0+0x0C(r1)        ; Restore a register
            mr.     r6,r6                       ; Should we pass back the "next" vaddr?
            lwz     r19,FM_ARG0+0x10(r1)        ; Restore a register
            lwz     r20,FM_ARG0+0x14(r1)        ; Restore a register
            mtlr    r0                          ; Restore the return

            rlwinm  r16,r16,0,0,19              ; Clean to a page boundary
            beq     hrmNoNextAdr                ; Do not pass back the next vaddr...
            stw     r15,0(r6)                   ; Pass back the top of the next vaddr
            stw     r16,4(r6)                   ; Pass back the bottom of the next vaddr
hrmNoNextAdr:
            lwz     r15,FM_ARG0+0x00(r1)        ; Restore a register
            lwz     r16,FM_ARG0+0x04(r1)        ; Restore a register
            lwz     r21,FM_ARG0+0x18(r1)        ; Restore a register
            rlwinm  r3,r3,0,0,31                ; Clear top of register if 64-bit
            lwz     r22,FM_ARG0+0x1C(r1)        ; Restore a register
            lwz     r23,FM_ARG0+0x20(r1)        ; Restore a register
            lwz     r24,FM_ARG0+0x24(r1)        ; Restore a register
            lwz     r25,FM_ARG0+0x28(r1)        ; Restore a register
            lwz     r26,FM_ARG0+0x2C(r1)        ; Restore a register
            lwz     r27,FM_ARG0+0x30(r1)        ; Restore a register
            lwz     r28,FM_ARG0+0x34(r1)        ; Restore a register
            lwz     r29,FM_ARG0+0x38(r1)        ; Restore a register
            lwz     r30,FM_ARG0+0x3C(r1)        ; Restore a register
            lwz     r31,FM_ARG0+0x40(r1)        ; Restore a register
            lwz     r1,0(r1)                    ; Pop the stack
            blr                                 ; Leave...
;
;           Here is where we come when all is lost. Somehow, we failed a mapping function
;           that must work... All hope is gone. Alas, we die.......
;

hrmPanic:   lis     r0,hi16(Choke)              ; System abend
            ori     r0,r0,lo16(Choke)           ; System abend
            li      r3,failMapping              ; Show that we failed some kind of mapping thing
            sc
;
;           Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
;           in the range. Then, if we did not finish, return a code indicating that we need to
;           be called again. Eventually, we will finish and then, we will do a TLBIE for each
;           PTEG up to the point where we have cleared it all (64 for 32-bit architecture)
;
;           A potential speed up is that we stop the invalidate loop once we have walked through
;           the hash table once. This really is not worth the trouble because we need to have
;           mapped 1/2 of physical RAM in an individual block. Way unlikely.
;
;           We should rethink this and see if we think it will be faster to check PTE and
;           only invalidate the specific PTE rather than all block map PTEs in the PTEG.
;
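;
;           The chunking protocol, as a C sketch (hypothetical names;
;           mapRemChunk pages are handled per call):
;
;               int hrmBlockSketch(pmap_t *pmap, mapping_t *mp)
;               {
;                   unsigned cur = mp->mpBlkRemCur;         /* pages already claimed        */
;                   if (cur >= mp->mpBSize)
;                       return mapRtRemove;                 /* someone has the last chunk   */
;                   mp->mpFlags |= mpRIP;                   /* mark remove-in-progress      */
;                   mp->mpBlkRemCur = cur + mapRemChunk;    /* claim the next chunk         */
;                   sxlkUnlock(&pmap->pmapSXlk);            /* invalidate without the lock  */
;                   invalidateAutogenPtegs(mp, cur,
;                       min(mapRemChunk, mp->mpBSize - cur));
;                   if (cur + mapRemChunk < mp->mpBSize)
;                       return mapRtRemove;                 /* not final: caller calls again */
;                   tlbieRange(mp->mpVAddr,
;                       min(mp->mpBSize, TLB_ENTRIES));     /* never more than the full TLB */
;                   return 0;                               /* final chunk: go dequeue the mapping */
;               }
;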
hrmBlock32: lhz     r23,mpSpace(r31)            ; Get the address space hash
            lhz     r25,mpBSize(r31)            ; Get the number of pages in block
            lwz     r9,mpBlkRemCur(r31)         ; Get our current remove position
            ori     r0,r20,mpRIP                ; Turn on the remove in progress flag
            mfsdr1  r29                         ; Get the hash table base and size
            rlwinm  r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb   ; Get high order of hash
            lwz     r27,mpVAddr+4(r31)          ; Get the base vaddr
            sub     r4,r25,r9                   ; Get number of pages left
            cmplw   cr1,r9,r25                  ; Have we already hit the end?
            addi    r10,r9,mapRemChunk          ; Point to the start of the next chunk
            addi    r2,r4,-mapRemChunk          ; See if mapRemChunk or more
            rlwinm  r26,r29,16,7,15             ; Get the hash table size
            srawi   r2,r2,31                    ; We have -1 if less than mapRemChunk or 0 if equal or more
            stb     r0,mpFlags+3(r31)           ; Save the flags with the mpRIP bit on
            subi    r4,r4,mapRemChunk-1         ; Back off for a running start (will be negative for more than mapRemChunk)
            cmpwi   cr7,r2,0                    ; Remember if we have finished
            slwi    r0,r9,12                    ; Make cursor into page offset
            or      r24,r24,r23                 ; Get full hash
            and     r4,r4,r2                    ; If more than a chunk, bring this back to 0
            rlwinm  r29,r29,0,0,15              ; Isolate the hash table base
            add     r27,r27,r0                  ; Adjust vaddr to start of current chunk
            addi    r4,r4,mapRemChunk-1         ; Add mapRemChunk-1 to get max(num left, chunksize)
            bgt-    cr1,hrmEndInSight           ; Someone is already doing the last chunk...
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            stw     r10,mpBlkRemCur(r31)        ; Set next chunk to do (note: this may indicate after end)
            bl      sxlkUnlock                  ; Unlock the search list while we are invalidating

            rlwinm  r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb  ; Isolate the segment
            rlwinm  r30,r27,26,6,25             ; Shift vaddr to PTEG offset (and remember VADDR in R27)
            xor     r24,r24,r8                  ; Get the proper VSID
            rlwinm  r21,r27,26,10,25            ; Shift page index to PTEG offset (and remember VADDR in R27)
            ori     r26,r26,lo16(0xFFC0)        ; Stick in the rest of the length
            rlwinm  r22,r4,6,10,25              ; Shift size to PTEG offset
            rlwinm  r24,r24,6,0,25              ; Shift hash to PTEG units
            add     r22,r22,r30                 ; Get end address (in PTEG units)
hrmBInv32:  rlwinm  r23,r30,0,10,25             ; Isolate just the page index
            xor     r23,r23,r24                 ; Hash it
            and     r23,r23,r26                 ; Wrap it into the table
            rlwinm  r3,r23,28,4,29              ; Change to PCA offset
            subfic  r3,r3,-4                    ; Get the PCA entry offset
            add     r7,r3,r29                   ; Point to the PCA slot
            cmplw   cr5,r30,r22                 ; Check if we reached the end of the range
            addi    r30,r30,64                  ; bump to the next vaddr

            bl      mapLockPteg                 ; Lock the PTEG

            rlwinm. r4,r6,16,0,7                ; Position, save, and test block mappings in PCA
            add     r5,r23,r29                  ; Point to the PTEG
            li      r0,0                        ; Set an invalid PTE value
            beq+    hrmBNone32                  ; No block map PTEs in this PTEG...
            mtcrf   0x80,r4                     ; Set CRs to select PTE slots
            mtcrf   0x40,r4                     ; Set CRs to select PTE slots
            bf      0,hrmSlot0                  ; No autogen here
            stw     r0,0x00(r5)                 ; Invalidate PTE

hrmSlot0:   bf      1,hrmSlot1                  ; No autogen here
            stw     r0,0x08(r5)                 ; Invalidate PTE

hrmSlot1:   bf      2,hrmSlot2                  ; No autogen here
            stw     r0,0x10(r5)                 ; Invalidate PTE

hrmSlot2:   bf      3,hrmSlot3                  ; No autogen here
            stw     r0,0x18(r5)                 ; Invalidate PTE

hrmSlot3:   bf      4,hrmSlot4                  ; No autogen here
            stw     r0,0x20(r5)                 ; Invalidate PTE

hrmSlot4:   bf      5,hrmSlot5                  ; No autogen here
            stw     r0,0x28(r5)                 ; Invalidate PTE

hrmSlot5:   bf      6,hrmSlot6                  ; No autogen here
            stw     r0,0x30(r5)                 ; Invalidate PTE

hrmSlot6:   bf      7,hrmSlot7                  ; No autogen here
            stw     r0,0x38(r5)                 ; Invalidate PTE

hrmSlot7:   rlwinm  r0,r4,16,16,23              ; Move in use to autogen
            or      r6,r6,r4                    ; Flip on the free bits that correspond to the autogens we cleared
            andc    r6,r6,r0                    ; Turn off all the old autogen bits
hrmBNone32: eieio                               ; Make sure all updates come first

            stw     r6,0(r7)                    ; Unlock and set the PCA

            bne+    cr5,hrmBInv32               ; Go invalidate the next...

            bge+    cr7,hrmDoneChunk            ; We have not as yet done the last chunk, go tell our caller to call again...

            mr      r3,r31                      ; Copy the pointer to the mapping
            bl      mapDrainBusy                ; Go wait until we are sure all other removers are done with this one

            sync                                ; Make sure memory is consistent

            subi    r5,r25,63                   ; Subtract TLB size from page count (note we are 0 based here)
            li      r6,63                       ; Assume full invalidate for now
            srawi   r5,r5,31                    ; Make 0 if we need a full purge, -1 otherwise
            andc    r6,r6,r5                    ; Clear max if we have less to do
            and     r5,r25,r5                   ; Clear count if we have more than max
            lwz     r27,mpVAddr+4(r31)          ; Get the base vaddr again
            li      r7,tlbieLock                ; Get the TLBIE lock
            or      r5,r5,r6                    ; Get number of TLBIEs needed
hrmBTLBlck: lwarx   r2,0,r7                     ; Get the TLBIE lock
            mr.     r2,r2                       ; Is it locked?
            li      r2,1                        ; Get our lock value
            bne-    hrmBTLBlck                  ; It is locked, go wait...
            stwcx.  r2,0,r7                     ; Try to get it
            bne-    hrmBTLBlck                  ; We was beat...

hrmBTLBi:   addic.  r5,r5,-1                    ; See if we did them all
            tlbie   r27                         ; Invalidate it everywhere
            addi    r27,r27,0x1000              ; Up to the next page
            bge+    hrmBTLBi                    ; Make sure we have done it all...

            rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb    ; Can this processor do SMP?
            li      r2,0                        ; Lock clear value

            sync                                ; Make sure all is quiet
            beq-    hrmBNTlbs                   ; Jump if we can not do a TLBSYNC....

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up
            sync                                ; Wait for quiet again

hrmBNTlbs:  stw     r2,tlbieLock(0)             ; Clear the tlbie lock
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            bne-    hrmPanic                    ; Nope...

            lwz     r4,mpVAddr(r31)             ; High order of address
            lwz     r5,mpVAddr+4(r31)           ; Low order of address
            mr      r3,r28                      ; Pass in pmap to search
            mr      r29,r4                      ; Save this in case we need it (only promote fails)
            mr      r30,r5                      ; Save this in case we need it (only promote fails)
            bl      EXT(mapSearchFull)          ; Go see if we can find it

            mr.     r3,r3                       ; Did we? (And remember mapping address for later)
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq-    hrmPanic                    ; Nope, not found...

            cmplw   r3,r31                      ; Same mapping?
            bne-    hrmPanic                    ; Not good...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            mr      r3,r31                      ; Restore the mapping pointer
            beq+    hrmBDone1                   ; Yeah...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hrmPanic                    ; Nope, we must have timed out...

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Rescan the list

            mr.     r3,r3                       ; Did we lose it when we converted?
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq--   hrmPanic                    ; Yeah, we did, someone tossed it for us...

hrmBDone1:  bl      mapDrainBusy                ; Go wait until mapping is unused

            mr      r3,r28                      ; Get the pmap to remove from
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapRemove)              ; Remove the mapping from the list

            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            subi    r4,r4,1                     ; Drop down the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            bl      sxlkUnlock                  ; Unlock the search list

            b       hrmRetn32                   ; We are all done, get out...
;
;           Here we handle the 64-bit version of hw_rem_map
;

hrmSplit64: rlwinm  r9,r21,27,5,29              ; Convert PTEG to PCA entry
            beq--   cr5,hrmBlock64              ; Go treat block specially...
            subfic  r9,r9,-4                    ; Get the PCA entry offset
            bt--    cr0_eq,hrmPysDQ64           ; Skip next if no possible PTE...
            add     r7,r9,r29                   ; Point to the PCA slot

            bl      mapLockPteg                 ; Go lock up the PTEG

            lwz     r21,mpPte(r31)              ; Get the quick pointer again
            ld      r5,0(r26)                   ; Get the top of PTE

            rlwinm. r0,r21,0,mpHValidb,mpHValidb    ; See if we actually have a PTE
            rlwinm  r21,r21,0,~mpHValid         ; Clear out valid bit
            sldi    r23,r5,16                   ; Shift AVPN up to EA format
//          **** Need to adjust above shift based on the page size - large pages need to shift a bit more
            rldicr  r5,r5,0,62                  ; Clear the valid bit
            rldimi  r23,r30,0,36                ; Insert the page portion of the VPN
            stw     r21,mpPte(r31)              ; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake)
            beq--   hrmUlckPCA64                ; Pte is gone, no need to invalidate...
            std     r5,0(r26)                   ; Invalidate the PTE

            li      r9,tlbieLock                ; Get the TLBIE lock

            sync                                ; Make sure the invalid PTE is actually in memory

hrmPtlb64:  lwarx   r5,0,r9                     ; Get the TLBIE lock
            rldicl  r23,r23,0,16                ; Clear bits 0:15 cause they say to
            mr.     r5,r5                       ; Is it locked?
            li      r5,1                        ; Get locked indicator
            bne--   hrmPtlb64w                  ; It is locked, go spin...
            stwcx.  r5,0,r9                     ; Try to get it
            bne--   hrmPtlb64                   ; We was beat...

            tlbie   r23                         ; Invalidate all corresponding TLB entries

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up

            ptesync                             ; Make sure of it all
            li      r0,0                        ; Clear this
            rlwinm  r2,r21,28,29,31             ; Get slot number (16 byte entries)
            stw     r0,tlbieLock(0)             ; Clear the tlbie lock
            oris    r0,r0,0x8000                ; Assume slot 0

            srw     r0,r0,r2                    ; Get slot mask to deallocate

            lwz     r22,12(r26)                 ; Get the latest reference and change bits
            or      r6,r6,r0                    ; Make the guy we killed free

hrmUlckPCA64:
            eieio                               ; Make sure all updates come first

            stw     r6,0(r7)                    ; Unlock and change the PCA
hrmPysDQ64: mr      r3,r31                      ; Point to the mapping
            bl      mapDrainBusy                ; Go wait until mapping is unused

            mr      r3,r28                      ; Get the pmap to remove from
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapRemove)              ; Remove the mapping from the list

            rlwinm  r0,r20,0,mpType             ; Isolate mapping type
            cmplwi  cr1,r0,mpMinSpecial         ; cr1_lt <- not a special mapping type
            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            subi    r4,r4,1                     ; Drop down the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            bl      sxlkUnlock                  ; Unlock the search list

            bf--    cr1_lt,hrmRetn64            ; This one has no real memory associated with it so we are done...

            bl      mapPhysFindLock             ; Go find and lock the physent

            li      r0,ppLFAmask                ; Get mask to clean up mapping pointer
            ld      r9,ppLink(r3)               ; Get first mapping
            rotrdi  r0,r0,ppLFArrot             ; Rotate clean up mask to get 0xF00000000000000F
            mr      r4,r22                      ; Get the RC bits we just got

            bl      mapPhysMerge                ; Go merge the RC bits

            andc    r9,r9,r0                    ; Clean up the mapping pointer

            cmpld   r9,r31                      ; Are we the first on the list?
            bne--   hrmNot1st64                 ; Nope...

            ld      r4,mpAlias(r31)             ; Get our forward pointer
            std     r9,mpAlias(r31)             ; Make sure we are off the chain
            bl      mapPhyCSet64                ; Go set the physent link and preserve flags

            b       hrmPhyDQd64                 ; Join up and unlock it all...
hrmPtlb64w: li      r5,lgKillResv               ; Point to some spare memory
            stwcx.  r5,0,r5                     ; Clear the pending reservation

hrmPtlb64x: lwz     r5,0(r9)                    ; Do a regular load to avoid taking reservation
            mr.     r5,r5                       ; is it locked?
            beq++   hrmPtlb64                   ; Nope...
            b       hrmPtlb64x                  ; Sniff some more...
hrmNot1st64:
            mr.     r8,r9                       ; Remember and test current node
            beq--   hrmPhyDQd64                 ; Could not find our node...
            ld      r9,mpAlias(r9)              ; Chain to the next
            cmpld   r9,r31                      ; Is this us?
            bne--   hrmNot1st64                 ; Not us...

            ld      r9,mpAlias(r9)              ; Get our forward pointer
            std     r9,mpAlias(r8)              ; Unchain us

hrmPhyDQd64:
            bl      mapPhysUnlock               ; Unlock the physent chain
hrmRetn64:  rldicr  r8,r31,0,51                 ; Find start of page
            mr      r3,r31                      ; Copy the pointer to the mapping
            lwz     r8,mbvrswap+4(r8)           ; Get last half of virtual to real swap
            bl      mapDrainBusy                ; Go wait until mapping is unused

            xor     r3,r31,r8                   ; Flip mapping address to virtual

            mtmsrd  r17                         ; Restore enables/translation/etc.
            isync

            b       hrmRetnCmn                  ; Join the common return path...
;
;           Check hrmBlock32 for comments.
;

hrmBlock64: lhz     r24,mpSpace(r31)            ; Get the address space hash
            lhz     r25,mpBSize(r31)            ; Get the number of pages in block
            lwz     r9,mpBlkRemCur(r31)         ; Get our current remove position
            ori     r0,r20,mpRIP                ; Turn on the remove in progress flag
            mfsdr1  r29                         ; Get the hash table base and size
            ld      r27,mpVAddr(r31)            ; Get the base vaddr
            rlwinm  r5,r29,0,27,31              ; Isolate the size
            sub     r4,r25,r9                   ; Get number of pages left
            cmplw   cr1,r9,r25                  ; Have we already hit the end?
            addi    r10,r9,mapRemChunk          ; Point to the start of the next chunk
            addi    r2,r4,-mapRemChunk          ; See if mapRemChunk or more
            stb     r0,mpFlags+3(r31)           ; Save the flags with the mpRIP bit on
            srawi   r2,r2,31                    ; We have -1 if less than mapRemChunk or 0 if equal or more
            subi    r4,r4,mapRemChunk-1         ; Back off for a running start (will be negative for more than mapRemChunk)
            cmpwi   cr7,r2,0                    ; Remember if we are doing the last chunk
            and     r4,r4,r2                    ; If more than a chunk, bring this back to 0
            srdi    r27,r27,12                  ; Change address into page index
            addi    r4,r4,mapRemChunk-1         ; Add mapRemChunk-1 to get max(num left, chunksize)
            add     r27,r27,r9                  ; Adjust vaddr to start of current chunk
            bgt--   cr1,hrmEndInSight           ; Someone is already doing the last chunk...
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            stw     r10,mpBlkRemCur(r31)        ; Set next chunk to do (note: this may indicate after end)
            bl      sxlkUnlock                  ; Unlock the search list while we are invalidating

            rlwimi  r24,r24,14,4,17             ; Insert a copy of space hash
            eqv     r26,r26,r26                 ; Get all foxes here
            rldimi  r24,r24,28,8                ; Make a couple copies up higher
            rldicr  r29,r29,0,47                ; Isolate just the hash table base
            subfic  r5,r5,46                    ; Get number of leading zeros
            srd     r26,r26,r5                  ; Shift the size bits over
            mr      r30,r27                     ; Get start of chunk to invalidate
            rldicr  r26,r26,0,56                ; Make length in PTEG units
            add     r22,r4,r30                  ; Get end page number
hrmBInv64:  srdi    r0,r30,2                    ; Shift page index over to form ESID
            rldicr  r0,r0,0,49                  ; Clean all but segment portion
            rlwinm  r2,r30,0,16,31              ; Get the current page index
            xor     r0,r0,r24                   ; Form VSID
            xor     r8,r2,r0                    ; Hash the vaddr
            sldi    r8,r8,7                     ; Make into PTEG offset
            and     r23,r8,r26                  ; Wrap into the hash table
            rlwinm  r3,r23,27,5,29              ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here)
            subfic  r3,r3,-4                    ; Get the PCA entry offset
            add     r7,r3,r29                   ; Point to the PCA slot

            cmplw   cr5,r30,r22                 ; Have we reached the end of the range?

            bl      mapLockPteg                 ; Lock the PTEG

            rlwinm. r4,r6,16,0,7                ; Extract the block mappings in this here PTEG and see if there are any
            add     r5,r23,r29                  ; Point to the PTEG
            li      r0,0                        ; Set an invalid PTE value
            beq++   hrmBNone64                  ; No block map PTEs in this PTEG...
            mtcrf   0x80,r4                     ; Set CRs to select PTE slots
            mtcrf   0x40,r4                     ; Set CRs to select PTE slots
            bf      0,hrmSlot0s                 ; No autogen here
            std     r0,0x00(r5)                 ; Invalidate PTE

hrmSlot0s:  bf      1,hrmSlot1s                 ; No autogen here
            std     r0,0x10(r5)                 ; Invalidate PTE

hrmSlot1s:  bf      2,hrmSlot2s                 ; No autogen here
            std     r0,0x20(r5)                 ; Invalidate PTE

hrmSlot2s:  bf      3,hrmSlot3s                 ; No autogen here
            std     r0,0x30(r5)                 ; Invalidate PTE

hrmSlot3s:  bf      4,hrmSlot4s                 ; No autogen here
            std     r0,0x40(r5)                 ; Invalidate PTE

hrmSlot4s:  bf      5,hrmSlot5s                 ; No autogen here
            std     r0,0x50(r5)                 ; Invalidate PTE

hrmSlot5s:  bf      6,hrmSlot6s                 ; No autogen here
            std     r0,0x60(r5)                 ; Invalidate PTE

hrmSlot6s:  bf      7,hrmSlot7s                 ; No autogen here
            std     r0,0x70(r5)                 ; Invalidate PTE

hrmSlot7s:  rlwinm  r0,r4,16,16,23              ; Move in use to autogen
            or      r6,r6,r4                    ; Flip on the free bits that correspond to the autogens we cleared
            andc    r6,r6,r0                    ; Turn off all the old autogen bits
hrmBNone64: eieio                               ; Make sure all updates come first
            stw     r6,0(r7)                    ; Unlock and set the PCA

            addi    r30,r30,1                   ; bump to the next PTEG
            bne++   cr5,hrmBInv64               ; Go invalidate the next...

            bge+    cr7,hrmDoneChunk            ; We have not as yet done the last chunk, go tell our caller to call again...

            mr      r3,r31                      ; Copy the pointer to the mapping
            bl      mapDrainBusy                ; Go wait until we are sure all other removers are done with this one

            sync                                ; Make sure memory is consistent

            subi    r5,r25,255                  ; Subtract TLB size from page count (note we are 0 based here)
            li      r6,255                      ; Assume full invalidate for now
            srawi   r5,r5,31                    ; Make 0 if we need a full purge, -1 otherwise
            andc    r6,r6,r5                    ; Clear max if we have less to do
            and     r5,r25,r5                   ; Clear count if we have more than max
            sldi    r24,r24,28                  ; Get the full XOR value over to segment position
            ld      r27,mpVAddr(r31)            ; Get the base vaddr
            li      r7,tlbieLock                ; Get the TLBIE lock
            or      r5,r5,r6                    ; Get number of TLBIEs needed
hrmBTLBlcl: lwarx   r2,0,r7                     ; Get the TLBIE lock
            mr.     r2,r2                       ; Is it locked?
            li      r2,1                        ; Get our lock value
            bne--   hrmBTLBlcm                  ; It is locked, go wait...
            stwcx.  r2,0,r7                     ; Try to get it
            bne--   hrmBTLBlcl                  ; We was beat...

hrmBTLBj:   sldi    r2,r27,maxAdrSpb            ; Move to make room for address space ID
            rldicr  r2,r2,0,35-maxAdrSpb        ; Clear out the extra
            addic.  r5,r5,-1                    ; See if we did them all
            xor     r2,r2,r24                   ; Make the VSID
            rldimi  r2,r27,0,36                 ; Insert the page portion of the VPN
            rldicl  r2,r2,0,16                  ; Clear bits 0:15 cause they say we gotta

            tlbie   r2                          ; Invalidate it everywhere
            addi    r27,r27,0x1000              ; Up to the next page
            bge++   hrmBTLBj                    ; Make sure we have done it all...

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; wait for everyone to catch up

            li      r2,0                        ; Lock clear value

            ptesync                             ; Wait for quiet again

            stw     r2,tlbieLock(0)             ; Clear the tlbie lock
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            bne-    hrmPanic                    ; Nope...

            lwz     r4,mpVAddr(r31)             ; High order of address
            lwz     r5,mpVAddr+4(r31)           ; Low order of address
            mr      r3,r28                      ; Pass in pmap to search
            mr      r29,r4                      ; Save this in case we need it (only promote fails)
            mr      r30,r5                      ; Save this in case we need it (only promote fails)
            bl      EXT(mapSearchFull)          ; Go see if we can find it

            mr.     r3,r3                       ; Did we? (And remember mapping address for later)
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq-    hrmPanic                    ; Nope, not found...

            cmpld   r3,r31                      ; Same mapping?
            bne-    hrmPanic                    ; Not good...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            mr      r3,r31                      ; Restore the mapping pointer
            beq+    hrmBDone2                   ; Yeah...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hrmPanic                    ; Nope, we must have timed out...

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Rescan the list

            mr.     r3,r3                       ; Did we lose it when we converted?
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq--   hrmPanic                    ; Yeah, we did, someone tossed it for us...

hrmBDone2:  bl      mapDrainBusy                ; Go wait until mapping is unused

            mr      r3,r28                      ; Get the pmap to remove from
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapRemove)              ; Remove the mapping from the list

            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            subi    r4,r4,1                     ; Drop down the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            bl      sxlkUnlock                  ; Unlock the search list

            b       hrmRetn64                   ; We are all done, get out...
hrmBTLBlcm: li      r2,lgKillResv               ; Get space unreserve line
            stwcx.  r2,0,r2                     ; Unreserve it

hrmBTLBlcn: lwz     r2,0(r7)                    ; Get the TLBIE lock
            mr.     r2,r2                       ; Is it held?
            beq++   hrmBTLBlcl                  ; Nope...
            b       hrmBTLBlcn                  ; Yeah...
;
;           Guest shadow assist -- mapping remove
;
;           Method of operation:
;               o Locate the VMM extension block and the host pmap
;               o Obtain the host pmap's search lock exclusively
;               o Locate the requested mapping in the shadow hash table,
;                 exit if not found
;               o If connected, disconnect the PTE and gather R&C to physent
;               o Locate and lock the physent
;               o Remove mapping from physent's chain
;               o Unlock physent
;               o Unlock pmap's search lock
;
;           Non-volatile registers on entry:
;               r17: caller's msr image
;               r19: sprg2 (feature flags)
;               r28: guest pmap's physical address
;               r29: high-order 32 bits of guest virtual address
;               r30: low-order 32 bits of guest virtual address
;
;           Non-volatile register usage:
;               r26: VMM extension block's physical address
;               r27: host pmap's physical address
;               r28: guest pmap's physical address
;               r29: physent's physical address
;               r30: guest virtual address
;               r31: guest mapping's physical address
;
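;
;           The shadow hash probe, as a C sketch (the index helpers are
;           hypothetical stand-ins for the GV_* bit-field arithmetic below):
;
;               typedef unsigned int u32;
;               typedef unsigned long long u64;
;
;               mapping_t *guestHashProbe(vmm_ext_t *vmx, u32 space_id, u64 gva)
;               {
;                   u32 hash = space_id ^ (u32)(gva >> 12);     /* spaceID ^ (vaddr >> 12) */
;                   char *page = vmx->hashPageIndex[hashPage(hash)];    /* hash page paddr */
;                   mapping_t *slot = (mapping_t *)(page + hashGroup(hash));    /* group base */
;                   for (int i = 0; i < GV_SLOTS; i++, slot++)
;                       if (!(slot->mpFlags & mpgFree) &&
;                           slot->mpSpace == space_id &&
;                           (slot->mpVAddr & ~0xFFFULL) == gva)
;                           return slot;                        /* hit  */
;                   return NULL;                                /* miss */
;               }
;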
hrmGuest:   rlwinm  r30,r30,0,0xFFFFF000        ; Clean up low-order bits of 32-bit guest vaddr
            bt++    pf64Bitb,hrmG64             ; Test for 64-bit machine
            lwz     r26,pmapVmmExtPhys+4(r28)   ; r26 <- VMM pmap extension block paddr
            lwz     r27,vmxHostPmapPhys+4(r26)  ; r27 <- host pmap's paddr
            b       hrmGStart                   ; Join common code

hrmG64:     ld      r26,pmapVmmExtPhys(r28)     ; r26 <- VMM pmap extension block paddr
            ld      r27,vmxHostPmapPhys(r26)    ; r27 <- host pmap's paddr
            rldimi  r30,r29,32,0                ; Insert high-order 32 bits of 64-bit guest vaddr
hrmGStart:  la      r3,pmapSXlk(r27)            ; r3 <- host pmap's search lock address
            bl      sxlkExclusive               ; Get lock exclusive

            lwz     r3,vxsGrm(r26)              ; Get mapping remove request count

            lwz     r9,pmapSpace(r28)           ; r9 <- guest space ID number
            la      r31,VMX_HPIDX_OFFSET(r26)   ; r31 <- base of hash page physical index
            srwi    r11,r30,12                  ; Form shadow hash:
            xor     r11,r9,r11                  ;   spaceID ^ (vaddr >> 12)
            rlwinm  r12,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
                                                ; Form index offset from hash page number
            add     r31,r31,r12                 ; r31 <- hash page index entry
            li      r0,(GV_SLOTS - 1)           ; Prepare to iterate over mapping slots
            mtctr   r0                          ;  in this group
            bt++    pf64Bitb,hrmG64Search       ; Separate handling for 64-bit search
            lwz     r31,4(r31)                  ; r31 <- hash page paddr
            rlwimi  r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
                                                ; r31 <- hash group paddr

            addi    r3,r3,1                     ; Increment remove request count
            stw     r3,vxsGrm(r26)              ; Update remove request count

            lwz     r3,mpFlags(r31)             ; r3 <- 1st mapping slot's flags
            lhz     r4,mpSpace(r31)             ; r4 <- 1st mapping slot's space ID
            lwz     r5,mpVAddr+4(r31)           ; r5 <- 1st mapping slot's virtual address
            b       hrmG32SrchLp                ; Let the search begin!
hrmG32SrchLp:
            mr      r6,r3                       ; r6 <- current mapping slot's flags
            lwz     r3,mpFlags+GV_SLOT_SZ(r31)  ; r3 <- next mapping slot's flags
            mr      r7,r4                       ; r7 <- current mapping slot's space ID
            lhz     r4,mpSpace+GV_SLOT_SZ(r31)  ; r4 <- next mapping slot's space ID
            clrrwi  r8,r5,12                    ; r8 <- current mapping slot's virtual addr w/o flags
            lwz     r5,mpVAddr+4+GV_SLOT_SZ(r31)    ; r5 <- next mapping slot's virtual addr
            rlwinm  r11,r6,0,mpgFree            ; Isolate guest free mapping flag
            xor     r7,r7,r9                    ; Compare space ID
            or      r0,r11,r7                   ; r0 <- free || space mismatch
            xor     r8,r8,r30                   ; Compare virtual address
            or.     r0,r0,r8                    ; cr0_eq <- !free && space match && virtual addr match
            beq     hrmGSrchHit                 ; Join common path on hit (r31 points to guest mapping)

            addi    r31,r31,GV_SLOT_SZ          ; r31 <- next mapping slot
            bdnz    hrmG32SrchLp                ; Iterate
            mr      r6,r3                       ; r6 <- current mapping slot's flags
            clrrwi  r5,r5,12                    ; Remove flags from virtual address
            rlwinm  r11,r6,0,mpgFree            ; Isolate guest free mapping flag
            xor     r4,r4,r9                    ; Compare space ID
            or      r0,r11,r4                   ; r0 <- free || space mismatch
            xor     r5,r5,r30                   ; Compare virtual address
            or.     r0,r0,r5                    ; cr0_eq <- !free && space match && virtual addr match
            beq     hrmGSrchHit                 ; Join common path on hit (r31 points to guest mapping)
            b       hrmGSrchMiss                ; No joy in our hash group
hrmG64Search:
            ld      r31,0(r31)                  ; r31 <- hash page paddr
            insrdi  r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
                                                ; r31 <- hash group paddr
            lwz     r3,mpFlags(r31)             ; r3 <- 1st mapping slot's flags
            lhz     r4,mpSpace(r31)             ; r4 <- 1st mapping slot's space ID
            ld      r5,mpVAddr(r31)             ; r5 <- 1st mapping slot's virtual address
            b       hrmG64SrchLp                ; Let the search begin!
1590 mr r6,r3 ; r6 <- current mapping slot's flags
1591 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1592 mr r7,r4 ; r7 <- current mapping slot's space ID
1593 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1594 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1595 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
1596 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1597 xor r7,r7,r9 ; Compare space ID
1598 or r0,r11,r7 ; r0 <- !(free && space match)
1599 xor r8,r8,r30 ; Compare virtual address
1600 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1601 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1603 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1604 bdnz hrmG64SrchLp ; Iterate
1606 mr r6,r3 ; r6 <- current mapping slot's flags
1607 clrrdi r5,r5,12 ; Remove flags from virtual address
1608 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1609 xor r4,r4,r9 ; Compare space ID
1610 or r0,r11,r4 ; r0 <- !(free && space match)
1611 xor r5,r5,r30 ; Compare virtual address
1612 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1613 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1615 lwz r3,vxsGrmMiss(r26) ; Get remove miss count
1616 li r25,mapRtNotFnd ; Return not found
1617 addi r3,r3,1 ; Increment miss count
1618 stw r3,vxsGrmMiss(r26) ; Update miss count
1619 b hrmGReturn ; Join guest return
1623 rlwinm. r0,r6,0,mpgDormant ; Is this entry dormant?
1624 bne hrmGDormant ; Yes, nothing to disconnect
1626 lwz r3,vxsGrmActive(r26) ; Get active hit count
1627 addi r3,r3,1 ; Increment active hit count
1628 stw r3,vxsGrmActive(r26) ; Update hit count
1630 bt++ pf64Bitb,hrmGDscon64 ; Handle 64-bit disconnect separately
1631 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
1632 ; r31 <- mapping's physical address
1633 ; r3 -> PTE slot physical address
1634 ; r4 -> High-order 32 bits of PTE
1635 ; r5 -> Low-order 32 bits of PTE
1637 ; r7 -> PCA physical address
1638 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
1639 b hrmGFreePTE ; Join 64-bit path to release the PTE
1641 bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
1642 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
1644 mr. r3,r3 ; Was there a valid PTE?
1645 beq hrmGDormant ; No valid PTE, we're almost done
1646 lis r0,0x8000 ; Prepare free bit for this slot
1647 srw r0,r0,r2 ; Position free bit
1648 or r6,r6,r0 ; Set it in our PCA image
1649 lwz r8,mpPte(r31) ; Get PTE offset
1650 rlwinm r8,r8,0,~mpHValid ; Make the offset invalid
1651 stw r8,mpPte(r31) ; Save invalidated PTE offset
1652 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
1653 stw r6,0(r7) ; Update PCA and unlock the PTEG
1656 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
1657 bl mapFindLockPN ; Find 'n' lock this page's physent
1658 mr. r29,r3 ; Got lock on our physent?
1659 beq-- hrmGBadPLock ; No, time to bail out
1661 crset cr1_eq ; cr1_eq <- previous link is the anchor
1662 bt++ pf64Bitb,hrmGRemove64 ; Use 64-bit version on 64-bit machine
1663 la r11,ppLink+4(r29) ; Point to chain anchor
1664 lwz r9,ppLink+4(r29) ; Get chain anchor
1665 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
1667 beq- hrmGPEMissMiss ; End of chain, this is not good
1668 cmplw r9,r31 ; Is this the mapping to remove?
1669 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
1670 bne hrmGRemNext ; No, chain onward
1671 bt cr1_eq,hrmGRemRetry ; Mapping to remove is chained from anchor
1672 stw r8,0(r11) ; Unchain gpv->phys mapping
1673 b hrmGDelete ; Finish deleting mapping
1675 lwarx r0,0,r11 ; Get previous link
1676 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
1677 stwcx. r0,0,r11 ; Update previous link
1678 bne- hrmGRemRetry ; Lost reservation, retry
1679 b hrmGDelete ; Finish deleting mapping
1682 la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
1683 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1684 mr. r9,r8 ; Does next entry exist?
1685 b hrmGRemLoop ; Carry on
1688 li r7,ppLFAmask ; Get mask to clean up mapping pointer
			rotrdi	r7,r7,ppLFArrot				; Rotate clean up mask to get 0xF00000000000000F
1690 la r11,ppLink(r29) ; Point to chain anchor
1691 ld r9,ppLink(r29) ; Get chain anchor
1692 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
1694 beq-- hrmGPEMissMiss ; End of chain, this is not good
1695 cmpld r9,r31 ; Is this the mapping to remove?
			ld		r8,mpAlias(r9)				; Get forward chain pointer
1697 bne hrmGRem64Nxt ; No mapping to remove, chain on, dude
1698 bt cr1_eq,hrmGRem64Rt ; Mapping to remove is chained from anchor
1699 std r8,0(r11) ; Unchain gpv->phys mapping
1700 b hrmGDelete ; Finish deleting mapping
1702 ldarx r0,0,r11 ; Get previous link
1703 and r0,r0,r7 ; Get flags
1704 or r0,r0,r8 ; Insert new forward pointer
1705 stdcx. r0,0,r11 ; Slam it back in
1706 bne-- hrmGRem64Rt ; Lost reservation, retry
1707 b hrmGDelete ; Finish deleting mapping
1711 la r11,mpAlias(r9) ; Point to (soon to be) previous link
1712 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1713 mr. r9,r8 ; Does next entry exist?
1714 b hrmGRem64Lp ; Carry on
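;
;			In rough C, the unlink performed above looks like this sketch
;			(PP_FLAGS and the helper names are illustrative; the asm keeps
;			the "previous link is the anchor" state in cr1):
;
/*
	#define PP_FLAGS 0xF00000000000000FULL	// lock/flag/attribute bits in anchor

	typedef struct mapping { unsigned long long mpAlias; } mapping;

	// Interior links are plain stores (the physent lock covers the walk);
	// the anchor word mixes flag bits with the pointer, so it is rewritten
	// with a load-reserve/store-conditional loop that preserves those flags.
	static void pe_unlink(unsigned long long *anchor, mapping *prev, mapping *mp)
	{
		if (prev) {
			prev->mpAlias = mp->mpAlias;
			return;
		}
		unsigned long long old = __atomic_load_n(anchor, __ATOMIC_RELAXED);
		unsigned long long new;
		do {
			new = (old & PP_FLAGS) | mp->mpAlias;
		} while (!__atomic_compare_exchange_n(anchor, &old, new, 0,
		                                      __ATOMIC_RELAXED, __ATOMIC_RELAXED));
	}
*/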
1717 mr r3,r29 ; r3 <- physent addr
1718 bl mapPhysUnlock ; Unlock physent chain
1719 lwz r3,mpFlags(r31) ; Get mapping's flags
1720 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
1721 ori r3,r3,mpgFree ; Mark mapping free
1722 stw r3,mpFlags(r31) ; Update flags
1723 li r25,mapRtGuest ; Set return code to 'found guest mapping'
1726 la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
1727 bl sxlkUnlock ; Release host pmap search lock
1729 mr r3,r25 ; r3 <- return code
1730 bt++ pf64Bitb,hrmGRtn64 ; Handle 64-bit separately
1731 mtmsr r17 ; Restore 'rupts, translation
1732 isync ; Throw a small wrench into the pipeline
1733 b hrmRetnCmn ; Nothing to do now but pop a frame and return
1734 hrmGRtn64: mtmsrd r17 ; Restore 'rupts, translation, 32-bit mode
1735 b hrmRetnCmn ; Join common return
1739 lis r0,hi16(Choke) ; Seen the arrow on the doorpost
1740 ori r0,r0,lo16(Choke) ; Sayin' "THIS LAND IS CONDEMNED"
1741 li r3,failMapping ; All the way from New Orleans
1746 * mapping *hw_purge_phys(physent) - remove a mapping from the system
1748 * Upon entry, R3 contains a pointer to a physent.
1750 * This function removes the first mapping from a physical entry
1751 * alias list. It locks the list, extracts the vaddr and pmap from
1752 * the first entry. It then jumps into the hw_rem_map function.
1753 * NOTE: since we jump into rem_map, we need to set up the stack
1754 * identically. Also, we set the next parm to 0 so we do not
1755 * try to save a next vaddr.
 *	We return the virtual address of the removed mapping as a
 *	64-bit quantity.
1760 * Note that this is designed to be called from 32-bit mode with a stack.
 *	We disable translation and all interruptions here. This keeps us
1763 * from having to worry about a deadlock due to having anything locked
1764 * and needing it to process a fault.
1766 * Note that this must be done with both interruptions off and VM off
 *	Remove mapping via physical page (mapping_purge)
 *
 *		1) lock physent
 *		2) extract vaddr and pmap
 *		3) unlock physent
 *		4) do "remove mapping via pmap"
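;
;			A rough C sketch of those steps (helper and type names are
;			assumptions; the real join into hw_rem_map passes state in
;			registers and a shared stack frame, not as a call):
;
/*
	unsigned long long hw_purge_phys_sketch(struct physent *pp)
	{
		mapPhysLock(pp);
		struct mapping *mp = first_mapping(pp);		// ppLink, flag bits stripped
		if (mp == 0) {
			mapPhysUnlock(pp);
			return mapRtEmpty;						// alias chain is empty
		}
		struct pmap *pm = pmapTrans[mp->mpSpace];	// space hash to pmap
		unsigned long long va = mp->mpVAddr;		// vaddr of the victim
		mapPhysUnlock(pp);
		return hw_rem_map(pm, va, 0);				// next parm 0: no next vaddr
	}
*/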
1780 .globl EXT(hw_purge_phys)
1783 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1784 mflr r0 ; Save the link register
1785 stw r15,FM_ARG0+0x00(r1) ; Save a register
1786 stw r16,FM_ARG0+0x04(r1) ; Save a register
1787 stw r17,FM_ARG0+0x08(r1) ; Save a register
1788 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1789 stw r19,FM_ARG0+0x10(r1) ; Save a register
1790 stw r20,FM_ARG0+0x14(r1) ; Save a register
1791 stw r21,FM_ARG0+0x18(r1) ; Save a register
1792 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1793 stw r23,FM_ARG0+0x20(r1) ; Save a register
1794 stw r24,FM_ARG0+0x24(r1) ; Save a register
1795 stw r25,FM_ARG0+0x28(r1) ; Save a register
1796 li r6,0 ; Set no next address return
1797 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1798 stw r27,FM_ARG0+0x30(r1) ; Save a register
1799 stw r28,FM_ARG0+0x34(r1) ; Save a register
1800 stw r29,FM_ARG0+0x38(r1) ; Save a register
1801 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1802 stw r31,FM_ARG0+0x40(r1) ; Save a register
1803 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1804 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1806 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1808 bl mapPhysLock ; Lock the physent
1810 bt++ pf64Bitb,hppSF ; skip if 64-bit (only they take the hint)
1812 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
1813 li r0,ppFlags ; Set the bottom stuff to clear
1814 b hppJoin ; Join the common...
1816 hppSF: li r0,ppLFAmask
1817 ld r12,ppLink(r3) ; Get the pointer to the first mapping
			rotrdi	r0,r0,ppLFArrot				; Rotate clean up mask to get 0xF00000000000000F
1820 hppJoin: andc. r12,r12,r0 ; Clean and test link
1821 beq-- hppNone ; There are no more mappings on physical page
1823 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1824 lhz r7,mpSpace(r12) ; Get the address space hash
			ori		r28,r28,lo16(EXT(pmapTrans))	; Get the bottom of the start of the pmap hash to pmap translate table
1826 slwi r0,r7,2 ; Multiply space by 4
1827 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
1828 slwi r7,r7,3 ; Multiply space by 8
1829 lwz r5,mpVAddr+4(r12) ; and the bottom
1830 add r7,r7,r0 ; Get correct displacement into translate table
1831 lwz r28,0(r28) ; Get the actual translation map
1833 add r28,r28,r7 ; Point to the pmap translation
1835 bl mapPhysUnlock ; Time to unlock the physical entry
1837 bt++ pf64Bitb,hppSF2 ; skip if 64-bit (only they take the hint)
1839 lwz r28,pmapPAddr+4(r28) ; Get the physical address of the pmap
1840 b hrmJoin ; Go remove the mapping...
1842 hppSF2: ld r28,pmapPAddr(r28) ; Get the physical address of the pmap
1843 b hrmJoin ; Go remove the mapping...
1847 hppNone: bl mapPhysUnlock ; Time to unlock the physical entry
1849 bt++ pf64Bitb,hppSF3 ; skip if 64-bit (only they take the hint)...
1851 mtmsr r11 ; Restore enables/translation/etc.
1853 b hppRetnCmn ; Join the common return code...
1855 hppSF3: mtmsrd r11 ; Restore enables/translation/etc.
1859 ; NOTE: we have not used any registers other than the volatiles to this point
1862 hppRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
1864 li r3,mapRtEmpty ; Physent chain is empty
1865 mtlr r12 ; Restore the return
1866 lwz r1,0(r1) ; Pop the stack
1870 * mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
1872 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
1873 * a 64-bit quantity, it is a long long so it is in R4 and R5.
 *	We return the virtual address of the removed mapping as a
 *	64-bit quantity.
1878 * Note that this is designed to be called from 32-bit mode with a stack.
 *	We disable translation and all interruptions here. This keeps us
1881 * from having to worry about a deadlock due to having anything locked
1882 * and needing it to process a fault.
1884 * Note that this must be done with both interruptions off and VM off
1886 * Remove a mapping which can be reestablished by VM
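;
;			In rough C (illustrative names; the real exit is through
;			hw_rem_map's code, not a call):
;
/*
	unsigned long long hw_purge_map_sketch(struct pmap *pm,
	                                       unsigned long long va,
	                                       unsigned long long *next)
	{
		sxlkExclusive(&pm->pmapSXlk);				// lock the search lists
		struct mapping *mp;
		while ((mp = mapSearchFull(pm, va, &va)) != 0) {	// va becomes next va
			if (is_normal(mp) && !is_permanent(mp) && busy_count(mp) == 0)
				return remove_mapping(pm, mp);		// keeps the lock, joins removal
			if (va == 0)
				break;								// nothing further in pmap
		}
		sxlkUnlock(&pm->pmapSXlk);
		return mapRtNotFnd;
	}
*/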
1891 .globl EXT(hw_purge_map)
1894 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1895 mflr r0 ; Save the link register
1896 stw r15,FM_ARG0+0x00(r1) ; Save a register
1897 stw r16,FM_ARG0+0x04(r1) ; Save a register
1898 stw r17,FM_ARG0+0x08(r1) ; Save a register
1899 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1900 stw r19,FM_ARG0+0x10(r1) ; Save a register
1901 mfsprg r19,2 ; Get feature flags
1902 stw r20,FM_ARG0+0x14(r1) ; Save a register
1903 stw r21,FM_ARG0+0x18(r1) ; Save a register
1904 mtcrf 0x02,r19 ; move pf64Bit cr6
1905 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1906 stw r23,FM_ARG0+0x20(r1) ; Save a register
1907 stw r24,FM_ARG0+0x24(r1) ; Save a register
1908 stw r25,FM_ARG0+0x28(r1) ; Save a register
1909 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1910 stw r27,FM_ARG0+0x30(r1) ; Save a register
1911 stw r28,FM_ARG0+0x34(r1) ; Save a register
1912 stw r29,FM_ARG0+0x38(r1) ; Save a register
1913 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1914 stw r31,FM_ARG0+0x40(r1) ; Save a register
1915 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1916 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
			lwz		r11,pmapFlags(r3)			; Get pmap's flags
1920 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1921 bne hpmPanic ; Call not valid for guest shadow assist pmap
1924 bt++ pf64Bitb,hpmSF1 ; skip if 64-bit (only they take the hint)
			lwz		r9,pmapvr+4(r3)				; Get conversion mask
			b		hpmSF1x						; Done...

hpmSF1:		ld		r9,pmapvr(r3)				; Get conversion mask

hpmSF1x:	bl		EXT(mapSetUp)				; Turn off interrupts, translation, and possibly enter 64-bit
1933 xor r28,r3,r9 ; Convert the pmap to physical addressing
1935 mr r17,r11 ; Save the MSR
1937 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1938 bl sxlkExclusive ; Go get an exclusive lock on the mapping lists
1939 mr. r3,r3 ; Did we get the lock?
1940 bne-- hrmBadLock ; Nope...
1942 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
;			here so that we will know the previous elements and can dequeue them
1947 mr r3,r28 ; Pass in pmap to search
1948 mr r29,r4 ; Top half of vaddr
1949 mr r30,r5 ; Bottom half of vaddr
1950 bl EXT(mapSearchFull) ; Rescan the list
			mr.		r31,r3						; Did we find a mapping? (remember its address for later)
1952 or r0,r4,r5 ; Are we beyond the end?
1953 mr r15,r4 ; Save top of next vaddr
1954 cmplwi cr1,r0,0 ; See if there is another
1955 mr r16,r5 ; Save bottom of next vaddr
1956 bne-- hpmGotOne ; We found one, go check it out...
1958 hpmCNext: bne++ cr1,hpmSearch ; There is another to check...
1959 b hrmNotFound ; No more in pmap to check...
1961 hpmGotOne: lwz r20,mpFlags(r3) ; Get the flags
1962 andi. r0,r20,lo16(mpType|mpPerm) ; cr0_eq <- normal mapping && !permanent
1963 rlwinm r21,r20,8,24,31 ; Extract the busy count
1964 cmplwi cr2,r21,0 ; Is it busy?
1965 crand cr0_eq,cr2_eq,cr0_eq ; not busy and can be removed?
1966 beq++ hrmGotX ; Found, branch to remove the mapping...
1967 b hpmCNext ; Nope...
1969 hpmPanic: lis r0,hi16(Choke) ; System abend
1970 ori r0,r0,lo16(Choke) ; System abend
1971 li r3,failMapping ; Show that we failed some kind of mapping thing
1975 * mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
 *	Upon entry, R3 contains a pointer to the physent and
 *	R4 contains a pointer to the pmap.
1980 * This function removes the first mapping for a specific pmap from a physical entry
1981 * alias list. It locks the list, extracts the vaddr and pmap from
 *	the first appropriate entry. It then jumps into the hw_rem_map function.
1983 * NOTE: since we jump into rem_map, we need to set up the stack
1984 * identically. Also, we set the next parm to 0 so we do not
1985 * try to save a next vaddr.
 *	We return the virtual address of the removed mapping as a
 *	64-bit quantity.
1990 * Note that this is designed to be called from 32-bit mode with a stack.
 *	We disable translation and all interruptions here. This keeps us
1993 * from having to worry about a deadlock due to having anything locked
1994 * and needing it to process a fault.
1996 * Note that this must be done with both interruptions off and VM off
 *	Remove mapping via physical page (mapping_purge)
 *
 *		1) lock physent
 *		2) extract vaddr and pmap
 *		3) unlock physent
 *		4) do "remove mapping via pmap"
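;
;			A rough C sketch of those steps (illustrative names):
;
/*
	unsigned long long hw_purge_space_sketch(struct physent *pp, struct pmap *pm)
	{
		mapPhysLock(pp);
		unsigned int space = pm->pmapSpace;			// space hash to match
		for (struct mapping *mp = first_mapping(pp); mp; mp = next_alias(mp)) {
			if (mp->mpSpace == space) {				// one of ours?
				unsigned long long va = mp->mpVAddr;
				mapPhysUnlock(pp);
				return hw_rem_map(pm, va, 0);		// join common removal
			}
		}
		mapPhysUnlock(pp);
		return mapRtEmpty;							// none for this pmap
	}
*/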
2010 .globl EXT(hw_purge_space)
2012 LEXT(hw_purge_space)
2013 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
2014 mflr r0 ; Save the link register
2015 stw r15,FM_ARG0+0x00(r1) ; Save a register
2016 stw r16,FM_ARG0+0x04(r1) ; Save a register
2017 stw r17,FM_ARG0+0x08(r1) ; Save a register
2018 mfsprg r2,2 ; Get feature flags
2019 stw r18,FM_ARG0+0x0C(r1) ; Save a register
2020 stw r19,FM_ARG0+0x10(r1) ; Save a register
2021 stw r20,FM_ARG0+0x14(r1) ; Save a register
2022 stw r21,FM_ARG0+0x18(r1) ; Save a register
2023 stw r22,FM_ARG0+0x1C(r1) ; Save a register
2024 mtcrf 0x02,r2 ; move pf64Bit cr6
2025 stw r23,FM_ARG0+0x20(r1) ; Save a register
2026 stw r24,FM_ARG0+0x24(r1) ; Save a register
2027 stw r25,FM_ARG0+0x28(r1) ; Save a register
2028 stw r26,FM_ARG0+0x2C(r1) ; Save a register
2029 stw r27,FM_ARG0+0x30(r1) ; Save a register
2030 li r6,0 ; Set no next address return
2031 stw r28,FM_ARG0+0x34(r1) ; Save a register
2032 stw r29,FM_ARG0+0x38(r1) ; Save a register
2033 stw r30,FM_ARG0+0x3C(r1) ; Save a register
2034 stw r31,FM_ARG0+0x40(r1) ; Save a register
2035 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
2036 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
			lwz		r11,pmapFlags(r4)			; Get pmap's flags
2040 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
2041 bne hpsPanic ; Call not valid for guest shadow assist pmap
2044 bt++ pf64Bitb,hpsSF1 ; skip if 64-bit (only they take the hint)
			lwz		r9,pmapvr+4(r4)				; Get conversion mask for pmap
			b		hpsSF1x						; Done...
2050 hpsSF1: ld r9,pmapvr(r4) ; Get conversion mask for pmap
2052 hpsSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2054 xor r4,r4,r9 ; Convert the pmap to physical addressing
2056 bl mapPhysLock ; Lock the physent
2058 lwz r8,pmapSpace(r4) ; Get the space hash
2060 bt++ pf64Bitb,hpsSF ; skip if 64-bit (only they take the hint)
2062 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2064 hpsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2065 beq hpsNone ; Did not find one...
2067 lhz r10,mpSpace(r12) ; Get the space
			cmplw	r10,r8						; Is this one of ours?
			beq		hpsFnd						; Yes, found one...
2072 lwz r12,mpAlias+4(r12) ; Chain on to the next
2073 b hpsSrc32 ; Check it out...
2077 hpsSF: li r0,ppLFAmask
2078 ld r12,ppLink(r3) ; Get the pointer to the first mapping
			rotrdi	r0,r0,ppLFArrot				; Rotate clean up mask to get 0xF00000000000000F
2081 hpsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2082 beq hpsNone ; Did not find one...
2084 lhz r10,mpSpace(r12) ; Get the space
			cmplw	r10,r8						; Is this one of ours?
			beq		hpsFnd						; Yes, found one...
2089 ld r12,mpAlias(r12) ; Chain on to the next
2090 b hpsSrc64 ; Check it out...
2094 hpsFnd: mr r28,r4 ; Set the pmap physical address
2095 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2096 lwz r5,mpVAddr+4(r12) ; and the bottom
2098 bl mapPhysUnlock ; Time to unlock the physical entry
2099 b hrmJoin ; Go remove the mapping...
2103 hpsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2105 bt++ pf64Bitb,hpsSF3 ; skip if 64-bit (only they take the hint)...
2107 mtmsr r11 ; Restore enables/translation/etc.
2109 b hpsRetnCmn ; Join the common return code...
2111 hpsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2115 ; NOTE: we have not used any registers other than the volatiles to this point
2118 hpsRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2120 li r3,mapRtEmpty ; No mappings for specified pmap on physent chain
2121 mtlr r12 ; Restore the return
2122 lwz r1,0(r1) ; Pop the stack
2125 hpsPanic: lis r0,hi16(Choke) ; System abend
2126 ori r0,r0,lo16(Choke) ; System abend
2127 li r3,failMapping ; Show that we failed some kind of mapping thing
2131 * mapping *hw_scrub_guest(physent, pmap) - remove first guest mapping associated with host
2132 * on this physent chain
2134 * Locates the first guest mapping on the physent chain that is associated with the
2135 * specified host pmap. If this succeeds, the mapping is removed by joining the general
2136 * remove path; otherwise, we return NULL. The caller is expected to invoke this entry
2137 * repeatedly until no additional guest mappings that match our criteria are removed.
2139 * Because this entry point exits through hw_rem_map, our prolog pushes its frame.
2142 * r3 : physent, 32-bit kernel virtual address
2143 * r4 : host pmap, 32-bit kernel virtual address
2145 * Volatile register usage (for linkage through hrmJoin):
2146 * r4 : high-order 32 bits of guest virtual address
2147 * r5 : low-order 32 bits of guest virtual address
2148 * r11: saved MSR image
2150 * Non-volatile register usage:
2151 * r26: VMM extension block's physical address
2152 * r27: host pmap's physical address
2153 * r28: guest pmap's physical address
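;
;			In rough C (illustrative names):
;
/*
	unsigned long long hw_scrub_guest_sketch(struct physent *pp, struct pmap *host)
	{
		unsigned long long host_ext = host->pmapVmmExtPhys;	// host's VMM block
		mapPhysLock(pp);
		for (struct mapping *mp = first_mapping(pp); mp; mp = next_alias(mp)) {
			struct pmap *guest = pmapTrans[mp->mpSpace];
			if (type_of(mp) == mpGuest &&
			    guest->pmapVmmExtPhys == host_ext) {	// guest of this host?
				mapPhysUnlock(pp);
				return remove_mapping(guest, mp);		// join common removal
			}
		}
		mapPhysUnlock(pp);
		return mapRtEmpty;								// no matching guest mapping
	}
*/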
2158 .globl EXT(hw_scrub_guest)
2160 LEXT(hw_scrub_guest)
2161 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
2162 mflr r0 ; Save the link register
2163 stw r15,FM_ARG0+0x00(r1) ; Save a register
2164 stw r16,FM_ARG0+0x04(r1) ; Save a register
2165 stw r17,FM_ARG0+0x08(r1) ; Save a register
2166 mfsprg r2,2 ; Get feature flags
2167 stw r18,FM_ARG0+0x0C(r1) ; Save a register
2168 stw r19,FM_ARG0+0x10(r1) ; Save a register
2169 stw r20,FM_ARG0+0x14(r1) ; Save a register
2170 stw r21,FM_ARG0+0x18(r1) ; Save a register
2171 stw r22,FM_ARG0+0x1C(r1) ; Save a register
2172 mtcrf 0x02,r2 ; move pf64Bit cr6
2173 stw r23,FM_ARG0+0x20(r1) ; Save a register
2174 stw r24,FM_ARG0+0x24(r1) ; Save a register
2175 stw r25,FM_ARG0+0x28(r1) ; Save a register
2176 stw r26,FM_ARG0+0x2C(r1) ; Save a register
2177 stw r27,FM_ARG0+0x30(r1) ; Save a register
2178 li r6,0 ; Set no next address return
2179 stw r28,FM_ARG0+0x34(r1) ; Save a register
2180 stw r29,FM_ARG0+0x38(r1) ; Save a register
2181 stw r30,FM_ARG0+0x3C(r1) ; Save a register
2182 stw r31,FM_ARG0+0x40(r1) ; Save a register
2183 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
2184 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2186 lwz r11,pmapVmmExt(r4) ; get VMM pmap extension block vaddr
2188 bt++ pf64Bitb,hsg64Salt ; Test for 64-bit machine
2189 lwz r26,pmapVmmExtPhys+4(r4) ; Get VMM pmap extension block paddr
2190 lwz r9,pmapvr+4(r4) ; Get 32-bit virt<->real conversion salt
2191 b hsgStart ; Get to work
2193 hsg64Salt: ld r26,pmapVmmExtPhys(r4) ; Get VMM pmap extension block paddr
2194 ld r9,pmapvr+4(r4) ; Get 64-bit virt<->real conversion salt
2196 hsgStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
2197 xor r27,r4,r9 ; Convert host pmap_t virt->real
2198 bl mapPhysLock ; Lock the physent
2200 bt++ pf64Bitb,hsg64Scan ; Test for 64-bit machine
2202 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2203 hsg32Loop: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2204 beq hsg32Miss ; Did not find one...
2205 lwz r8,mpFlags(r12) ; Get mapping's flags
2206 lhz r7,mpSpace(r12) ; Get mapping's space id
2207 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2208 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2209 xori r8,r8,mpGuest ; Is it a guest mapping?
			ori		r28,r28,lo16(EXT(pmapTrans))	; Get the bottom of the start of the pmap hash to pmap translate table
2211 slwi r9,r7,2 ; Multiply space by 4
2212 lwz r28,0(r28) ; Get the actual translation map
2213 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2214 slwi r7,r7,3 ; Multiply space by 8
2215 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2216 add r7,r7,r9 ; Get correct displacement into translate table
2217 add r28,r28,r7 ; Point to the pmap translation
2218 lwz r28,pmapPAddr+4(r28) ; Get guest pmap paddr
2219 lwz r7,pmapVmmExtPhys+4(r28) ; Get VMM extension block paddr
2220 xor r7,r7,r26 ; Is guest associated with specified host?
2221 or. r7,r7,r8 ; Guest mapping && associated with host?
2222 lwz r12,mpAlias+4(r12) ; Chain on to the next
2223 bne hsg32Loop ; Try next mapping on alias chain
2225 hsg32Hit: bl mapPhysUnlock ; Unlock physent chain
2226 b hrmJoin ; Join common path for mapping removal
2229 hsg32Miss: bl mapPhysUnlock ; Unlock physent chain
2230 mtmsr r11 ; Restore 'rupts, translation
2231 isync ; Throw a small wrench into the pipeline
2232 li r3,mapRtEmpty ; No mappings found matching specified criteria
2233 b hrmRetnCmn ; Exit through common epilog
2236 hsg64Scan: li r6,ppLFAmask ; Get lock, flag, attribute mask seed
2237 ld r12,ppLink(r3) ; Grab the pointer to the first mapping
			rotrdi	r6,r6,ppLFArrot				; Rotate clean up mask to get 0xF00000000000000F
2239 hsg64Loop: andc. r12,r12,r6 ; Clean and test mapping address
2240 beq hsg64Miss ; Did not find one...
2241 lwz r8,mpFlags(r12) ; Get mapping's flags
2242 lhz r7,mpSpace(r12) ; Get mapping's space id
2243 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2244 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2245 xori r8,r8,mpGuest ; Is it a guest mapping?
			ori		r28,r28,lo16(EXT(pmapTrans))	; Get the bottom of the start of the pmap hash to pmap translate table
2247 slwi r9,r7,2 ; Multiply space by 4
2248 lwz r28,0(r28) ; Get the actual translation map
2249 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2250 slwi r7,r7,3 ; Multiply space by 8
2251 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2252 add r7,r7,r9 ; Get correct displacement into translate table
2253 add r28,r28,r7 ; Point to the pmap translation
2254 ld r28,pmapPAddr(r28) ; Get guest pmap paddr
2255 ld r7,pmapVmmExtPhys(r28) ; Get VMM extension block paddr
2256 xor r7,r7,r26 ; Is guest associated with specified host?
2257 or. r7,r7,r8 ; Guest mapping && associated with host?
2258 ld r12,mpAlias(r12) ; Chain on to the next
2259 bne hsg64Loop ; Try next mapping on alias chain
2261 hsg64Hit: bl mapPhysUnlock ; Unlock physent chain
2262 b hrmJoin ; Join common path for mapping removal
2265 hsg64Miss: bl mapPhysUnlock ; Unlock physent chain
2266 mtmsrd r11 ; Restore 'rupts, translation
2267 li r3,mapRtEmpty ; No mappings found matching specified criteria
2268 b hrmRetnCmn ; Exit through common epilog
2272 * mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
2274 * Upon entry, R3 contains a pointer to a physent.
2275 * space is the space ID from the pmap in question
2277 * We return the virtual address of the found mapping in
 *	R3. Note that the mapping's busy count is bumped.
2280 * Note that this is designed to be called from 32-bit mode with a stack.
 *	We disable translation and all interruptions here. This keeps us
2283 * from having to worry about a deadlock due to having anything locked
2284 * and needing it to process a fault.
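;
;			In rough C (illustrative names):
;
/*
	struct mapping *hw_find_space_sketch(struct physent *pp, unsigned int space)
	{
		mapPhysLock(pp);
		for (struct mapping *mp = first_mapping(pp); mp; mp = next_alias(mp)) {
			if (mp->mpSpace == space) {
				mapBumpBusy(mp);				// pin it so it cannot disappear
				mapPhysUnlock(pp);
				return phys_to_virt(mp);		// mbvrswap-style conversion
			}
		}
		mapPhysUnlock(pp);
		return 0;								// nothing for this space
	}
*/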
2289 .globl EXT(hw_find_space)
2292 stwu r1,-(FM_SIZE)(r1) ; Make some space on the stack
2293 mflr r0 ; Save the link register
2294 mr r8,r4 ; Remember the space
2295 stw r0,(FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2297 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2299 bl mapPhysLock ; Lock the physent
2301 bt++ pf64Bitb,hfsSF ; skip if 64-bit (only they take the hint)
2303 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2305 hfsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2306 beq hfsNone ; Did not find one...
2308 lhz r10,mpSpace(r12) ; Get the space
			cmplw	r10,r8						; Is this one of ours?
			beq		hfsFnd						; Yes, found one...
2313 lwz r12,mpAlias+4(r12) ; Chain on to the next
2314 b hfsSrc32 ; Check it out...
2318 hfsSF: li r0,ppLFAmask
2319 ld r12,ppLink(r3) ; Get the pointer to the first mapping
			rotrdi	r0,r0,ppLFArrot				; Rotate clean up mask to get 0xF00000000000000F
2322 hfsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2323 beq hfsNone ; Did not find one...
2325 lhz r10,mpSpace(r12) ; Get the space
			cmplw	r10,r8						; Is this one of ours?
			beq		hfsFnd						; Yes, found one...
2330 ld r12,mpAlias(r12) ; Chain on to the next
2331 b hfsSrc64 ; Check it out...
2335 hfsFnd: mr r8,r3 ; Save the physent
2336 mr r3,r12 ; Point to the mapping
			bl		mapBumpBusy					; If we found it, bump up the busy count so the mapping does not disappear
2339 mr r3,r8 ; Get back the physical entry
2340 li r7,0xFFF ; Get a page size mask
2341 bl mapPhysUnlock ; Time to unlock the physical entry
2343 andc r3,r12,r7 ; Move the mapping back down to a page
2344 lwz r3,mbvrswap+4(r3) ; Get last half of virtual to real swap
2345 xor r12,r3,r12 ; Convert to virtual
2346 b hfsRet ; Time to return
2350 hfsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2352 hfsRet: bt++ pf64Bitb,hfsSF3 ; skip if 64-bit (only they take the hint)...
2354 mtmsr r11 ; Restore enables/translation/etc.
2356 b hfsRetnCmn ; Join the common return code...
2358 hfsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2362 ; NOTE: we have not used any registers other than the volatiles to this point
2365 hfsRetnCmn: mr r3,r12 ; Get the mapping or a 0 if we failed
2368 mr. r3,r3 ; Anything to return?
2369 beq hfsRetnNull ; Nope
2370 lwz r11,mpFlags(r3) ; Get mapping flags
2371 rlwinm r0,r11,0,mpType ; Isolate the mapping type
2372 cmplwi r0,mpGuest ; Shadow guest mapping?
2373 beq hfsPanic ; Yup, kick the bucket
2377 lwz r12,(FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2379 mtlr r12 ; Restore the return
2380 lwz r1,0(r1) ; Pop the stack
2383 hfsPanic: lis r0,hi16(Choke) ; System abend
2384 ori r0,r0,lo16(Choke) ; System abend
2385 li r3,failMapping ; Show that we failed some kind of mapping thing
2389 ; mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap
2390 ; Returns 0 if not found or the virtual address of the mapping if
;			it is. Also, the mapping has the busy count bumped.
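;
;			In rough C (illustrative names):
;
/*
	struct mapping *hw_find_map_sketch(struct pmap *pm, unsigned long long va,
	                                   unsigned long long *nextva)
	{
		sxlkShared(&pm->pmapSXlk);				// shared lock on the lists
		struct mapping *mp = mapSearch(pm, va, nextva);
		if (mp != 0 && !remove_in_progress(mp))
			mapBumpBusy(mp);					// pin the mapping
		else
			mp = 0;								// not found, or being removed
		sxlkUnlock(&pm->pmapSXlk);
		return mp ? phys_to_virt(mp) : 0;		// mbvrswap-style conversion
	}
*/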
2394 .globl EXT(hw_find_map)
2397 stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2398 mflr r0 ; Save the link register
2399 stw r25,FM_ARG0+0x00(r1) ; Save a register
2400 stw r26,FM_ARG0+0x04(r1) ; Save a register
2401 mr r25,r6 ; Remember address of next va
2402 stw r27,FM_ARG0+0x08(r1) ; Save a register
2403 stw r28,FM_ARG0+0x0C(r1) ; Save a register
2404 stw r29,FM_ARG0+0x10(r1) ; Save a register
2405 stw r30,FM_ARG0+0x14(r1) ; Save a register
2406 stw r31,FM_ARG0+0x18(r1) ; Save a register
			stw		r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Save the return
			lwz		r11,pmapFlags(r3)			; Get pmap's flags
2411 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
2412 bne hfmPanic ; Call not valid for guest shadow assist pmap
2415 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
2416 lwz r7,pmapvr+4(r3) ; Get the second part
2419 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2421 mr r27,r11 ; Remember the old MSR
2422 mr r26,r12 ; Remember the feature bits
2424 xor r28,r3,r7 ; Change the common 32- and 64-bit half
2426 bf-- pf64Bitb,hfmSF1 ; skip if 32-bit...
2428 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
2430 hfmSF1: mr r29,r4 ; Save top half of vaddr
2431 mr r30,r5 ; Save the bottom half
2433 la r3,pmapSXlk(r28) ; Point to the pmap search lock
2434 bl sxlkShared ; Go get a shared lock on the mapping lists
2435 mr. r3,r3 ; Did we get the lock?
2436 bne-- hfmBadLock ; Nope...
2438 mr r3,r28 ; get the pmap address
2439 mr r4,r29 ; Get bits 0:31 to look for
2440 mr r5,r30 ; Get bits 32:64
2442 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
2444 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Find remove in progress bit
2445 mr. r31,r3 ; Save the mapping if we found it
2446 cmplwi cr1,r0,0 ; Are we removing?
2447 mr r29,r4 ; Save next va high half
2448 crorc cr0_eq,cr0_eq,cr1_eq ; Not found or removing
2449 mr r30,r5 ; Save next va low half
2450 li r6,0 ; Assume we did not find it
2451 li r26,0xFFF ; Get a mask to relocate to start of mapping page
2453 bt-- cr0_eq,hfmNotFnd ; We did not find it...
			bl		mapBumpBusy					; If we found it, bump up the busy count so the mapping does not disappear
2457 andc r4,r31,r26 ; Get back to the mapping page start
2459 ; Note: we can treat 32- and 64-bit the same here. Because we are going from
;			physical to virtual and we only do 32-bit virtual, we only need the
;			low-order word of the virtual-to-real swap.
2463 lwz r4,mbvrswap+4(r4) ; Get last half of virtual to real swap
2464 li r6,-1 ; Indicate we found it and it is not being removed
2465 xor r31,r31,r4 ; Flip to virtual
2467 hfmNotFnd: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2468 bl sxlkUnlock ; Unlock the search list
2470 rlwinm r3,r31,0,0,31 ; Move mapping to return register and clear top of register if 64-bit
2471 and r3,r3,r6 ; Clear if not found or removing
2473 hfmReturn: bt++ pf64Bitb,hfmR64 ; Yes...
2475 mtmsr r27 ; Restore enables/translation/etc.
2477 b hfmReturnC ; Join common...
2479 hfmR64: mtmsrd r27 ; Restore enables/translation/etc.
2482 hfmReturnC: stw r29,0(r25) ; Save the top of the next va
2483 stw r30,4(r25) ; Save the bottom of the next va
			lwz		r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Restore the return
2485 lwz r25,FM_ARG0+0x00(r1) ; Restore a register
2486 lwz r26,FM_ARG0+0x04(r1) ; Restore a register
2487 and r3,r3,r6 ; Clear return if the mapping is being removed
2488 lwz r27,FM_ARG0+0x08(r1) ; Restore a register
2489 mtlr r0 ; Restore the return
2490 lwz r28,FM_ARG0+0x0C(r1) ; Restore a register
2491 lwz r29,FM_ARG0+0x10(r1) ; Restore a register
2492 lwz r30,FM_ARG0+0x14(r1) ; Restore a register
2493 lwz r31,FM_ARG0+0x18(r1) ; Restore a register
2494 lwz r1,0(r1) ; Pop the stack
2499 hfmBadLock: li r3,1 ; Set lock time out error code
2500 b hfmReturn ; Leave....
2502 hfmPanic: lis r0,hi16(Choke) ; System abend
2503 ori r0,r0,lo16(Choke) ; System abend
2504 li r3,failMapping ; Show that we failed some kind of mapping thing
2509 * void hw_clear_maps(void)
2511 * Remove all mappings for all phys entries.
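;
;			In rough C (illustrative names; region and count handling
;			simplified):
;
/*
	void hw_clear_maps_sketch(void)
	{
		for (struct region *r = pmap_mem_regions; r->mrPhysTab; r++) {
			for (unsigned int i = 0; i <= r->mrEnd - r->mrStart; i++) {
				struct physent *pp = &r->mrPhysTab[i];
				for (struct mapping *mp = first_mapping(pp); mp;
				     mp = next_alias(mp))
					mp->mpPte &= ~mpHValid;		// kill the quick PTE pointer
			}
		}
	}
*/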
2517 .globl EXT(hw_clear_maps)
2520 mflr r10 ; Save the link register
2521 mfcr r9 ; Save the condition register
2522 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2524 lis r5,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2525 ori r5,r5,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2528 lwz r3,mrPhysTab(r5) ; Get the actual table address
2529 lwz r0,mrStart(r5) ; Get start of table entry
2530 lwz r4,mrEnd(r5) ; Get end of table entry
2531 addi r5,r5,mrSize ; Point to the next regions
2533 cmplwi r3,0 ; No more regions?
2534 beq-- hcmDone ; Leave...
2536 sub r4,r4,r0 ; Calculate physical entry count
2540 bt++ pf64Bitb,hcmNextPhys64 ; 64-bit version
2544 lwz r4,ppLink+4(r3) ; Grab the pointer to the first mapping
2545 addi r3,r3,physEntrySize ; Next phys_entry
2548 rlwinm. r4,r4,0,0,25 ; Clean and test mapping address
2549 beq hcmNoMap32 ; Did not find one...
2551 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2552 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
			stw		r0,mpPte(r4)				; Invalidate the quick pointer
2555 lwz r4,mpAlias+4(r4) ; Chain on to the next
2556 b hcmNextMap32 ; Check it out...
2564 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2565 ld r4,ppLink(r3) ; Get the pointer to the first mapping
			rotrdi	r0,r0,ppLFArrot				; Rotate clean up mask to get 0xF00000000000000F
2567 addi r3,r3,physEntrySize ; Next phys_entry
2570 andc. r4,r4,r0 ; Clean and test mapping address
2571 beq hcmNoMap64 ; Did not find one...
2573 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2574 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
			stw		r0,mpPte(r4)				; Invalidate the quick pointer
2577 ld r4,mpAlias(r4) ; Chain on to the next
2578 li r0,ppLFAmask ; Get mask to clean up mapping pointer
			rotrdi	r0,r0,ppLFArrot				; Rotate clean up mask to get 0xF00000000000000F
2580 b hcmNextMap64 ; Check it out...
2588 mtlr r10 ; Restore the return
2589 mtcr r9 ; Restore the condition register
2590 bt++ pf64Bitb,hcmDone64 ; 64-bit version
2592 mtmsr r11 ; Restore translation/mode/etc.
2597 mtmsrd r11 ; Restore translation/mode/etc.
2604 * unsigned int hw_walk_phys(pp, preop, op, postop, parm, opmod)
 *	walks all mappings for a physical page and performs
2606 * specified operations on each.
2608 * pp is unlocked physent
2609 * preop is operation to perform on physent before walk. This would be
2610 * used to set cache attribute or protection
2611 * op is the operation to perform on each mapping during walk
 *	postop is operation to perform on the physent after walk. This would be
2613 * used to set or reset the RC bits.
 *	opmod modifies the action taken on any connected PTEs visited during
 *	the walk.
2617 * We return the RC bits from before postop is run.
2619 * Note that this is designed to be called from 32-bit mode with a stack.
 *	We disable translation and all interruptions here. This keeps us
2622 * from having to worry about a deadlock due to having anything locked
2623 * and needing it to process a fault.
2625 * We lock the physent, execute preop, and then walk each mapping in turn.
2626 * If there is a PTE, it is invalidated and the RC merged into the physent.
2627 * Then we call the op function.
2628 * Then we revalidate the PTE.
 *	Once all mappings are finished, we save the physent RC and call the
2630 * postop routine. Then we unlock the physent and return the RC.
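;
;			In rough C, collapsing the three opmod variants into the
;			invalidate case (illustrative names):
;
/*
	unsigned int hw_walk_phys_sketch(struct physent *pp,
	                                 int (*preop)(struct physent *),
	                                 int (*op)(struct mapping *),
	                                 int (*postop)(struct physent *))
	{
		unsigned int rc;
		mapPhysLock(pp);
		if (preop(pp)) goto early;				// preop says bail: skip postop
		for (struct mapping *mp = first_mapping(pp); mp; mp = next_alias(mp)) {
			void *pte = mapInvPte(mp);			// invalidate PTE, merge RC
			int stop = op(mp);					// per-mapping operation
			if (pte) revalidate_pte(pte);		// put the PTE back
			if (stop) goto early;				// op says bail: skip postop
		}
		rc = rc_bits(pp);						// RC before postop runs
		postop(pp);
		mapPhysUnlock(pp);
		return rc;
	early:
		rc = rc_bits(pp);						// RC saved, postop skipped
		mapPhysUnlock(pp);
		return rc;
	}
*/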
2636 .globl EXT(hw_walk_phys)
2639 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2640 mflr r0 ; Save the link register
2641 stw r24,FM_ARG0+0x00(r1) ; Save a register
2642 stw r25,FM_ARG0+0x04(r1) ; Save a register
2643 stw r26,FM_ARG0+0x08(r1) ; Save a register
2644 stw r27,FM_ARG0+0x0C(r1) ; Save a register
2645 mr r24,r8 ; Save the parm
2646 mr r25,r7 ; Save the parm
2647 stw r28,FM_ARG0+0x10(r1) ; Save a register
2648 stw r29,FM_ARG0+0x14(r1) ; Save a register
2649 stw r30,FM_ARG0+0x18(r1) ; Save a register
2650 stw r31,FM_ARG0+0x1C(r1) ; Save a register
2651 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2653 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2655 mfsprg r26,0 ; (INSTRUMENTATION)
2656 lwz r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2657 addi r27,r27,1 ; (INSTRUMENTATION)
2658 stw r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2659 la r26,hwWalkFull(r26) ; (INSTRUMENTATION)
2660 slwi r12,r24,2 ; (INSTRUMENTATION)
2661 lwzx r27,r26,r12 ; (INSTRUMENTATION)
2662 addi r27,r27,1 ; (INSTRUMENTATION)
2663 stwx r27,r26,r12 ; (INSTRUMENTATION)
2665 mr r26,r11 ; Save the old MSR
2666 lis r27,hi16(hwpOpBase) ; Get high order of op base
2667 slwi r4,r4,7 ; Convert preop to displacement
2668 ori r27,r27,lo16(hwpOpBase) ; Get low order of op base
2669 slwi r5,r5,7 ; Convert op to displacement
2670 add r12,r4,r27 ; Point to the preop routine
2671 slwi r28,r6,7 ; Convert postop to displacement
2672 mtctr r12 ; Set preop routine
2673 add r28,r28,r27 ; Get the address of the postop routine
2674 add r27,r5,r27 ; Get the address of the op routine
2676 bl mapPhysLock ; Lock the physent
2678 mr r29,r3 ; Save the physent address
2680 bt++ pf64Bitb,hwp64 ; skip if 64-bit (only they take the hint)
2682 bctrl ; Call preop routine
2683 bne- hwpEarly32 ; preop says to bail now...
2685 cmplwi r24,hwpMergePTE ; Classify operation modifier
2686 mtctr r27 ; Set up the op function address
2687 lwz r31,ppLink+4(r3) ; Grab the pointer to the first mapping
2688 blt hwpSrc32 ; Do TLB invalidate/purge/merge/reload for each mapping
2689 beq hwpMSrc32 ; Do TLB merge for each mapping
2691 hwpQSrc32: rlwinm. r31,r31,0,0,25 ; Clean and test mapping address
2692 beq hwpNone32 ; Did not find one...
2694 bctrl ; Call the op function
2696 bne- hwpEarly32 ; op says to bail now...
2697 lwz r31,mpAlias+4(r31) ; Chain on to the next
2698 b hwpQSrc32 ; Check it out...
2701 hwpMSrc32: rlwinm. r31,r31,0,0,25 ; Clean and test mapping address
2702 beq hwpNone32 ; Did not find one...
2704 bl mapMergeRC32 ; Merge reference and change into mapping and physent
2705 bctrl ; Call the op function
2707 bne- hwpEarly32 ; op says to bail now...
2708 lwz r31,mpAlias+4(r31) ; Chain on to the next
2709 b hwpMSrc32 ; Check it out...
2712 hwpSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2713 beq hwpNone32 ; Did not find one...
2716 ; Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4,
;			PTE low in R5. The PCA address is in R7. The PTEG comes back locked.
2718 ; If there is no PTE, PTE low is obtained from mapping
2720 bl mapInvPte32 ; Invalidate and lock PTE, also merge into physent
2722 bctrl ; Call the op function
2724 crmove cr1_eq,cr0_eq ; Save the return code
2726 mr. r3,r3 ; Was there a previously valid PTE?
2727 beq- hwpNxt32 ; Nope...
2729 stw r5,4(r3) ; Store second half of PTE
2730 eieio ; Make sure we do not reorder
2731 stw r4,0(r3) ; Revalidate the PTE
2733 eieio ; Make sure all updates come first
2734 stw r6,0(r7) ; Unlock the PCA
2736 hwpNxt32: bne- cr1,hwpEarly32 ; op says to bail now...
2737 lwz r31,mpAlias+4(r31) ; Chain on to the next
2738 b hwpSrc32 ; Check it out...
2742 hwpNone32: mtctr r28 ; Get the post routine address
2744 lwz r30,ppLink+4(r29) ; Save the old RC
2745 mr r3,r29 ; Get the physent address
2746 bctrl ; Call post routine
2748 bl mapPhysUnlock ; Unlock the physent
2750 mtmsr r26 ; Restore translation/mode/etc.
2753 b hwpReturn ; Go restore registers and return...
2757 hwpEarly32: lwz r30,ppLink+4(r29) ; Save the old RC
2758 mr r3,r29 ; Get the physent address
2759 bl mapPhysUnlock ; Unlock the physent
2761 mtmsr r26 ; Restore translation/mode/etc.
2764 b hwpReturn ; Go restore registers and return...
2768 hwp64: bctrl ; Call preop routine
2769 bne-- hwpEarly64 ; preop says to bail now...
2771 cmplwi r24,hwpMergePTE ; Classify operation modifier
			mtctr	r27							; Set up the op function address
			li		r24,ppLFAmask				; Get mask to clean up mapping pointer
			ld		r31,ppLink(r3)				; Get the pointer to the first mapping
			rotrdi	r24,r24,ppLFArrot			; Rotate clean up mask to get 0xF00000000000000F
2777 blt hwpSrc64 ; Do TLB invalidate/purge/merge/reload for each mapping
2778 beq hwpMSrc64 ; Do TLB merge for each mapping
2780 hwpQSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2781 beq hwpNone64 ; Did not find one...
2783 bctrl ; Call the op function
2785 bne-- hwpEarly64 ; op says to bail now...
2786 ld r31,mpAlias(r31) ; Chain on to the next
2787 b hwpQSrc64 ; Check it out...
2790 hwpMSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2791 beq hwpNone64 ; Did not find one...
2793 bl mapMergeRC64 ; Merge reference and change into mapping and physent
2794 bctrl ; Call the op function
2796 bne-- hwpEarly64 ; op says to bail now...
2797 ld r31,mpAlias(r31) ; Chain on to the next
2798 b hwpMSrc64 ; Check it out...
2801 hwpSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2802 beq hwpNone64 ; Did not find one...
2804 ; Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4,
2805 ; PTE low in R5. PTEG comes back locked if there is one
2807 bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
2809 bctrl ; Call the op function
2811 crmove cr1_eq,cr0_eq ; Save the return code
2813 mr. r3,r3 ; Was there a previously valid PTE?
2814 beq-- hwpNxt64 ; Nope...
2816 std r5,8(r3) ; Save bottom of PTE
2817 eieio ; Make sure we do not reorder
2818 std r4,0(r3) ; Revalidate the PTE
2820 eieio ; Make sure all updates come first
2821 stw r6,0(r7) ; Unlock the PCA
2823 hwpNxt64: bne-- cr1,hwpEarly64 ; op says to bail now...
2824 ld r31,mpAlias(r31) ; Chain on to the next
2825 b hwpSrc64 ; Check it out...
2829 hwpNone64: mtctr r28 ; Get the post routine address
2831 lwz r30,ppLink+4(r29) ; Save the old RC
2832 mr r3,r29 ; Get the physent address
2833 bctrl ; Call post routine
2835 bl mapPhysUnlock ; Unlock the physent
2837 mtmsrd r26 ; Restore translation/mode/etc.
2839 b hwpReturn ; Go restore registers and return...
2843 hwpEarly64: lwz r30,ppLink+4(r29) ; Save the old RC
2844 mr r3,r29 ; Get the physent address
2845 bl mapPhysUnlock ; Unlock the physent
2847 mtmsrd r26 ; Restore translation/mode/etc.
2850 hwpReturn: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2851 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
2852 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
2853 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
2854 mr r3,r30 ; Pass back the RC
2855 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
2856 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
2857 mtlr r0 ; Restore the return
2858 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
2859 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
2860 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
2861 lwz r1,0(r1) ; Pop the stack
2866 ; The preop/op/postop function table.
;			Each function must be 128-byte aligned and be no more than
;			32 instructions (128 bytes). If more, we must fix the address
;			calculations at the start of hwpOpBase.
2871 ; The routine must set CR0_EQ in order to continue scan.
2872 ; If CR0_EQ is not set, an early return from the function is made.
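;
;			The callers above convert an operation code into an entry address
;			with a shift, since every function sits in a fixed 128-byte slot;
;			in rough C:
;
/*
	typedef int (*hwp_op_t)(void);

	hwp_op_t hwp_entry(unsigned int opcode)
	{
		extern char hwpOpBase[];						// base of the op table
		return (hwp_op_t)(hwpOpBase + (opcode << 7));	// opcode * 128
	}
*/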
2879 ; Function 0 - No operation
2881 hwpNoop: cmplw r0,r0 ; Make sure CR0_EQ is set
2882 blr ; Just return...
2886 ; This is the continuation of function 4 - Set attributes in mapping
2888 ; We changed the attributes of a mapped page. Make sure there are no cache paradoxes.
2889 ; NOTE: Do we have to deal with i-cache here?
2891 hwpSAM: li r11,4096 ; Get page size
2893 hwpSAMinvd: sub. r11,r11,r9 ; Back off a line
2894 dcbf r11,r5 ; Flush the line in the data cache
2895 bgt++ hwpSAMinvd ; Go do the rest of it...
2897 sync ; Make sure it is done
2899 li r11,4096 ; Get page size
2901 hwpSAMinvi: sub. r11,r11,r9 ; Back off a line
2902 icbi r11,r5 ; Flush the line in the icache
2903 bgt++ hwpSAMinvi ; Go do the rest of it...
2905 sync ; Make sure it is done
2907 cmpw r0,r0 ; Make sure we return CR0_EQ
2911 ; Function 1 - Set protection in physent (obsolete)
2913 .set .,hwpOpBase+(1*128) ; Generate error if previous function too long
2915 hwpSPrtPhy: cmplw r0,r0 ; Make sure we return CR0_EQ
2919 ; Function 2 - Set protection in mapping
2921 .set .,hwpOpBase+(2*128) ; Generate error if previous function too long
2923 hwpSPrtMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2924 lwz r8,mpVAddr+4(r31) ; Get the protection part of mapping
2925 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2926 li r0,lo16(mpN|mpPP) ; Get no-execute and protection bits
2927 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2928 rlwinm r2,r25,0,mpNb-32,mpPPe-32 ; Isolate new no-execute and protection bits
2929 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2930 andc r5,r5,r0 ; Clear the old no-execute and prot bits
2931 or r5,r5,r2 ; Move in the new no-execute and prot bits
2932 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2933 cmpw r0,r0 ; Make sure we return CR0_EQ
2934 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2937 ; Function 3 - Set attributes in physent
2939 .set .,hwpOpBase+(3*128) ; Generate error if previous function too long
2941 hwpSAtrPhy: li r5,ppLink ; Get offset for flag part of physent
2943 hwpSAtrPhX: lwarx r4,r5,r29 ; Get the old flags
2944 rlwimi r4,r25,0,ppIb,ppGb ; Stick in the new attributes
2945 stwcx. r4,r5,r29 ; Try to stuff it
2946 bne-- hwpSAtrPhX ; Try again...
2947 ; Note: CR0_EQ is set because of stwcx.
2950 ; Function 4 - Set attributes in mapping
2952 .set .,hwpOpBase+(4*128) ; Generate error if previous function too long
2954 hwpSAtrMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2955 lwz r8,mpVAddr+4(r31) ; Get the attribute part of mapping
2956 li r2,mpM ; Force on coherent
2957 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2958 li r0,lo16(mpWIMG) ; Get wimg mask
2959 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2960 rlwimi r2,r25,32-(mpIb-32-ppIb),mpIb-32,mpIb-32
2961 ; Copy in the cache inhibited bit
2962 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2963 andc r5,r5,r0 ; Clear the old wimg
2964 rlwimi r2,r25,32-(mpGb-32-ppGb),mpGb-32,mpGb-32
2965 ; Copy in the guarded bit
2966 mfsprg r9,2 ; Feature flags
2967 or r5,r5,r2 ; Move in the new wimg
2968 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2969 lwz r2,mpPAddr(r31) ; Get the physical address
2970 li r0,0xFFF ; Start a mask
2971 andi. r9,r9,pf32Byte+pf128Byte ; Get cache line size
2972 rlwinm r5,r0,0,1,0 ; Copy to top half
2973 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2974 rlwinm r2,r2,12,1,0 ; Copy to top and rotate to make physical address with junk left
2975 and r5,r5,r2 ; Clean stuff in top 32 bits
2976 andc r2,r2,r0 ; Clean bottom too
			rlwimi	r5,r2,0,0,31				; Insert the low 32 bits to make the full physical address
2978 b hwpSAM ; Join common
2980 ; NOTE: we moved the remainder of the code out of here because it
2981 ; did not fit in the 128 bytes allotted. It got stuck into the free space
2982 ; at the end of the no-op function.
2987 ; Function 5 - Clear reference in physent
2989 .set .,hwpOpBase+(5*128) ; Generate error if previous function too long
2991 hwpCRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2993 hwpCRefPhX: lwarx r4,r5,r29 ; Get the old flags
2994 rlwinm r4,r4,0,ppRb+1-32,ppRb-1-32 ; Clear R
2995 stwcx. r4,r5,r29 ; Try to stuff it
2996 bne-- hwpCRefPhX ; Try again...
2997 ; Note: CR0_EQ is set because of stwcx.
3001 ; Function 6 - Clear reference in mapping
3003 .set .,hwpOpBase+(6*128) ; Generate error if previous function too long
3005 hwpCRefMap: li r0,lo16(mpR) ; Get reference bit
3006 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3007 andc r5,r5,r0 ; Clear in PTE copy
3008 andc r8,r8,r0 ; and in the mapping
3009 cmpw r0,r0 ; Make sure we return CR0_EQ
3010 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3014 ; Function 7 - Clear change in physent
3016 .set .,hwpOpBase+(7*128) ; Generate error if previous function too long
3018 hwpCCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3020 hwpCCngPhX: lwarx r4,r5,r29 ; Get the old flags
3021 rlwinm r4,r4,0,ppCb+1-32,ppCb-1-32 ; Clear C
3022 stwcx. r4,r5,r29 ; Try to stuff it
3023 bne-- hwpCCngPhX ; Try again...
3024 ; Note: CR0_EQ is set because of stwcx.
3028 ; Function 8 - Clear change in mapping
3030 .set .,hwpOpBase+(8*128) ; Generate error if previous function too long
3032 hwpCCngMap: li r0,lo16(mpC) ; Get change bit
3033 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3034 andc r5,r5,r0 ; Clear in PTE copy
3035 andc r8,r8,r0 ; and in the mapping
3036 cmpw r0,r0 ; Make sure we return CR0_EQ
3037 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3041 ; Function 9 - Set reference in physent
3043 .set .,hwpOpBase+(9*128) ; Generate error if previous function too long
3045 hwpSRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3047 hwpSRefPhX: lwarx r4,r5,r29 ; Get the old flags
3048 ori r4,r4,lo16(ppR) ; Set the reference
3049 stwcx. r4,r5,r29 ; Try to stuff it
3050 bne-- hwpSRefPhX ; Try again...
3051 ; Note: CR0_EQ is set because of stwcx.
3055 ; Function 10 - Set reference in mapping
3057 .set .,hwpOpBase+(10*128) ; Generate error if previous function too long
3059 hwpSRefMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3060 ori r8,r8,lo16(mpR) ; Set reference in mapping
3061 cmpw r0,r0 ; Make sure we return CR0_EQ
3062 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3065 ; Function 11 - Set change in physent
3067 .set .,hwpOpBase+(11*128) ; Generate error if previous function too long
3069 hwpSCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3071 hwpSCngPhX: lwarx r4,r5,r29 ; Get the old flags
3072 ori r4,r4,lo16(ppC) ; Set the change bit
3073 stwcx. r4,r5,r29 ; Try to stuff it
3074 bne-- hwpSCngPhX ; Try again...
3075 ; Note: CR0_EQ is set because of stwcx.
3078 ; Function 12 - Set change in mapping
3080 .set .,hwpOpBase+(12*128) ; Generate error if previous function too long
3082 hwpSCngMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
			ori		r8,r8,lo16(mpC)				; Set change in mapping
3084 cmpw r0,r0 ; Make sure we return CR0_EQ
3085 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3088 ; Function 13 - Test reference in physent
3090 .set .,hwpOpBase+(13*128) ; Generate error if previous function too long
3092 hwpTRefPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3093 rlwinm. r0,r0,0,ppRb-32,ppRb-32 ; Isolate reference bit and see if 0
3094 blr ; Return (CR0_EQ set to continue if reference is off)...
3097 ; Function 14 - Test reference in mapping
3099 .set .,hwpOpBase+(14*128) ; Generate error if previous function too long
3101 hwpTRefMap: rlwinm. r0,r5,0,mpRb-32,mpRb-32 ; Isolate reference bit and see if 0
3102 blr ; Return (CR0_EQ set to continue if reference is off)...
3105 ; Function 15 - Test change in physent
3107 .set .,hwpOpBase+(15*128) ; Generate error if previous function too long
3109 hwpTCngPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3110 rlwinm. r0,r0,0,ppCb-32,ppCb-32 ; Isolate change bit and see if 0
3111 blr ; Return (CR0_EQ set to continue if change is off)...
3114 ; Function 16 - Test change in mapping
3116 .set .,hwpOpBase+(16*128) ; Generate error if previous function too long
3118 hwpTCngMap: rlwinm. r0,r5,0,mpCb-32,mpCb-32 ; Isolate change bit and see if 0
3119 blr ; Return (CR0_EQ set to continue if change is off)...
3122 ; Function 17 - Test reference and change in physent
3124 .set .,hwpOpBase+(17*128) ; Generate error if previous function too long
3127 lwz r0,ppLink+4(r29) ; Get the flags from physent
3128 rlwinm r0,r0,0,ppRb-32,ppCb-32 ; Isolate reference and change bits
3129 cmplwi r0,lo16(ppR|ppC) ; cr0_eq <- ((R == 1) && (C == 1))
3130 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3131 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3134 ; Function 18 - Test reference and change in mapping
3136 .set .,hwpOpBase+(18*128) ; Generate error if previous function too long
3138 rlwinm r0,r5,0,mpRb-32,mpCb-32 ; Isolate reference and change bits from mapping
3139 cmplwi r0,lo16(mpR|mpC) ; cr0_eq <- ((R == 1) && (C == 1))
3140 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3141 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3144 ; Function 19 - Clear reference and change in physent
3146 .set .,hwpOpBase+(19*128) ; Generate error if previous function too long
3148 li r5,ppLink+4 ; Get offset for flag part of physent
3151 lwarx r4,r5,r29 ; Get the old flags
3152 andc r4,r4,r25 ; Clear R and C as specified by mask
3153 stwcx. r4,r5,r29 ; Try to stuff it
3154 bne-- hwpCRefCngPhX ; Try again...
3155 ; Note: CR0_EQ is set because of stwcx.
3159 ; Function 20 - Clear reference and change in mapping
3161 .set .,hwpOpBase+(20*128) ; Generate error if previous function too long
3163 srwi r0,r25,(ppRb - mpRb) ; Align reference/change clear mask (phys->map)
3164 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3165 andc r5,r5,r0 ; Clear in PTE copy
3166 andc r8,r8,r0 ; and in the mapping
3167 cmpw r0,r0 ; Make sure we return CR0_EQ
3168 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3172 .set .,hwpOpBase+(21*128) ; Generate error if previous function too long
3175 ; unsigned int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping.
3178 ; mapRtOK - if all is ok
3179 ; mapRtBadLk - if mapping lock fails
3180 ; mapRtPerm - if mapping is permanent
3181 ; mapRtNotFnd - if mapping is not found
3182 ; mapRtBlock - if mapping is a block
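;
;			In rough C (illustrative names):
;
/*
	unsigned int hw_protect_sketch(struct pmap *pm, unsigned long long va,
	                               unsigned int prot, unsigned long long *nextva)
	{
		if (sxlkShared(&pm->pmapSXlk)) return mapRtBadLk;
		struct mapping *mp = mapSearch(pm, va, nextva);
		if (mp == 0 || remove_in_progress(mp)) {
			sxlkUnlock(&pm->pmapSXlk);
			return mapRtNotFnd;
		}
		if (!is_normal(mp)) {
			sxlkUnlock(&pm->pmapSXlk);
			return mapRtBlock;					// block or other special type
		}
		if (is_permanent(mp)) {
			sxlkUnlock(&pm->pmapSXlk);
			return mapRtPerm;					// permanent page, not changed
		}
		void *pte = mapInvPte(mp);				// take the PTE down, merge RC
		set_prot(mp, prot);						// new pp (and no-execute) bits
		if (pte) revalidate_pte(pte);			// put the PTE back
		sxlkUnlock(&pm->pmapSXlk);
		return mapRtOK;
	}
*/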
3185 .globl EXT(hw_protect)
3188 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3189 mflr r0 ; Save the link register
3190 stw r24,FM_ARG0+0x00(r1) ; Save a register
3191 stw r25,FM_ARG0+0x04(r1) ; Save a register
3192 mr r25,r7 ; Remember address of next va
3193 stw r26,FM_ARG0+0x08(r1) ; Save a register
3194 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3195 stw r28,FM_ARG0+0x10(r1) ; Save a register
3196 mr r24,r6 ; Save the new protection flags
3197 stw r29,FM_ARG0+0x14(r1) ; Save a register
3198 stw r30,FM_ARG0+0x18(r1) ; Save a register
3199 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3200 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
			lwz		r11,pmapFlags(r3)			; Get pmap's flags
3204 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3205 bne hpPanic ; Call not valid for guest shadow assist pmap
3208 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3209 lwz r7,pmapvr+4(r3) ; Get the second part
3212 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
3214 mr r27,r11 ; Remember the old MSR
3215 mr r26,r12 ; Remember the feature bits
3217 xor r28,r3,r7 ; Change the common 32- and 64-bit half
3219 bf-- pf64Bitb,hpSF1 ; skip if 32-bit...
3221 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3223 hpSF1: mr r29,r4 ; Save top half of vaddr
3224 mr r30,r5 ; Save the bottom half
3226 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3227 bl sxlkShared ; Go get a shared lock on the mapping lists
3228 mr. r3,r3 ; Did we get the lock?
3229 bne-- hpBadLock ; Nope...
3231 mr r3,r28 ; get the pmap address
3232 mr r4,r29 ; Get bits 0:31 to look for
3233 mr r5,r30 ; Get bits 32:64
3235 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
3237 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3238 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3239 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3240 crand cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
3241 mr. r31,r3 ; Save the mapping if we found it
3242 mr r29,r4 ; Save next va high half
3243 mr r30,r5 ; Save next va low half
3245 beq-- hpNotFound ; Not found...
3247 bf-- cr1_eq,hpNotAllowed ; Something special is happening...
3249 bt++ pf64Bitb,hpDo64 ; Split for 64 bit
3251 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3253 rlwimi r5,r24,0,mpPPb-32,mpPPe-32 ; Stick in the new pp (note that we ignore no-execute for 32-bit)
3254 mr. r3,r3 ; Was there a previously valid PTE?
3256 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3258 beq-- hpNoOld32 ; Nope...
3260 stw r5,4(r3) ; Store second half of PTE
3261 eieio ; Make sure we do not reorder
3262 stw r4,0(r3) ; Revalidate the PTE
3264 eieio ; Make sure all updates come first
3265 stw r6,0(r7) ; Unlock PCA
3267 hpNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3268 bl sxlkUnlock ; Unlock the search list
3270 li r3,mapRtOK ; Set normal return
3271 b hpR32 ; Join common...
3276 hpDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3278 rldimi r5,r24,0,mpNb ; Stick in the new no-execute and pp bits
3279 mr. r3,r3 ; Was there a previously valid PTE?
3281 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3283 beq-- hpNoOld64 ; Nope...
3285 std r5,8(r3) ; Store second half of PTE
3286 eieio ; Make sure we do not reorder
3287 std r4,0(r3) ; Revalidate the PTE
3289 eieio ; Make sure all updates come first
3290 stw r6,0(r7) ; Unlock PCA
3292 hpNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3293 bl sxlkUnlock ; Unlock the search list
3295 li r3,mapRtOK ; Set normal return
3296 b hpR64 ; Join common...
3300 hpReturn: bt++ pf64Bitb,hpR64 ; Yes...
3302 hpR32: mtmsr r27 ; Restore enables/translation/etc.
3304 b hpReturnC ; Join common...
3306 hpR64: mtmsrd r27 ; Restore enables/translation/etc.
3309 hpReturnC: stw r29,0(r25) ; Save the top of the next va
3310 stw r30,4(r25) ; Save the bottom of the next va
3311 lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return address
3312 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3313 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3314 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3315 mtlr r0 ; Restore the return
3316 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3317 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3318 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3319 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3320 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3321 lwz r1,0(r1) ; Pop the stack
3322 blr ; Leave...
3326 hpBadLock: li r3,mapRtBadLk ; Set lock time out error code
3327 b hpReturn ; Leave....
3329 hpNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3330 bl sxlkUnlock ; Unlock the search list
3332 li r3,mapRtNotFnd ; Set that we did not find the requested page
3333 b hpReturn ; Leave....
3336 hpNotAllowed: rlwinm. r0,r7,0,mpRIPb,mpRIPb ; Is it actually being removed?
3337 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3338 bne-- hpNotFound ; Yeah...
3339 bl sxlkUnlock ; Unlock the search list
3341 li r3,mapRtBlock ; Assume it was a block
3342 rlwinm r0,r7,0,mpType ; Isolate mapping type
3343 cmplwi r0,mpBlock ; Is this a block mapping?
3344 beq++ hpReturn ; Yes, leave...
3346 li r3,mapRtPerm ; Set that we hit a permanent page
3347 b hpReturn ; Leave....
3349 hpPanic: lis r0,hi16(Choke) ; System abend
3350 ori r0,r0,lo16(Choke) ; System abend
3351 li r3,failMapping ; Show that we failed some kind of mapping thing
3352 sc ; System abend
3356 ; int hw_test_rc(pmap, va, reset) - tests RC on a specific va
3358 ; Returns following code ORed with RC from mapping
3359 ; mapRtOK - if all is ok
3360 ; mapRtBadLk - if mapping lock fails
3361 ; mapRtNotFnd - if mapping is not found
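/*
 * Illustrative only: because hw_test_rc ORs the mapping's R and C bits into
 * the status code above, a C caller has to mask before comparing. A
 * self-contained sketch with placeholder bit values (the real mpR/mpC come
 * from this file's headers) and a hypothetical helper name:
 *
 *	#include <stdint.h>
 *
 *	typedef uint64_t addr64_t;
 *	struct pmap;
 *	enum { mapRtOK = 0 };                    // placeholder; real value in headers
 *	#define mpR 0x00000100u                  // reference bit (placeholder value)
 *	#define mpC 0x00000080u                  // change bit (placeholder value)
 *
 *	extern unsigned int hw_test_rc(struct pmap *pmap, addr64_t va, int reset);
 *
 *	static int page_was_modified(struct pmap *pmap, addr64_t va) {
 *		unsigned int ret = hw_test_rc(pmap, va, 1);   // test, then reset RC
 *		if ((ret & ~(mpR | mpC)) != mapRtOK)          // strip RC, check status
 *			return -1;                                // lock failed or not found
 *		return (ret & mpC) != 0;                      // changed since last reset?
 *	}
 */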
3364 .globl EXT(hw_test_rc)
3366 LEXT(hw_test_rc)
3367 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3368 mflr r0 ; Save the link register
3369 stw r24,FM_ARG0+0x00(r1) ; Save a register
3370 stw r25,FM_ARG0+0x04(r1) ; Save a register
3371 stw r26,FM_ARG0+0x08(r1) ; Save a register
3372 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3373 stw r28,FM_ARG0+0x10(r1) ; Save a register
3374 mr r24,r6 ; Save the reset request
3375 stw r29,FM_ARG0+0x14(r1) ; Save a register
3376 stw r30,FM_ARG0+0x18(r1) ; Save a register
3377 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3378 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
3381 lwz r11,pmapFlags(r3) ; Get pmaps flags
3382 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3383 bne htrPanic ; Call not valid for guest shadow assist pmap
3386 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3387 lwz r7,pmapvr+4(r3) ; Get the second part
3390 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
3392 mr r27,r11 ; Remember the old MSR
3393 mr r26,r12 ; Remember the feature bits
3395 xor r28,r3,r7 ; Change the common 32- and 64-bit half
3397 bf-- pf64Bitb,htrSF1 ; skip if 32-bit...
3399 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3401 htrSF1: mr r29,r4 ; Save top half of vaddr
3402 mr r30,r5 ; Save the bottom half
3404 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3405 bl sxlkShared ; Go get a shared lock on the mapping lists
3406 mr. r3,r3 ; Did we get the lock?
3408 bne-- htrBadLock ; Nope...
3410 mr r3,r28 ; get the pmap address
3411 mr r4,r29 ; Get bits 0:31 to look for
3412 mr r5,r30 ; Get bits 32:64
3414 bl EXT(mapSearch) ; Go see if we can find it (R7 comes back with mpFlags)
3416 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3417 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3418 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3419 crand cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
3420 mr. r31,r3 ; Save the mapping if we found it
3421 crandc cr1_eq,cr1_eq,cr0_eq ; cr1_eq <- found & normal & not permanent & not being removed
3423 bf-- cr1_eq,htrNotFound ; Not found, something special, or being removed...
3425 bt++ pf64Bitb,htrDo64 ; Split for 64 bit
3427 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3429 cmplwi cr1,r24,0 ; Do we want to clear RC?
3430 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3431 mr. r3,r3 ; Was there a previously valid PTE?
3432 li r0,lo16(mpR|mpC) ; Get bits to clear
3434 and r25,r5,r0 ; Save the RC bits
3435 beq++ cr1,htrNoClr32 ; Nope...
3437 andc r12,r12,r0 ; Clear mapping copy of RC
3438 andc r5,r5,r0 ; Clear PTE copy of RC
3439 sth r12,mpVAddr+6(r31) ; Set the new RC
3441 htrNoClr32: beq-- htrNoOld32 ; No previously valid PTE...
3443 sth r5,6(r3) ; Store updated RC
3444 eieio ; Make sure we do not reorder
3445 stw r4,0(r3) ; Revalidate the PTE
3447 eieio ; Make sure all updates come first
3448 stw r6,0(r7) ; Unlock PCA
3450 htrNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3451 bl sxlkUnlock ; Unlock the search list
3452 li r3,mapRtOK ; Set normal return
3453 b htrR32 ; Join common...
3458 htrDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3460 cmplwi cr1,r24,0 ; Do we want to clear RC?
3461 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3462 mr. r3,r3 ; Was there a previously valid PTE?
3463 li r0,lo16(mpR|mpC) ; Get bits to clear
3465 and r25,r5,r0 ; Save the RC bits
3466 beq++ cr1,htrNoClr64 ; Nope...
3468 andc r12,r12,r0 ; Clear mapping copy of RC
3469 andc r5,r5,r0 ; Clear PTE copy of RC
3470 sth r12,mpVAddr+6(r31) ; Set the new RC
3472 htrNoClr64: beq-- htrNoOld64 ; Nope, no previous PTE...
3474 sth r5,14(r3) ; Store updated RC
3475 eieio ; Make sure we do not reorder
3476 std r4,0(r3) ; Revalidate the PTE
3478 eieio ; Make sure all updates come first
3479 stw r6,0(r7) ; Unlock PCA
3481 htrNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3482 bl sxlkUnlock ; Unlock the search list
3483 li r3,mapRtOK ; Set normal return
3484 b htrR64 ; Join common...
3488 htrReturn: bt++ pf64Bitb,htrR64 ; Yes...
3490 htrR32: mtmsr r27 ; Restore enables/translation/etc.
3492 b htrReturnC ; Join common...
3494 htrR64: mtmsrd r27 ; Restore enables/translation/etc.
3497 htrReturnC: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return address
3498 or r3,r3,r25 ; Send the RC bits back
3499 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3500 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3501 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3502 mtlr r0 ; Restore the return
3503 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3504 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3505 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3506 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3507 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3508 lwz r1,0(r1) ; Pop the stack
3509 blr ; Leave...
3513 htrBadLock: li r3,mapRtBadLk ; Set lock time out error code
3514 b htrReturn ; Leave....
3517 htrNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3518 bl sxlkUnlock ; Unlock the search list
3520 li r3,mapRtNotFnd ; Set that we did not find the requested page
3521 b htrReturn ; Leave....
3523 htrPanic: lis r0,hi16(Choke) ; System abend
3524 ori r0,r0,lo16(Choke) ; System abend
3525 li r3,failMapping ; Show that we failed some kind of mapping thing
3526 sc ; System abend
3531 ; mapFindLockPN - find and lock physent for a given page number
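/*
 * Illustrative only: the bank scan below, restated in C. The mem_region
 * layout (mrStart, mrEnd, mrPhysTab, 8-byte physents, zero terminator) is
 * read off the code that follows; struct and function names are stand-ins.
 *
 *	struct phys_entry { unsigned int ppLink[2]; };        // 8 bytes per physent
 *	struct mem_region { struct phys_entry *mrPhysTab;     // 0 terminates the table
 *	                    unsigned int mrStart, mrEnd; };
 *	extern struct mem_region pmap_mem_regions[];
 *
 *	static struct phys_entry *find_physent(unsigned int ppnum) {
 *		for (struct mem_region *mr = pmap_mem_regions; mr->mrPhysTab; mr++)
 *			if (ppnum >= mr->mrStart && ppnum <= mr->mrEnd)   // inclusive bounds
 *				return &mr->mrPhysTab[ppnum - mr->mrStart];
 *		return 0;                                             // no bank covers it
 *	}
 */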
3536 mapFindLockPN: lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3537 mr r2,r3 ; Save our target
3538 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3540 mapFLPNitr: lwz r3,mrPhysTab(r9) ; Get the actual table address
3541 lwz r5,mrStart(r9) ; Get start of table entry
3542 lwz r0,mrEnd(r9) ; Get end of table entry
3543 addi r9,r9,mrSize ; Point to the next slot
3544 cmplwi cr2,r3,0 ; Are we at the end of the table?
3545 cmplw r2,r5 ; See if we are in this table
3546 cmplw cr1,r2,r0 ; Check end also
3547 sub r4,r2,r5 ; Calculate index to physical entry
3548 beq-- cr2,mapFLPNmiss ; Leave if we did not find an entry...
3549 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
3550 slwi r4,r4,3 ; Get offset to physical entry
3552 blt-- mapFLPNitr ; Did not find it...
3554 add r3,r3,r4 ; Point right to the slot
3555 b mapPhysLock ; Join common lock code
3558 mapFLPNmiss: li r3,0 ; Show that we did not find it
3559 blr ; Leave...
3563 ; mapPhysFindLock - find physent list and lock it
3564 ; R31 points to mapping
3569 mapPhysFindLock: lbz r4,mpFlags+1(r31) ; Get the index into the physent bank table
3570 lis r3,ha16(EXT(pmap_mem_regions)) ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part)
3571 rlwinm r4,r4,2,24,29 ; Mask index bits and convert to byte offset
3572 addi r4,r4,lo16(EXT(pmap_mem_regions)) ; Get low part of address of entry
3573 add r3,r3,r4 ; Point to table entry
3574 lwz r5,mpPAddr(r31) ; Get physical page number
3575 lwz r7,mrStart(r3) ; Get the start of range
3576 lwz r3,mrPhysTab(r3) ; Get the start of the entries for this bank
3577 sub r6,r5,r7 ; Get index to physent
3578 rlwinm r6,r6,3,0,28 ; Get offset to physent
3579 add r3,r3,r6 ; Point right to the physent
3580 b mapPhysLock ; Join in the lock...
3583 ; mapPhysLock - lock a physent list
3584 ; R3 contains list header
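/*
 * Illustrative only: the lock protocol below, restated with C11 atomics.
 * Bit 0 of ppLink is the lock bit; the assembly additionally stores to a
 * scratch word (lgKillResv) to kill its reservation before spinning, which
 * has no direct C analogue. Names are stand-ins.
 *
 *	#include <stdatomic.h>
 *	#define PP_LOCK 0x80000000u              // bit 0 of the ppLink word
 *
 *	static void phys_lock(_Atomic unsigned int *ppLink) {
 *		for (;;) {
 *			unsigned int old = atomic_load(ppLink);
 *			if (old & PP_LOCK)
 *				continue;                    // spin read-only while held
 *			if (atomic_compare_exchange_weak(ppLink, &old, old | PP_LOCK))
 *				return;                      // got it (isync = acquire fence)
 *		}
 *	}
 */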
3589 mapPhysLockS: li r2,lgKillResv ; Get a spot to kill reservation
3590 stwcx. r2,0,r2 ; Kill it...
3593 mapPhysLockT: lwz r2,ppLink(r3) ; Get physent chain header
3594 rlwinm. r2,r2,0,0,0 ; Is lock clear?
3595 bne-- mapPhysLockT ; Nope, still locked...
3598 mapPhysLock: lwarx r2,0,r3 ; Get the lock
3599 rlwinm. r0,r2,0,0,0 ; Is it locked?
3600 oris r0,r2,0x8000 ; Set the lock bit
3601 bne-- mapPhysLockS ; It is locked, spin on it...
3602 stwcx. r0,0,r3 ; Try to stuff it back...
3603 bne-- mapPhysLock ; Collision, try again...
3604 isync ; Clear any speculations
3605 blr ; Leave...
3609 ; mapPhysUnlock - unlock a physent list
3610 ; R3 contains list header
3615 mapPhysUnlock: lwz r0,ppLink(r3) ; Get physent chain header
3616 rlwinm r0,r0,0,1,31 ; Clear the lock bit
3617 eieio ; Make sure unlock comes last
3618 stw r0,ppLink(r3) ; Unlock the list
3619 blr ; Leave...
3622 ; mapPhysMerge - merge the RC bits into the master copy
3623 ; R3 points to the physent
3624 ; R4 contains the RC bits
3626 ; Note: we just return if RC is 0
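/*
 * Illustrative only: mapPhysMerge is just an atomic fetch-OR of the RC bits
 * into the low word of ppLink, i.e. (names are stand-ins):
 *
 *	#include <stdatomic.h>
 *
 *	static void phys_merge_rc(_Atomic unsigned int *pp_rc, unsigned int rc) {
 *		if (rc == 0)
 *			return;                          // nothing to merge, leave early
 *		atomic_fetch_or(pp_rc, rc);          // the lwarx/or/stwcx. retry loop
 *	}
 */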
3631 mapPhysMerge: rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits
3632 la r5,ppLink+4(r3) ; Point to the RC field
3633 beqlr-- ; Leave if RC is 0...
3636 mapPhysMergeT: lwarx r6,0,r5 ; Get the RC part
3637 or r6,r6,r4 ; Merge in the RC
3638 stwcx. r6,0,r5 ; Try to stuff it back...
3639 bne-- mapPhysMergeT ; Collision, try again...
3640 blr ; Leave...
3643 ; Sets the physent link pointer and preserves all flags
3644 ; The list is locked
3645 ; R3 points to physent
3646 ; R4 has link to set
3652 mapPhyCSet32: la r5,ppLink+4(r3) ; Point to the link word
3655 mapPhyCSetR: lwarx r2,0,r5 ; Get the link and flags
3656 rlwimi r4,r2,0,ppFlags ; Insert the flags
3657 stwcx. r4,0,r5 ; Stick them back
3658 bne-- mapPhyCSetR ; Someone else did something, try again...
3659 blr ; Return...
3664 mapPhyCSet64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
3665 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
3668 mapPhyCSet64x: ldarx r2,0,r3 ; Get the link and flags
3669 and r5,r2,r0 ; Isolate the flags
3670 or r6,r4,r5 ; Add them to the link
3671 stdcx. r6,0,r3 ; Stick them back
3672 bne-- mapPhyCSet64x ; Someone else did something, try again...
3673 blr ; Return...
3676 ; mapBumpBusy - increment the busy count on a mapping
3677 ; R3 points to mapping
3683 mapBumpBusy: lwarx r4,0,r3 ; Get mpBusy
3684 addis r4,r4,0x0100 ; Bump the busy count
3685 stwcx. r4,0,r3 ; Save it back
3686 bne-- mapBumpBusy ; This did not work, try again...
3687 blr ; Leave...
3690 ; mapDropBusy - decrement the busy count on a mapping
3691 ; R3 points to mapping
3694 .globl EXT(mapping_drop_busy)
3697 LEXT(mapping_drop_busy)
3699 mapDropBusy: lwarx r4,0,r3 ; Get mpBusy
3700 addis r4,r4,0xFF00 ; Drop the busy count
3701 stwcx. r4,0,r3 ; Save it back
3702 bne-- mapDropBusy ; This did not work, try again...
3703 blr ; Leave...
3706 ; mapDrainBusy - drain the busy count on a mapping
3707 ; R3 points to mapping
3708 ; Note: we already have a busy for ourselves. Only one
3709 ; busy per processor is allowed, so we just spin here
3710 ; waiting for the count to drop to 1.
3711 ; Also, the mapping can not be on any lists when we do this
3712 ; so all we are doing is waiting until it can be released.
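/*
 * Illustrative only: the busy count occupies the high-order byte of the
 * mpFlags word, so bump/drop atomically add or subtract 0x01000000 and
 * drain simply polls until only our own busy remains. A C11 sketch with
 * stand-in names:
 *
 *	#include <stdatomic.h>
 *	#define MP_BUSY_ONE 0x01000000u          // one busy, high byte of mpFlags
 *
 *	static void map_bump_busy(_Atomic unsigned int *mpFlags) {
 *		atomic_fetch_add(mpFlags, MP_BUSY_ONE);
 *	}
 *
 *	static void map_drop_busy(_Atomic unsigned int *mpFlags) {
 *		atomic_fetch_sub(mpFlags, MP_BUSY_ONE);
 *	}
 *
 *	static void map_drain_busy(_Atomic unsigned int *mpFlags) {
 *		while ((atomic_load(mpFlags) >> 24) != 1)
 *			;                                // wait for all other busies to drop
 *	}
 */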
3718 mapDrainBusy: lwz r4,mpFlags(r3) ; Get mpBusy
3719 rlwinm r4,r4,8,24,31 ; Clean it up
3720 cmplwi r4,1 ; Is it just our busy?
3721 beqlr++ ; Yeah, it is clear...
3722 b mapDrainBusy ; Try again...
3727 ; handleDSeg - handle a data segment fault
3728 ; handleISeg - handle an instruction segment fault
3730 ; All that we do here is to map these to DSI or ISI and ensure
3731 ; that the hash bit is not set. This forces the fault code
3732 ; to also handle the missing segment.
3734 ; At entry R2 contains per_proc, R13 contains savarea pointer,
3735 ; and R11 is the exception code.
3739 .globl EXT(handleDSeg)
3741 LEXT(handleDSeg)
3743 li r11,T_DATA_ACCESS ; Change fault to DSI
3744 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3745 b EXT(handlePF) ; Join common...
3748 .globl EXT(handleISeg)
3750 LEXT(handleISeg)
3752 li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI
3753 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3754 b EXT(handlePF) ; Join common...
3758 * handlePF - handle a page fault interruption
3760 * At entry R2 contains per_proc, R13 contains savarea pointer,
3761 * and R11 is the exception code.
3763 * This first part does a quick check to see if we can handle the fault.
3764 * We cannot handle any kind of protection exceptions here, so we pass
3765 * them up to the next level.
3767 * NOTE: In order for a page-fault redrive to work, the translation miss
3768 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
3769 * before we come here.
3773 .globl EXT(handlePF)
3775 LEXT(handlePF)
3777 mfsprg r12,2 ; Get feature flags
3778 cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction
3779 lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode
3780 mtcrf 0x02,r12 ; move pf64Bit to cr6
3781 lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here
3782 lwz r18,SAVflags(r13) ; Get the flags
3784 beq-- gotIfetch ; We have an IFETCH here...
3786 lwz r27,savedsisr(r13) ; Get the DSISR
3787 lwz r29,savedar(r13) ; Get the first half of the DAR
3788 lwz r30,savedar+4(r13) ; And second half
3790 b ckIfProt ; Go check if this is a protection fault...
3792 gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value
3793 lwz r29,savesrr0(r13) ; Get the first half of the instruction address
3794 lwz r30,savesrr0+4(r13) ; And second half
3795 stw r27,savedsisr(r13) ; Save the "constructed" DSISR
3797 ckIfProt: and. r4,r27,r0 ; Is this a non-handlable exception?
3798 li r20,64 ; Set a limit of 64 nests for sanity check
3799 bne-- hpfExit ; Yes... (probably not though)
3802 ; Note: if the RI is on, we are accessing user space from the kernel, therefore we
3803 ; should be loading the user pmap here.
3806 andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space?
3807 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
3808 mr r19,r2 ; Remember the per_proc
3809 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
3810 mr r23,r30 ; Save the low part of faulting address
3811 beq-- hpfInKern ; Skip if we are in the kernel
3812 la r8,ppUserPmap(r19) ; Point to the current user pmap
3814 hpfInKern: mr r22,r29 ; Save the high part of faulting address
3816 bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit...
3819 ; On 32-bit machines we emulate a segment exception by loading unused SRs with a
3820 ; predefined value that corresponds to no address space. When we see that value
3821 ; we turn off the PTE miss bit in the DSISR to drive the code later on that will
3822 ; cause the proper SR to be loaded.
3825 lwz r28,4(r8) ; Pick up the pmap
3826 rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive?
3827 mr r25,r28 ; Save the original pmap (in case we nest)
3828 lwz r0,pmapFlags(r28) ; Get pmap's flags
3829 bne hpfGVtest ; Segs are not ours if so...
3830 mfsrin r4,r30 ; Get the SR that was used for translation
3831 cmplwi r4,invalSpace ; Is this a simulated segment fault?
3832 bne++ hpfGVtest ; No...
3834 rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR
3835 b hpfGVtest ; Join on up...
3839 nop ; Push hpfNest to a 32-byte boundary
3840 nop ; Push hpfNest to a 32-byte boundary
3841 nop ; Push hpfNest to a 32-byte boundary
3843 hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit)
3844 mr r25,r28 ; Save the original pmap (in case we nest)
3845 lwz r0,pmapFlags(r28) ; Get pmap's flags
3847 hpfGVtest: rlwinm. r0,r0,0,pmapVMgsaa ; Using guest shadow mapping assist?
3848 bne hpfGVxlate ; Yup, do accelerated shadow stuff
3851 ; This is where we loop descending nested pmaps
3854 hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3855 addi r20,r20,-1 ; Count nest try
3856 bl sxlkShared ; Go get a shared lock on the mapping lists
3857 mr. r3,r3 ; Did we get the lock?
3858 bne-- hpfBadLock ; Nope...
3860 mr r3,r28 ; Get the pmap pointer
3861 mr r4,r22 ; Get top of faulting vaddr
3862 mr r5,r23 ; Get bottom of faulting vaddr
3863 bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags)
3865 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one?
3866 mr. r31,r3 ; Save the mapping if we found it
3867 cmplwi cr1,r0,0 ; Check for removal
3868 crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing
3870 bt-- cr0_eq,hpfNotFound ; Not found or removing...
3872 rlwinm r0,r7,0,mpType ; Isolate mapping type
3873 cmplwi r0,mpNest ; Are we again nested?
3874 cmplwi cr1,r0,mpLinkage ; Are we a linkage type?
3875 cror cr0_eq,cr1_eq,cr0_eq ; cr0_eq <- nested or linkage type?
3876 mr r26,r7 ; Get the flags for this mapping (passed back from search call)
3878 lhz r21,mpSpace(r31) ; Get the space
3880 bne++ hpfFoundIt ; No, we found our guy...
3883 #if pmapTransSize != 12
3884 #error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize
3885 #endif
3886 cmplwi r0,mpLinkage ; Linkage mapping?
3887 cmplwi cr1,r20,0 ; Too many nestings?
3888 beq-- hpfSpclNest ; Do we need to do special handling?
3890 hpfCSrch: lhz r21,mpSpace(r31) ; Get the space
3891 lwz r8,mpNestReloc(r31) ; Get the vaddr relocation
3892 lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half
3893 la r3,pmapSXlk(r28) ; Point to the old pmap search lock
3894 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
3895 lis r10,hi16(EXT(pmapTrans)) ; Get the translate table
3896 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
3897 blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop...
3898 or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit
3899 slwi r11,r21,3 ; Multiply space by 8
3900 ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part
3901 addc r23,r23,r9 ; Relocate bottom half of vaddr
3902 lwz r10,0(r10) ; Get the actual translation map
3903 slwi r12,r21,2 ; Multiply space by 4
3904 add r10,r10,r11 ; Add in the higher part of the index
3905 rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit)
3906 adde r22,r22,r8 ; Relocate the top half of the vaddr
3907 add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry
3908 bl sxlkUnlock ; Unlock the search list
3910 bt++ pf64Bitb,hpfGetPmap64 ; Separate handling for 64-bit machines
3911 lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap
3912 cmplwi r28,0 ; Is the pmap paddr valid?
3913 bne+ hpfNest ; Nest into new pmap...
3914 b hpfBadPmap ; Handle bad pmap
3917 hpfGetPmap64: ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap
3918 cmpldi r28,0 ; Is the pmap paddr valid?
3919 bne++ hpfNest ; Nest into new pmap...
3920 b hpfBadPmap ; Handle bad pmap
3924 ; Error condition. We only allow 64 nestings. This keeps us from having to
3925 ; check for recursive nests when we install them.
3931 hpfNestTooMuch: lwz r20,savedsisr(r13) ; Get the DSISR
3932 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3933 bl sxlkUnlock ; Unlock the search list (R3 good from above)
3934 ori r20,r20,1 ; Indicate that there was a nesting problem
3935 stw r20,savedsisr(r13) ; Stash it
3936 lwz r11,saveexception(r13) ; Restore the exception code
3937 b EXT(PFSExit) ; Yes... (probably not though)
3940 ; Error condition - lock failed - this is fatal
3946 hpfBadLock: lis r0,hi16(Choke) ; System abend
3947 ori r0,r0,lo16(Choke) ; System abend
3948 li r3,failMapping ; Show mapping failure
3949 sc ; System abend
3952 ; Error condition - space id selected an invalid pmap - fatal
3958 hpfBadPmap: lis r0,hi16(Choke) ; System abend
3959 ori r0,r0,lo16(Choke) ; System abend
3960 li r3,failPmap ; Show invalid pmap
3961 sc ; System abend
3964 ; Did not find any kind of mapping
3970 hpfNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3971 bl sxlkUnlock ; Unlock it
3972 lwz r11,saveexception(r13) ; Restore the exception code
3974 hpfExit: ; We need this because we can not do a relative branch
3975 b EXT(PFSExit) ; Yes... (probably not though)
3979 ; Here is where we handle special mappings. So far, the only use is to load a
3980 ; processor specific segment register for copy in/out handling.
3982 ; The only (so far implemented) special map is used for copyin/copyout.
3983 ; We keep a mapping of a "linkage" mapping in the per_proc.
3984 ; The linkage mapping is basically a nested pmap that is switched in
3985 ; as part of context switch. It relocates the appropriate user address
3986 ; space slice into the right place in the kernel.
3992 hpfSpclNest: la r31,ppUMWmp(r19) ; Just point to the mapping
3993 oris r27,r27,hi16(dsiLinkage) ; Show that we had a linkage mapping here
3994 b hpfCSrch ; Go continue search...
3998 ; We have now found a mapping for the address we faulted on.
4002 ; Here we go about calculating what the VSID should be. We concatenate
4003 ; the space ID (14 bits wide) 3 times. We then slide the vaddr over
4004 ; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID).
4005 ; Then we XOR the expanded space ID and the shifted vaddr. This gives us
4006 ; the VSID.
4008 ; This is used both for segment handling and PTE handling
4012 #if maxAdrSpb != 14
4013 #error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!!
4014 #endif
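/*
 * Illustrative only: a simplified C restatement of the VSID construction
 * just described (three concatenated copies of the 14-bit space ID, XORed
 * with the segment number slid up by one space-ID width; the low 14 bits
 * therefore stay equal to the bare space ID). Exact bit placement follows
 * the description above; names are stand-ins.
 *
 *	#include <stdint.h>
 *
 *	static uint64_t make_vsid(uint32_t space, uint64_t ea) {
 *		uint64_t sid  = space & 0x3FFFu;                   // 14-bit space ID
 *		uint64_t hash = (sid << 28) | (sid << 14) | sid;   // 3 concatenated copies
 *		uint64_t seg  = ea >> 28;                          // segment number (EA 0:35)
 *		return (hash ^ (seg << 14)) & 0x000FFFFFFFFFFFFFull;  // trim for 65-bit VPN
 *	}
 */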
4016 ; Important non-volatile registers at this point ('home' means the final pmap/mapping found
4017 ; when a multi-level mapping has been successfully searched):
4018 ; r21: home space id number
4019 ; r22: relocated high-order 32 bits of vaddr
4020 ; r23: relocated low-order 32 bits of vaddr
4021 ; r25: pmap physical address
4023 ; r28: home pmap physical address
4024 ; r29: high-order 32 bits of faulting vaddr
4025 ; r30: low-order 32 bits of faulting vaddr
4026 ; r31: mapping's physical address
4030 hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment
4031 hpfGVfound: rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment?
4032 rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID
4033 rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order
4034 rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over
4035 rlwinm r0,r27,0,dsiLinkageb,dsiLinkageb ; Isolate linkage mapping flag
4036 rlwimi r21,r21,14,4,17 ; Make a second copy of space above first
4037 cmplwi cr5,r0,0 ; Did we just do a special nesting?
4038 rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35
4039 crorc cr0_eq,cr0_eq,cr5_eq ; Force ourselves through the seg load code if special nest
4040 rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register
4041 xor r14,r14,r20 ; Calculate the top half of VSID
4042 xor r15,r15,r21 ; Calculate the bottom half of the VSID
4043 rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing)
4044 rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry
4045 rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top
4046 rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position
4047 or r12,r12,r15 ; Add key into the bottom of VSID
4049 ; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12
4051 bne++ hpfPteMiss ; Nope, normal PTE miss...
4054 ; Here is the only place that we make an entry in the pmap segment cache.
4056 ; Note that we do not make an entry in the segment cache for special
4057 ; nested mappings. This makes the copy in/out segment get refreshed
4058 ; when switching threads.
4060 ; The first thing that we do is to look up the ESID we are going to load
4061 ; into a segment in the pmap cache. If it is already there, this is
4062 ; a segment that appeared since the last time we switched address spaces.
4063 ; If all is correct, then it was another processor that made the cache
4064 ; entry. If not, well, it is an error that we should die on, but I have
4065 ; not figured a good way to trap it yet.
4067 ; If we get a hit, we just bail, otherwise, lock the pmap cache, select
4068 ; an entry based on the generation number, update the cache entry, and
4069 ; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit
4070 ; entries that correspond to the last 4 bits (32:35 for 64-bit and
4071 ; 0:3 for 32-bit) of the ESID.
4073 ; Then we unlock and bail.
4075 ; First lock it. Then select a free slot or steal one based on the generation
4076 ; number. Then store it, update the allocation flags, and unlock.
4078 ; The cache entry contains an image of the ESID/VSID pair we would load for
4079 ; 64-bit architecture. For 32-bit, it is a simple transform to an SR image.
4081 ; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not
4082 ; the current one, which may have changed because we nested.
4084 ; Also remember that we do not store the valid bit in the ESID. If we
4085 ; do, this will break some other stuff.
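/*
 * Illustrative only: the sub-tag update performed below, in C. The sub-tag
 * is 16 four-bit entries packed big-endian into two 32-bit words; entry n
 * records the low 4 bits of the ESID held by cache slot n. Names are
 * stand-ins; the layout is read off the code that follows.
 *
 *	static void subtag_set(unsigned int subtag[2], unsigned int slot,
 *	                       unsigned int esid_low4) {
 *		unsigned int word  = slot >> 3;                  // slots 0-7 word 0, 8-15 word 1
 *		unsigned int shift = 28 - ((slot & 7) << 2);     // big-endian nibble position
 *		subtag[word] = (subtag[word] & ~(0xFu << shift)) // clear the slot's old tag
 *		             | ((esid_low4 & 0xFu) << shift);    // record the new ESID nibble
 *	}
 */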
4088 bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault....
4090 mr r3,r25 ; Point to the pmap
4091 mr r4,r22 ; ESID high half
4092 mr r5,r23 ; ESID low half
4093 bl pmapCacheLookup ; Go see if this is in the cache already
4095 mr. r3,r3 ; Did we find it?
4096 mr r4,r11 ; Copy this to a different register
4098 bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry...
4100 lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table
4101 lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table
4103 cntlzw r7,r4 ; Find a free slot
4105 subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one
4106 rlwinm r30,r30,0,0,3 ; Clean up the ESID
4107 srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not
4108 addi r5,r4,1 ; Bump the generation number
4109 and r7,r7,r6 ; Clear bit number if none empty
4110 andc r8,r4,r6 ; Clear generation count if we found an empty
4111 rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word
4112 or r7,r7,r8 ; Select a slot number
4114 andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using
4115 oris r8,r8,0x8000 ; Get the high bit on
4116 la r9,pmapSegCache(r25) ; Point to the segment cache
4117 slwi r6,r7,4 ; Get index into the segment cache
4118 slwi r2,r7,2 ; Get index into the segment cache sub-tag index
4119 srw r8,r8,r7 ; Get the mask
4120 cmplwi r2,32 ; See if we are in the first or second half of sub-tag
4121 li r0,0 ; Clear for sub-tag mask build
4122 rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out
4123 oris r0,r0,0xF000 ; Get the sub-tag mask
4124 add r9,r9,r6 ; Point to the cache slot
4125 srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift work for either half)
4126 srw r5,r30,r2 ; Slide sub-tag to right slot (shift work for either half)
4128 stw r29,sgcESID(r9) ; Save the top of the ESID
4129 andc r10,r10,r0 ; Clear sub-tag slot in case we are in top
4130 andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom
4131 stw r30,sgcESID+4(r9) ; Save the bottom of the ESID
4132 or r10,r10,r5 ; Stick in subtag in case top half
4133 or r11,r11,r5 ; Stick in subtag in case bottom half
4134 stw r14,sgcVSID(r9) ; Save the top of the VSID
4135 andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated
4136 stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key
4137 bge hpfSCSTbottom ; Go save the bottom part of sub-tag
4139 stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag
4140 b hpfNoCacheEnt ; Go finish up...
4143 hpfSCSTbottom: stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag
4147 hpfNoCacheEnt: eieio ; Make sure cache is updated before lock
4148 stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number
4152 hpfNoCacheEnt2: lwz r4,ppMapFlags(r19) ; Get the protection key modifier
4153 bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment...
4156 ; Make and enter 32-bit segment register
4159 lwz r16,validSegs(r19) ; Get the valid SR flags
4160 xor r12,r12,r4 ; Alter the storage key before loading segment register
4161 rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting
4162 rlwinm r6,r12,19,1,3 ; Insert the keys and N bit
4163 lis r0,0x8000 ; Set bit 0
4164 rlwimi r6,r12,20,12,31 ; Insert bits 4:23 of the VSID
4165 srw r0,r0,r2 ; Get bit corresponding to SR
4166 rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents
4167 or r16,r16,r0 ; Show that SR is valid
4169 mtsrin r6,r30 ; Set the actual SR
4171 stw r16,validSegs(r19) ; Set the valid SR flags
4173 b hpfPteMiss ; SR loaded, go do a PTE...
4176 ; Make and enter 64-bit segment look-aside buffer entry.
4177 ; Note that the cache entry is the right format except for valid bit.
4178 ; We also need to convert from long long to 64-bit register values.
4185 hpfLoadSeg64: ld r16,validSegs(r19) ; Get the valid SLB entry flags
4186 sldi r8,r29,32 ; Move high order address over
4187 sldi r10,r14,32 ; Move high part of VSID over
4189 not r3,r16 ; Make valids be 0s
4190 li r0,1 ; Prepare to set bit 0
4192 cntlzd r17,r3 ; Find a free SLB
4193 xor r12,r12,r4 ; Alter the storage key before loading segment table entry
4194 or r9,r8,r30 ; Form full 64-bit address
4195 cmplwi r17,63 ; Did we find a free SLB entry?
4196 sldi r0,r0,63 ; Get bit 0 set
4197 or r10,r10,r12 ; Move in low part and keys
4198 addi r17,r17,1 ; Skip SLB 0 always
4199 blt++ hpfFreeSeg ; Yes, go load it...
4202 ; No free SLB entries, select one that is in use and invalidate it
4204 lwz r4,ppSegSteal(r19) ; Get the next slot to steal
4205 addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
4206 addi r4,r4,1 ; Set next slot to steal
4207 slbmfee r7,r17 ; Get the entry that is in the selected spot
4208 subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap
4209 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
4210 srawi r2,r2,31 ; Get -1 if steal index still in range
4211 slbie r7 ; Invalidate the in-use SLB entry
4212 and r4,r4,r2 ; Reset steal index when it should wrap
4215 stw r4,ppSegSteal(r19) ; Set the next slot to steal
4217 ; We are now ready to stick the SLB entry in the SLB and mark it in use
4221 hpfFreeSeg: subi r4,r17,1 ; Adjust shift to account for skipping slb 0
4222 mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear
4223 srd r0,r0,r4 ; Set bit mask for allocation
4224 oris r9,r9,0x0800 ; Turn on the valid bit
4225 or r16,r16,r0 ; Turn on the allocation flag
4226 rldimi r9,r17,0,58 ; Copy in the SLB entry selector
4228 beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest...
4229 slbie r7 ; Blow away a potential duplicate
4231 hpfNoBlow: slbmte r10,r9 ; Make that SLB entry
4233 std r16,validSegs(r19) ; Mark as valid
4234 b hpfPteMiss ; STE loaded, go do a PTE...
4237 ; The segment has been set up and loaded if need be. Now we are ready to build the
4238 ; PTE and get it into the hash table.
4240 ; Note that there is actually a race here. If we start fault processing on
4241 ; a different pmap, i.e., we have descended into a nested pmap, it is possible
4242 ; that the nest could have been removed from the original pmap. We would
4243 ; succeed with this translation anyway. I do not think we need to worry
4244 ; about this (famous last words) because nobody should be unnesting anything
4245 ; if there are still people actively using them. It should be up to the
4246 ; higher level VM system to put the kibosh on this.
4248 ; There is also another race here: if we fault on the same mapping on more than
4249 ; one processor at the same time, we could end up with multiple PTEs for the same
4250 ; mapping. This is not a good thing.... We really only need one of the
4251 ; fault handlers to finish, so what we do is to set a "fault in progress" flag in
4252 ; the mapping. If we see that set, we just abandon the handler and hope that by
4253 ; the time we restore context and restart the interrupted code, the fault has
4254 ; been resolved by the other guy. If not, we will take another fault.
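/*
 * Illustrative only: the fault-in-progress handshake described above, as a
 * C11 compare-and-swap loop (the assembly uses lwarx/stwcx. on the mpFlags
 * word; the bit value is a placeholder, names are stand-ins):
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *	#define MP_FIP 0x00000080u               // fault-in-progress bit (placeholder)
 *
 *	static bool claim_fault(_Atomic unsigned int *mpFlags) {
 *		unsigned int old = atomic_load(mpFlags);
 *		do {
 *			if (old & MP_FIP)
 *				return false;                // another CPU owns this fault: abandon
 *		} while (!atomic_compare_exchange_weak(mpFlags, &old, old | MP_FIP));
 *		return true;                         // we own the fault now
 *	}
 */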
4258 ; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not.
4259 ; It is required to stay there until after we call mapSelSlot!!!!
4264 hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field
4265 lwz r12,mpPte(r31) ; Get the quick pointer to PTE
4266 li r3,mpHValid ; Get the PTE valid bit
4267 andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side?
4268 ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag
4269 crnot cr1_eq,cr0_eq ; Remember if FIP was on
4270 and. r12,r12,r3 ; Isolate the valid bit
4271 crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail...
4272 beq-- hpfAbandon ; Yes, other processor is or already has handled this...
4273 rlwinm r0,r2,0,mpType ; Isolate mapping type
4274 cmplwi r0,mpBlock ; Is this a block mapping?
4275 crnot cr7_eq,cr0_eq ; Remember if we have a block mapping
4276 stwcx. r2,0,r31 ; Store the flags
4277 bne-- hpfPteMiss ; Collision, try again...
4279 bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff...
4282 ; At this point we are about to do the 32-bit PTE generation.
4284 ; The following is the R14:R15 pair that contains the "shifted" VSID:
4288 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4289 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4290 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4292 ; The 24 bits of the 32-bit architecture VSID is in the following:
4296 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4297 ; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4298 ; +--------+--------+--------+--------+--------+--------+--------+--------+
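/*
 * Illustrative only: the classic 32-bit hashed page table address
 * computation being assembled below, restated in C (SDR1 supplies the hash
 * table base and size mask; names are stand-ins):
 *
 *	#include <stdint.h>
 *
 *	static uint32_t pteg_address(uint32_t sdr1, uint32_t vsid, uint32_t ea) {
 *		uint32_t htaborg  = sdr1 & 0xFFFF0000u;           // hash table base
 *		uint32_t htabmask = sdr1 & 0x000001FFu;           // table size mask
 *		uint32_t hash = (vsid & 0x7FFFFu) ^ ((ea >> 12) & 0xFFFFu);
 *		uint32_t wrap = (htabmask << 10) | 0x3FFu;        // wrap hash into table
 *		return htaborg | ((hash & wrap) << 6);            // 64-byte PTEGs
 *	}
 */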
4303 lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion)
4304 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4306 mfsdr1 r27 ; Get the hash table base address
4308 rlwinm r0,r23,0,4,19 ; Isolate just the page index
4309 rlwinm r18,r23,10,26,31 ; Extract the API
4310 xor r19,r15,r0 ; Calculate hash << 12
4311 mr r2,r25 ; Save the flag part of the mapping
4312 rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image
4313 rlwinm r16,r27,16,7,15 ; Extract the hash table size
4314 rlwinm r25,r25,0,0,19 ; Clear out the flags
4315 slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing not supported)
4316 sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map)
4317 ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask
4318 rlwinm r27,r27,0,0,15 ; Extract the hash table base
4319 rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table
4320 add r24,r24,r25 ; Adjust to true physical address
4321 rlwimi r18,r15,27,5,24 ; Move bits 32:51 of the "shifted" VSID into the PTE image
4322 rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot
4323 and r19,r19,r16 ; Wrap hash table offset into the hash table
4324 ori r24,r24,lo16(mpR) ; Turn on the reference bit right now
4325 rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA
4326 add r19,r19,r27 ; Point to the PTEG
4327 subfic r20,r20,-4 ; Get negative offset to PCA
4328 oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4329 add r20,r20,r27 ; Point to the PCA slot
4332 ; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower.
4333 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4335 ; We need to check the PTE pointer (mpPte) again after we lock the PTEG. It is possible
4336 ; that some other processor beat us and stuck in a PTE or that
4337 ; all we had was a simple segment exception and the PTE was there the whole time.
4338 ; If we find a valid pointer, we are done.
4341 mr r7,r20 ; Copy the PCA pointer
4342 bl mapLockPteg ; Lock the PTEG
4344 lwz r12,mpPte(r31) ; Get the offset to the PTE
4345 mr r17,r6 ; Remember the PCA image
4346 mr r16,r6 ; Prime the post-select PCA image
4347 andi. r0,r12,mpHValid ; Is there a PTE here already?
4348 li r21,8 ; Get the number of slots
4350 bne- cr7,hpfNoPte32 ; Skip this for a block mapping...
4352 bne- hpfBailOut ; Someone already did this for us...
4355 ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a
4356 ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4357 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4358 ; R4 returns the slot index.
4360 ; REMEMBER: CR7 indicates that we are building a block mapping.
4363 hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots
4364 mr r6,r17 ; Get back the original PCA
4365 rlwimi r6,r16,0,8,15 ; Insert the updated steal slot
4366 blt- hpfBailOut ; Holy Cow, all slots are locked...
4368 bl mapSelSlot ; Go select a slot (note that the PCA image is already set up)
4370 cmplwi cr5,r3,1 ; Did we steal a slot?
4371 rlwimi r19,r4,3,26,28 ; Insert PTE index into PTEG address yielding PTE address
4372 mr r16,r6 ; Remember the PCA image after selection
4373 blt+ cr5,hpfInser32 ; Nope, no steal...
4375 lwz r6,0(r19) ; Get the old PTE
4376 lwz r7,4(r19) ; Get the real part of the stealee
4377 rlwinm r6,r6,0,1,31 ; Clear the valid bit
4378 bgt cr5,hpfNipBM ; Do not try to lock a non-existent physent for a block mapping...
4379 srwi r3,r7,12 ; Change phys address to a ppnum
4380 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4381 cmplwi cr1,r3,0 ; Check if this is in RAM
4382 bne- hpfNoPte32 ; Could not get it, try for another...
4384 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4386 hpfNipBM: stw r6,0(r19) ; Set the invalid PTE
4388 sync ; Make sure the invalid is stored
4389 li r9,tlbieLock ; Get the TLBIE lock
4390 rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part
4392 hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock
4393 mfsprg r4,0 ; Get the per_proc
4394 rlwinm r8,r6,25,18,31 ; Extract the space ID
4395 rlwinm r11,r6,25,18,31 ; Extract the space ID
4396 lwz r7,hwSteals(r4) ; Get the steal count
4397 srwi r2,r6,7 ; Align segment number with hash
4398 rlwimi r11,r11,14,4,17 ; Get copy above ourselves
4399 mr. r0,r0 ; Is it locked?
4400 srwi r0,r19,6 ; Align PTEG offset for back hash
4401 xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits)
4402 xor r11,r11,r0 ; Hash backwards to partial vaddr
4403 rlwinm r12,r2,14,0,3 ; Shift segment up
4404 mfsprg r2,2 ; Get feature flags
4405 li r0,1 ; Get our lock word
4406 rlwimi r12,r6,22,4,9 ; Move up the API
4407 bne- hpfTLBIE32 ; It is locked, go wait...
4408 rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr
4410 stwcx. r0,0,r9 ; Try to get it
4411 bne- hpfTLBIE32 ; We was beat...
4412 addi r7,r7,1 ; Bump the steal count
4414 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
4415 li r0,0 ; Lock clear value
4417 tlbie r12 ; Invalidate it everywhere
4420 beq- hpfNoTS32 ; Can not have MP on this machine...
4422 eieio ; Make sure that the tlbie happens first
4423 tlbsync ; Wait for everyone to catch up
4424 sync ; Make sure of it all
4426 hpfNoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock
4428 stw r7,hwSteals(r4) ; Save the steal count
4429 bgt cr5,hpfInser32 ; We just stole a block mapping...
4431 lwz r4,4(r19) ; Get the RC of the just invalidated PTE
4433 la r11,ppLink+4(r3) ; Point to the master RC copy
4434 lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping
4435 rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC
4437 hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC
4438 or r0,r0,r2 ; Merge in the new RC
4439 stwcx. r0,0,r11 ; Try to stick it back
4440 bne- hpfMrgRC32 ; Try again if we collided...
4443 hpfFPnch: rlwinm. r7,r7,0,~ppFlags ; Clean and test mapping address
4444 beq- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4446 lhz r10,mpSpace(r7) ; Get the space
4447 lwz r9,mpVAddr+4(r7) ; And the vaddr
4448 cmplw cr1,r10,r8 ; Is this one of ours?
4449 xor r9,r12,r9 ; Compare virtual address
4450 cmplwi r9,0x1000 ; See if we really match
4451 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4452 beq+ hpfFPnch2 ; Yes, found ours...
4454 lwz r7,mpAlias+4(r7) ; Chain on to the next
4455 b hpfFPnch ; Check it out...
4457 hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG
4458 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG)
4459 bl mapPhysUnlock ; Unlock the physent now
4461 hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4463 stw r24,4(r19) ; Stuff in the real part of the PTE
4464 eieio ; Make sure this gets there first
4466 stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid
4467 mr r17,r16 ; Get the PCA image to save
4468 b hpfFinish ; Go join the common exit code...
4472 ; At this point we are about to do the 64-bit PTE generation.
4474 ; The following is the R14:R15 pair that contains the "shifted" VSID:
4478 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4479 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4480 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4487 hpfBldPTE64: ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping
4488 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4490 mfsdr1 r27 ; Get the hash table base address
4492 sldi r11,r22,32 ; Slide top of adjusted EA over
4493 sldi r14,r14,32 ; Slide top of VSID over
4494 rlwinm r5,r27,0,27,31 ; Isolate the size
4495 eqv r16,r16,r16 ; Get all foxes here
4496 rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN
4497 mr r2,r10 ; Save the flag part of the mapping
4498 or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value
4499 rldicr r27,r27,0,45 ; Clean up the hash table base
4500 or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value
4501 rlwinm r0,r11,0,4,19 ; Clear out everything but the page
4502 subfic r5,r5,46 ; Get number of leading zeros
4503 xor r19,r0,r15 ; Calculate hash
4504 ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE
4505 srd r16,r16,r5 ; Shift over to get length of table
4506 srdi r19,r19,5 ; Convert page offset to hash table offset
4507 rldicr r16,r16,0,56 ; Clean up lower bits in hash table size
4508 rldicr r10,r10,0,51 ; Clear out flags
4509 sldi r24,r24,12 ; Change ppnum to physical address
4510 sub r11,r11,r10 ; Get the offset from the base mapping
4511 and r19,r19,r16 ; Wrap into hash table
4512 add r24,r24,r11 ; Get actual physical address of this page
4513 srdi r20,r19,5 ; Convert PTEG offset to PCA offset
4514 rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc.
4515 subfic r20,r20,-4 ; Get negative offset to PCA
4516 ori r24,r24,lo16(mpR) ; Force on the reference bit
4517 add r20,r20,r27 ; Point to the PCA slot
4518 add r19,r19,r27 ; Point to the PTEG
4521 ; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower.
4522 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4524 ; We need to check the PTE pointer (mpPte) again after we lock the PTEG. It is possible
4525 ; that some other processor beat us and stuck in a PTE or that
4526 ; all we had was a simple segment exception and the PTE was there the whole time.
4527 ; If we find a valid pointer, we are done.
4530 mr r7,r20 ; Copy the PCA pointer
4531 bl mapLockPteg ; Lock the PTEG
4533 lwz r12,mpPte(r31) ; Get the offset to the PTE
4534 mr r17,r6 ; Remember the PCA image
4535 mr r18,r6 ; Prime post-selection PCA image
4536 andi. r0,r12,mpHValid ; See if we have a PTE now
4537 li r21,8 ; Get the number of slots
4539 bne-- cr7,hpfNoPte64 ; Skip this for a block mapping...
4541 bne-- hpfBailOut ; Someone already did this for us...
4544 ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a
4545 ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4546 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4547 ; R4 returns the slot index.
4549 ; REMEMBER: CR7 indicates that we are building a block mapping.
4552 hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots
4553 mr r6,r17 ; Restore original state of PCA
4554 rlwimi r6,r18,0,8,15 ; Insert the updated steal slot
4555 blt- hpfBailOut ; Holy Cow, all slots are locked...
4557 bl mapSelSlot ; Go select a slot
4559 cmplwi cr5,r3,1 ; Did we steal a slot?
4560 mr r18,r6 ; Remember the PCA image after selection
4561 insrdi r19,r4,3,57 ; Insert slot index into PTEG address bits 57:59, forming the PTE address
4562 lwz r10,hwSteals(r2) ; Get the steal count
4563 blt++ cr5,hpfInser64 ; Nope, no steal...
4565 ld r6,0(r19) ; Get the old PTE
4566 ld r7,8(r19) ; Get the real part of the stealee
4567 rldicr r6,r6,0,62 ; Clear the valid bit
4568 bgt cr5,hpfNipBMx ; Do not try to lock a non-existent physent for a block mapping...
4569 srdi r3,r7,12 ; Change physical address to a page number (ppnum)
4570 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4571 cmplwi cr1,r3,0 ; Check if this is in RAM
4572 bne-- hpfNoPte64 ; Could not get it, try for another...
4574 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4576 hpfNipBMx: std r6,0(r19) ; Set the invalid PTE
4577 li r9,tlbieLock ; Get the TLBIE lock
4579 srdi r11,r6,5 ; Shift VSID over for back hash
4580 mfsprg r4,0 ; Get the per_proc
4581 xor r11,r11,r19 ; Hash backwards to get low bits of VPN
4582 sync ; Make sure the invalid is stored
4584 sldi r12,r6,16 ; Move AVPN to EA position
4585 sldi r11,r11,5 ; Move this to the page position
4587 hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock
4588 mr. r0,r0 ; Is it locked?
4589 li r0,1 ; Get our lock word
4590 bne-- hpfTLBIE65 ; It is locked, go wait...
4592 stwcx. r0,0,r9 ; Try to get it
4593 rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN
4594 rldicl r8,r6,52,50 ; Isolate the address space ID
4595 bne-- hpfTLBIE64 ; We was beat...
4596 addi r10,r10,1 ; Bump the steal count
4598 rldicl r11,r12,0,16 ; Clear cause the book says so
4599 li r0,0 ; Lock clear value
4601 tlbie r11 ; Invalidate it everywhere
4603 mr r7,r8 ; Get a copy of the space ID
4604 eieio ; Make sure that the tlbie happens first
4605 rldimi r7,r7,14,36 ; Copy address space to make hash value
4606 tlbsync ; Wait for everyone to catch up
4607 rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top
4608 srdi r2,r6,26 ; Shift original segment down to bottom
4610 ptesync ; Make sure of it all
4611 xor r7,r7,r2 ; Compute original segment
4612 stw r0,tlbieLock(0) ; Clear the tlbie lock
4614 stw r10,hwSteals(r4) ; Save the steal count
4615 bgt cr5,hpfInser64 ; We just stole a block mapping...
4617 rldimi r12,r7,28,0 ; Insert decoded segment
4618 rldicl r4,r12,0,13 ; Trim to max supported address
4620 ld r12,8(r19) ; Get the RC of the just invalidated PTE
4622 la r11,ppLink+4(r3) ; Point to the master RC copy
4623 ld r7,ppLink(r3) ; Grab the pointer to the first mapping
4624 rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC
4626 hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC
4627 li r12,ppLFAmask ; Get mask to clean up alias pointer
4628 or r0,r0,r2 ; Merge in the new RC
4629 rotrdi r12,r12,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
4630 stwcx. r0,0,r11 ; Try to stick it back
4631 bne-- hpfMrgRC64 ; Try again if we collided...
4633 hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address
4634 beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4636 lhz r10,mpSpace(r7) ; Get the space
4637 ld r9,mpVAddr(r7) ; And the vaddr
4638 cmplw cr1,r10,r8 ; Is this one of ours?
4639 xor r9,r4,r9 ; Compare virtual address
4640 cmpldi r9,0x1000 ; See if we really match
4641 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4642 beq++ hpfFPnch2x ; Yes, found ours...
4644 ld r7,mpAlias(r7) ; Chain on to the next
4645 b hpfFPnchx ; Check it out...
4649 hpfTLBIE65: li r7,lgKillResv ; Point to the reservation kill area
4650 stwcx. r7,0,r7 ; Kill reservation
4652 hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock
4653 mr. r0,r0 ; Is it locked?
4654 beq++ hpfTLBIE64 ; It is free now, go try to grab it...
4655 b hpfTLBIE63 ; Still locked, keep waiting...
4659 hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG
4660 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though)
4661 bl mapPhysUnlock ; Unlock the physent now
4664 hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE
4665 eieio ; Make sure this gets there first
4666 std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid
4667 mr r17,r18 ; Get the PCA image to set
4668 b hpfFinish ; Go join the common exit code...
4671 hpfLostPhys: lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead
4672 ori r0,r0,lo16(Choke) ; System abend
4673 sc ; System abend
4676 ; This is the common code we execute when we are finished setting up the PTE.
4681 hpfFinish: sub r4,r19,r27 ; Get offset of PTE
4682 ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset
4683 bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map
4684 stw r4,mpPte(r31) ; Remember our PTE
4686 hpfBailOut: eieio ; Make sure all updates come first
4687 stw r17,0(r20) ; Unlock and set the final PCA
4690 ; This is where we go if we have started processing the fault, but find that someone
4691 ; else has taken care of it.
4694 hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags
4695 rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag
4696 sth r2,mpFlags+2(r31) ; Set it
4698 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4699 bl sxlkUnlock ; Unlock the search list
4701 li r11,T_IN_VAIN ; Say that it was handled
4702 b EXT(PFSExit) ; Leave...
4705 ; This is where we go when we find that someone else
4706 ; is in the process of handling the fault.
4709 hpfAbandon: li r3,lgKillResv ; Kill off any reservation
4710 stwcx. r3,0,r3 ; Do it
4712 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4713 bl sxlkUnlock ; Unlock the search list
4715 li r11,T_IN_VAIN ; Say that it was handled
4716 b EXT(PFSExit) ; Leave...
4719 ; Guest shadow assist -- page fault handler
4721 ; Here we handle a fault in a guest pmap that has the guest shadow mapping
4722 ; assist active. We locate the VMM pmap extension block, which contains an
4723 ; index over the discontiguous multi-page shadow hash table. The index
4724 ; corresponding to our vaddr is selected, and the selected group within
4725 ; that page is searched for a valid and active entry that contains
4726 ; our vaddr and space id. The search is pipelined, so that we may fetch
4727 ; the next slot while examining the current slot for a hit. The final
4728 ; search iteration is unrolled so that we don't fetch beyond the end of
4729 ; our group, which could have dire consequences depending upon where the
4730 ; physical hash page is located.
4732 ; The VMM pmap extension block occupies a page. Beginning at offset 0, we
4733 ; have the pmap_vmm_ext proper. Aligned at the first 128-byte boundary
4734 ; after the pmap_vmm_ext is the hash table physical address index, a
4735 ; linear list of 64-bit physical addresses of the pages that comprise
4736 ; the hash table.
4738 ; In the event that we successfully locate a guest mapping, we re-join
4739 ; the page fault path at hpfGVfound with the mapping's address in r31;
4740 ; otherwise, we re-join at hpfNotFound. In either case, we re-join holding
4741 ; a share of the pmap search lock for the host pmap with the host pmap's
4742 ; address in r28, the guest pmap's space id in r21, and the guest pmap's
4743 ; flags in r12.
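/*
 * Illustrative only: the shadow hash group selection described above, in C.
 * The real code extracts the page and group indices with the GV_HPAGE_* and
 * GV_HGRP_* shift/mask constants; this sketch substitutes modulo arithmetic
 * and stand-in names, and assumes 64-byte mapping groups.
 *
 *	#include <stdint.h>
 *
 *	static uint64_t shadow_group_paddr(const uint64_t hash_page[], uint32_t pages,
 *	                                   uint32_t groups_per_page, uint32_t space,
 *	                                   uint64_t gva) {
 *		uint32_t hash = (uint32_t)(gva >> 12) ^ space;    // spaceID ^ (vaddr >> 12)
 *		uint64_t page = hash_page[hash % pages];          // physical hash page
 *		uint32_t grp  = (hash / pages) % groups_per_page; // group within that page
 *		return page + (uint64_t)grp * 64u;                // paddr of mapping group
 *	}
 */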
4748 hpfGVxlate: bt pf64Bitb,hpfGV64 ; Take 64-bit path for 64-bit machine
4750 lwz r11,pmapVmmExtPhys+4(r28) ; r11 <- VMM pmap extension block paddr
4751 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4752 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4753 lwz r28,vmxHostPmapPhys+4(r11) ; r28 <- host pmap's paddr
4754 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4755 rlwinm r10,r30,0,0xFFFFF000 ; r10 <- page-aligned guest vaddr
4756 lwz r6,vxsGpf(r11) ; Get guest fault count
4758 srwi r3,r10,12 ; Form shadow hash:
4759 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4760 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4761 ; Form index offset from hash page number
4762 add r31,r31,r4 ; r31 <- hash page index entry
4763 lwz r31,4(r31) ; r31 <- hash page paddr
4764 rlwimi r31,r3,GV_HGRP_SHIFT,GV_HGRP_MASK
4765 ; r31 <- hash group paddr
4767 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4768 bl sxlkShared ; Go get a shared lock on the mapping lists
4769 mr. r3,r3 ; Did we get the lock?
4770 bne- hpfBadLock ; Nope...
4772 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4773 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4774 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
4775 addi r6,r6,1 ; Increment guest fault count
4776 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4777 mtctr r0 ; in this group
4778 stw r6,vxsGpf(r11) ; Update guest fault count
4783 hpfGVlp32: mr r6,r3 ; r6 <- current mapping slot's flags
4784 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4785 mr r7,r4 ; r7 <- current mapping slot's space ID
4786 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4787 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4788 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
4789 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
4790 xor r7,r7,r21 ; Compare space ID
4791 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4792 xor r8,r8,r10 ; Compare virtual address
4793 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4794 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4796 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4797 bdnz hpfGVlp32 ; Iterate
4799 clrrwi r5,r5,12 ; Remove flags from virtual address
4800 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4801 xor r4,r4,r21 ; Compare space ID
4802 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4803 xor r5,r5,r10 ; Compare virtual address
4804 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4805 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4811 ld r11,pmapVmmExtPhys(r28) ; r11 <- VMM pmap extension block paddr
4812 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4813 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4814 ld r28,vmxHostPmapPhys(r11) ; r28 <- host pmap's paddr
4815 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4816 rlwinm r10,r30,0,0xFFFFF000 ; Form 64-bit guest vaddr
4817 rldimi r10,r29,32,0 ; cleaning up low-order 12 bits
4818 lwz r6,vxsGpf(r11) ; Get guest fault count
4820 srwi r3,r10,12 ; Form shadow hash:
4821 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4822 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4823 ; Form index offset from hash page number
4824 add r31,r31,r4 ; r31 <- hash page index entry
4825 ld r31,0(r31) ; r31 <- hash page paddr
4826 insrdi r31,r3,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
4827 ; r31 <- hash group paddr
4829 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4830 bl sxlkShared ; Go get a shared lock on the mapping lists
4831 mr. r3,r3 ; Did we get the lock?
4832 bne-- hpfBadLock ; Nope...
4834 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4835 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4836 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
4837 addi r6,r6,1 ; Increment guest fault count
4838 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4839 mtctr r0 ; in this group
4840 stw r6,vxsGpf(r11) ; Update guest fault count
4845 mr r6,r3 ; r6 <- current mapping slot's flags
4846 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4847 mr r7,r4 ; r7 <- current mapping slot's space ID
4848 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4849 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4850 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
4851 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4852 xor r7,r7,r21 ; Compare space ID
4853 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4854 xor r8,r8,r10 ; Compare virtual address
4855 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4856 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4858 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4859 bdnz hpfGVlp64 ; Iterate
4861 clrrdi r5,r5,12 ; Remove flags from virtual address
4862 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4863 xor r4,r4,r21 ; Compare space ID
4864 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4865 xor r5,r5,r10 ; Compare virtual address
4866 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4867 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4870 lwz r6,vxsGpfMiss(r11) ; Get guest fault miss count
4871 addi r6,r6,1 ; Increment miss count
4872 stw r6,vxsGpfMiss(r11) ; Update guest fault miss count
4876 * hw_set_user_space(pmap)
4877 * hw_set_user_space_dis(pmap)
4879 * Indicate whether memory space needs to be switched.
4880 * We really need to turn off interrupts here, because we need to be non-preemptable
4882 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
4883 * register usage here. The VMM switch code in vmachmon.s that calls this
4884 * knows what registers are in use. Check that code if these change.
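 *
 * Net effect, as a C sketch (not the literal code; ppUserPmap et al. are
 * the per_proc fields used below, and the virt->real conversion is the
 * usual xor with the pmapvr salt):
 *
 *	addr64_t phys = (addr64_t)pmap ^ pmap->pmapvr;	// virt->real
 *	per_proc->ppUserPmap = phys;			// new user pmap, physical
 *	per_proc->ppUserPmapVirt = pmap;		// and virtual
 *	if (pmap != old_pmap && cpu_has_altivec)
 *		dssall();				// kill outstanding data streams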
4890 .globl EXT(hw_set_user_space)
4892 LEXT(hw_set_user_space)
4894 lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable
4895 mfmsr r10 ; Get the current MSR
4896 ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP
4897 ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE
4898 andc r10,r10,r8 ; Turn off VEC, FP for good
4899 andc r9,r10,r9 ; Turn off EE also
4900 mtmsr r9 ; Disable them
4901 isync ; Make sure FP and vec are off
4902 mfsprg r6,1 ; Get the current activation
4903 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4904 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4905 mfsprg r4,2 ; Get the feature flags
4906 lwz r7,pmapvr(r3) ; Get the v to r translation
4907 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4908 mtcrf 0x80,r4 ; Get the Altivec flag
4909 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4910 cmplw cr1,r3,r2 ; Same address space as before?
4911 stw r7,ppUserPmap(r6) ; Show our real pmap address
4912 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4913 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4914 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4915 mtmsr r10 ; Restore interruptions
4916 beqlr-- cr1 ; Leave if the same address space or not Altivec
4918 dssall ; Need to kill all data streams if adrsp changed
4923 .globl EXT(hw_set_user_space_dis)
4925 LEXT(hw_set_user_space_dis)
4927 lwz r7,pmapvr(r3) ; Get the v to r translation
4928 mfsprg r4,2 ; Get the feature flags
4929 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4930 mfsprg r6,1 ; Get the current activation
4931 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4932 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4933 mtcrf 0x80,r4 ; Get the Altivec flag
4934 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4935 cmplw cr1,r3,r2 ; Same address space as before?
4936 stw r7,ppUserPmap(r6) ; Show our real pmap address
4937 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4938 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4939 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4940 beqlr-- cr1 ; Leave if the same
4942 dssall ; Need to kill all data streams if adrsp changed
4946 /* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry
4948 * Lock must already be held on mapping block list
4949 * returns 0 if all slots filled.
4950 * returns n if a slot is found and it is not the last
4951 * returns -n if a slot is found and it is the last
4952 * when n and -n are returned, the corresponding bit is cleared
4953 * the mapping is zeroed out before return
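 *
 * In effect (a C sketch over a single 64-bit free mask; the code keeps it
 * as two 32-bit words in mbfree, and bit 0 covers the block header, so a
 * real hit presumably never returns index 0):
 *
 *	int mapalc1_sketch(struct mappingblok *mb, uint64_t *freemask) {
 *		if (*freemask == 0) return 0;			// all slots filled
 *		int n = __builtin_clzll(*freemask);		// first free slot
 *		*freemask &= ~(0x8000000000000000ULL >> n);	// claim its bit
 *		memset((char *)mb + (n << 6), 0, 64);		// zero the 64-byte mapping
 *		return *freemask ? n : -n;			// -n when it was the last
 *	}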
4961 lwz r4,mbfree(r3) ; Get the 1st mask
4962 lis r0,0x8000 ; Get the mask to clear the first free bit
4963 lwz r5,mbfree+4(r3) ; Get the 2nd mask
4964 mr r12,r3 ; Save the block ptr
4965 cntlzw r3,r4 ; Get first 1-bit in 1st word
4966 srw. r9,r0,r3 ; Get bit corresponding to first free one
4967 cntlzw r10,r5 ; Get first free field in second word
4968 andc r4,r4,r9 ; Turn 1-bit off in 1st word
4969 bne mapalc1f ; Found one in 1st word
4971 srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word
4972 li r3,0 ; assume failure return
4973 andc r5,r5,r9 ; Turn it off
4974 beqlr-- ; There are no 1 bits left...
4975 addi r3,r10,32 ; set the correct number
4978 or. r0,r4,r5 ; any more bits set?
4979 stw r4,mbfree(r12) ; update bitmasks
4980 stw r5,mbfree+4(r12)
4982 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4984 dcbz r6,r12 ; clear the 64-byte mapping
4987 bnelr++ ; return if another bit remains set
4989 neg r3,r3 ; indicate we just returned the last bit
4993 /* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zeros a free 2-bit mapping entry
4995 * Lock must already be held on mapping block list
4996 * returns 0 if all slots filled.
4997 * returns n if a slot is found and it is not the last
4998 * returns -n if a slot is found and it is the last
4999 * when n and -n are returned, the corresponding bits are cleared
5000 * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)).
5001 * the mapping is zeroed out before return
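 *
 * The run-finding trick in C (a sketch; the run that straddles the two
 * words -- bit 31 of the 1st with bit 0 of the 2nd -- is picked up
 * separately at mapalc2c below):
 *
 *	static int first_pair(uint32_t w) {		// first run of two 1 bits
 *		uint32_t runs = w & (w << 1);		// marks leading bit of each run
 *		return runs ? __builtin_clz(runs) : 32;	// 32 => no run in this word
 *	}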
5007 lwz r4,mbfree(r3) ; Get the first mask
5008 lis r0,0x8000 ; Get the mask to clear the first free bit
5009 lwz r5,mbfree+4(r3) ; Get the second mask
5010 mr r12,r3 ; Save the block ptr
5011 slwi r6,r4,1 ; shift first word over
5012 and r6,r4,r6 ; lite start of double bit runs in 1st word
5013 slwi r7,r5,1 ; shift 2nd word over
5014 cntlzw r3,r6 ; Get first free 2-bit run in 1st word
5015 and r7,r5,r7 ; lite start of double bit runs in 2nd word
5016 srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word
5017 cntlzw r10,r7 ; Get first free field in second word
5018 srwi r11,r9,1 ; shift over for 2nd bit in 1st word
5019 andc r4,r4,r9 ; Turn off 1st bit in 1st word
5020 andc r4,r4,r11 ; turn off 2nd bit in 1st word
5021 bne mapalc2a ; Found two consecutive free bits in 1st word
5023 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
5024 li r3,0 ; assume failure
5025 srwi r11,r9,1 ; get mask for 2nd bit
5026 andc r5,r5,r9 ; Turn off 1st bit in 2nd word
5027 andc r5,r5,r11 ; turn off 2nd bit in 2nd word
5028 beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either
5029 addi r3,r10,32 ; set the correct number
5032 or. r0,r4,r5 ; any more bits set?
5033 stw r4,mbfree(r12) ; update bitmasks
5034 stw r5,mbfree+4(r12)
5035 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
5039 dcbz r6,r12 ; zero out the 128-byte mapping
5040 dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines
5041 dcbz r8,r12 ; because the mapping may not be 128-byte aligned
5044 bnelr++ ; return if another bit remains set
5046 neg r3,r3 ; indicate we just returned the last bit
5050 rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31
5051 and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free?
5052 beqlr ; no, we failed
5053 rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word
5054 rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word
5055 li r3,31 ; get index of this field
5060 ; This routine initializes the hash table and PCA.
5061 ; It is done here because we may need to be 64-bit to do it.
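;
; In outline (a C sketch; the cache-line size is 32 or 128 bytes by CPU,
; npcas stands for the PTEG count, and PCA entries pack downward from the
; hash table base):
;
;	memset(hash_table_base, 0, hash_table_size);	// zero every PTEG
;	uint32_t *pca = (uint32_t *)hash_table_base;
;	for (long i = 1; i <= npcas; i++)
;		pca[-i] = 0xFF010000;	// all slots free, steal pointer at end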
5065 .globl EXT(hw_hash_init)
5069 mfsprg r10,2 ; Get feature flags
5070 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5071 mtcrf 0x02,r10 ; move pf64Bit to cr6
5072 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5073 lis r4,0xFF01 ; Set all slots free and start steal at end
5074 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5075 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5077 lwz r12,0(r12) ; Get hash table size
5079 bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint)
5081 lwz r11,4(r11) ; Get hash table base
5083 hhiNext32: cmplw r3,r12 ; Have we reached the end?
5084 bge- hhiCPCA32 ; Yes...
5085 dcbz r3,r11 ; Clear the line
5086 addi r3,r3,32 ; Next one...
5087 b hhiNext32 ; Go on...
5089 hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4
5090 li r3,-4 ; Displacement to first PCA entry
5091 neg r12,r12 ; Get negative end of PCA
5093 hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry
5094 subi r3,r3,4 ; Next slot
5095 cmpw r3,r12 ; Have we finished?
5096 bge+ hhiNPCA32 ; Not yet...
5099 hhiSF: mfmsr r9 ; Save the MSR
5101 mr r0,r9 ; Get a copy of the MSR
5102 ld r11,0(r11) ; Get hash table base
5103 rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0)
5104 mtmsrd r0 ; Turn on SF
5108 hhiNext64: cmpld r3,r12 ; Have we reached the end?
5109 bge-- hhiCPCA64 ; Yes...
5110 dcbz128 r3,r11 ; Clear the line
5111 addi r3,r3,128 ; Next one...
5112 b hhiNext64 ; Go on...
5114 hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4
5115 li r3,-4 ; Displacement to first PCA entry
5116 neg r12,r12 ; Get negative end of PCA
5118 hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry
5119 subi r3,r3,4 ; Next slot
5120 cmpd r3,r12 ; Have we finished?
5121 bge++ hhiNPCA64 ; Not yet...
5123 mtmsrd r9 ; Turn off SF if it was off
5129 ; This routine sets up the hardware to start translation.
5130 ; Note that we do NOT start translation.
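;
; The SDR1 images built below are, in effect (a C sketch assuming a
; power-of-two hash_table_size; field layouts per the architecture):
;
;	sdr1_32 = htab_base | (((htab_size - 1) >> 16) & 0x1FF);	// HTABORG | HTABMASK
;	sdr1_64 = htab_base | (log2(htab_size) - 18);			// HTABORG | HTABSIZE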
5134 .globl EXT(hw_setup_trans)
5136 LEXT(hw_setup_trans)
5138 mfsprg r11,0 ; Get the per_proc block
5139 mfsprg r12,2 ; Get feature flags
5142 mtcrf 0x02,r12 ; Move pf64Bit to cr6
5143 stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid
5144 stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux
5145 sth r2,ppInvSeg(r11) ; Force a reload of the SRs
5146 sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel
5148 bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint)
5150 li r9,0 ; Clear out a register
5153 mtdbatu 0,r9 ; Invalidate maps
5154 mtdbatl 0,r9 ; Invalidate maps
5155 mtdbatu 1,r9 ; Invalidate maps
5156 mtdbatl 1,r9 ; Invalidate maps
5157 mtdbatu 2,r9 ; Invalidate maps
5158 mtdbatl 2,r9 ; Invalidate maps
5159 mtdbatu 3,r9 ; Invalidate maps
5160 mtdbatl 3,r9 ; Invalidate maps
5162 mtibatu 0,r9 ; Invalidate maps
5163 mtibatl 0,r9 ; Invalidate maps
5164 mtibatu 1,r9 ; Invalidate maps
5165 mtibatl 1,r9 ; Invalidate maps
5166 mtibatu 2,r9 ; Invalidate maps
5167 mtibatl 2,r9 ; Invalidate maps
5168 mtibatu 3,r9 ; Invalidate maps
5169 mtibatl 3,r9 ; Invalidate maps
5171 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5172 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5173 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5174 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5175 lwz r11,4(r11) ; Get hash table base
5176 lwz r12,0(r12) ; Get hash table size
5177 subi r12,r12,1 ; Back off by 1
5178 rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image
5180 mtsdr1 r11 ; Ok, we now have the hash table set up
5183 li r12,invalSpace ; Get the invalid segment value
5184 li r10,0 ; Start low
5186 hstsetsr: mtsrin r12,r10 ; Set the SR
5187 addis r10,r10,0x1000 ; Bump the segment
5188 mr. r10,r10 ; Are we finished?
5189 bne+ hstsetsr ; Nope...
5197 hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5198 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5199 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5200 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5201 ld r11,0(r11) ; Get hash table base
5202 lwz r12,0(r12) ; Get hash table size
5203 cntlzw r10,r12 ; Get the number of bits
5204 subfic r10,r10,13 ; Get the extra bits we need
5205 or r11,r11,r10 ; Add the size field to SDR1
5207 mtsdr1 r11 ; Ok, we now have the hash table set up
5210 li r0,0 ; Set an SLB slot index of 0
5211 slbia ; Trash all SLB entries (except for entry 0 that is)
5212 slbmfee r7,r0 ; Get the entry that is in SLB index 0
5213 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
5214 slbie r7 ; Invalidate it
5220 ; This routine turns on translation for the first time on a processor
5224 .globl EXT(hw_start_trans)
5226 LEXT(hw_start_trans)
5229 mfmsr r10 ; Get the msr
5230 ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation
5232 mtmsr r10 ; Everything falls apart here
5240 ; This routine validates a segment register.
5241 ; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va)
5244 ; r4 = segment[0:31]
5245 ; r5 = segment[32:63]
5249 ; Note that we transform the addr64_t (long long) parameters into single 64-bit values.
5250 ; Note that there is no reason to apply the key modifier here because this is only
5251 ; used for kernel accesses.
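;
; Schematically, the VSID computed below is (a C sketch; replicate4() is a
; hypothetical helper that concatenates four copies of the space ID, and
; shifted_segment_no() positions va[0:35] per the diagrams that follow):
;
;	uint64_t vsid = shifted_segment_no(va) ^ replicate4(space_id);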
5255 .globl EXT(hw_map_seg)
5259 lwz r0,pmapSpace(r3) ; Get the space, we will need it soon
5260 lwz r9,pmapFlags(r3) ; Get the flags for the keys now
5261 mfsprg r10,2 ; Get feature flags
5264 ; Note: the following code would probably be easier to follow if I split it,
5265 ; but I just wanted to see if I could write this to work on both 32- and 64-bit
5266 ; machines combined.
5270 ; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines)
5271 ; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines)
5273 rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0:31] - no-op for 32-bit
5274 rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID
5275 mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6
5276 srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest
5277 rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:35]
5278 rlwimi r0,r0,14,4,17 ; Dup address space ID above itself
5279 rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines)
5280 rlwinm r2,r0,28,0,31 ; Rotate low nybble to top of low half
5281 rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32
5282 rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines)
5284 rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space
5285 ; concatenated together. There is garbage
5286 ; at the top for 64-bit but we will clean that up later.
5288 rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit
5292 ; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or
5293 ; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines
5297 ; What we have now is:
5300 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5301 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5302 ; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value
5303 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5304 ; 0 0 1 2 3 - for 32-bit machines
5308 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5309 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5310 ; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA
5311 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5312 ; 0 0 1 2 3 - for 32-bit machines
5316 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5317 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5318 ; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment
5319 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5320 ; 0 0 1 2 3 - for 32-bit machines
5324 xor r8,r8,r2 ; Calculate VSID
5326 bf-- pf64Bitb,hms32bit ; Skip out if 32-bit...
5327 mfsprg r12,0 ; Get the per_proc
5328 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5329 mfmsr r6 ; Get current MSR
5330 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5331 mtmsrd r0,1 ; Set only the EE bit to 0
5332 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5333 mfmsr r11 ; Get the MSR right now, after disabling EE
5334 andc r2,r11,r2 ; Turn off translation now
5335 rldimi r2,r0,63,0 ; Get bit 64-bit turned on
5336 or r11,r11,r6 ; Turn on the EE bit if it was on
5337 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5338 isync ; Hang out a bit
5340 ld r6,validSegs(r12) ; Get the valid SLB entry flags
5341 sldi r9,r9,9 ; Position the key and noex bit
5343 rldimi r5,r8,12,0 ; Form the VSID/key
5345 not r3,r6 ; Make valids be 0s
5347 cntlzd r7,r3 ; Find a free SLB
5348 cmplwi r7,63 ; Did we find a free SLB entry?
5350 slbie r4 ; Since this ESID may still be in an SLBE, kill it
5352 oris r4,r4,0x0800 ; Turn on the valid bit in ESID
5353 addi r7,r7,1 ; Make sure we skip slb 0
5354 blt++ hmsFreeSeg ; Yes, go load it...
5357 ; No free SLB entries, select one that is in use and invalidate it
5359 lwz r2,ppSegSteal(r12) ; Get the next slot to steal
5360 addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
5361 addi r2,r2,1 ; Set next slot to steal
5362 slbmfee r3,r7 ; Get the entry that is in the selected spot
5363 subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap
5364 rldicr r3,r3,0,35 ; Clear the valid bit and the rest
5365 srawi r8,r8,31 ; Get -1 if steal index still in range
5366 slbie r3 ; Invalidate the in-use SLB entry
5367 and r2,r2,r8 ; Reset steal index when it should wrap
5370 stw r2,ppSegSteal(r12) ; Set the next slot to steal
5372 ; We are now ready to stick the SLB entry in the SLB and mark it in use
5375 hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0
5376 rldimi r4,r7,0,58 ; Copy in the SLB entry selector
5377 srd r0,r0,r2 ; Set bit mask for allocation
5378 rldicl r5,r5,0,15 ; Clean out the unsupported bits
5379 or r6,r6,r0 ; Turn on the allocation flag
5381 slbmte r5,r4 ; Make that SLB entry
5383 std r6,validSegs(r12) ; Mark as valid
5384 mtmsrd r11 ; Restore the MSR
5391 mfsprg r12,1 ; Get the current activation
5392 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5393 rlwinm r8,r8,0,8,31 ; Clean up the VSID
5394 rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting
5395 lis r0,0x8000 ; Set bit 0
5396 rlwimi r8,r9,28,1,3 ; Insert the keys and N bit
5397 srw r0,r0,r2 ; Get bit corresponding to SR
5398 addi r7,r12,validSegs ; Point to the valid segment flags directly
5400 mtsrin r8,r4 ; Set the actual SR
5401 isync ; Need to make sure this is done
5403 hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags
5404 or r6,r6,r0 ; Show that SR is valid
5405 stwcx. r6,0,r7 ; Set the valid SR flags
5406 bne-- hmsrupt ; Had an interrupt, need to get flags again...
5412 ; This routine invalidates a segment register.
5416 .globl EXT(hw_blow_seg)
5420 mfsprg r10,2 ; Get feature flags
5421 mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5423 rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean
5425 bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit...
5427 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5428 mfmsr r6 ; Get current MSR
5429 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5430 mtmsrd r0,1 ; Set only the EE bit to 0
5431 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5432 mfmsr r11 ; Get the MSR right now, after disabling EE
5433 andc r2,r11,r2 ; Turn off translation now
5434 rldimi r2,r0,63,0 ; Get bit 64-bit turned on
5435 or r11,r11,r6 ; Turn on the EE bit if it was on
5436 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5437 isync ; Hang out a bit
5439 rldimi r9,r3,32,0 ; Insert the top part of the ESID
5441 slbie r9 ; Invalidate the associated SLB entry
5443 mtmsrd r11 ; Restore the MSR
5450 mfsprg r12,1 ; Get the current activation
5451 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5452 addi r7,r12,validSegs ; Point to the valid segment flags directly
5453 lwarx r4,0,r7 ; Get and reserve the valid segment flags
5454 rlwinm r6,r9,4,28,31 ; Convert segment to number
5455 lis r2,0x8000 ; Set up a mask
5456 srw r2,r2,r6 ; Make a mask
5457 and. r0,r4,r2 ; See if this is even valid
5458 li r5,invalSpace ; Set the invalid address space VSID
5459 beqlr ; Leave if already invalid...
5461 mtsrin r5,r9 ; Slam the segment register
5462 isync ; Need to make sure this is done
5464 hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment
5465 stwcx. r4,0,r7 ; Set the valid SR flags
5466 beqlr++ ; Stored ok, no interrupt, time to leave...
5468 lwarx r4,0,r7 ; Get and reserve the valid segment flags again
5469 b hbsrupt ; Try again...
5472 ; This routine invalidates the entire pmap segment cache
5474 ; Translation is on, interrupts may or may not be enabled.
5478 .globl EXT(invalidateSegs)
5480 LEXT(invalidateSegs)
5482 la r10,pmapCCtl(r3) ; Point to the segment cache control
5483 eqv r2,r2,r2 ; Get all foxes
5485 isInv: lwarx r4,0,r10 ; Get the segment cache control value
5486 rlwimi r4,r2,0,0,15 ; Slam in all invalid bits
5487 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5488 bne-- isInv0 ; Yes, try again...
5490 stwcx. r4,0,r10 ; Try to invalidate it
5491 bne-- isInv ; Someone else just stuffed it...
5495 isInv0: li r4,lgKillResv ; Get reservation kill zone
5496 stwcx. r4,0,r4 ; Kill reservation
5498 isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control
5499 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5500 bne-- isInv1 ; Yes, still locked, keep waiting...
5501 b isInv ; Lock is free now, try the invalidate again...
5504 ; This routine switches segment registers between kernel and user.
5505 ; We have some assumptions and rules:
5506 ; We are in the exception vectors
5507 ; pf64Bitb is set up
5508 ; R3 contains the MSR we are going to
5509 ; We cannot use R4, R13, R20, R21, R29
5510 ; R13 is the savearea
5511 ; R29 has the per_proc
5513 ; We return R3 as 0 if we did not switch between kernel and user
5514 ; We also maintain and apply the user state key modifier used by VMM support;
5515 ; If we go to the kernel it is set to 0, otherwise it follows the userProtKeybit in spcFlags.
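;
; The branch-free pmap/key selection below, as a C sketch:
;
;	uint32_t mask = 0 - to_user;		// 0 for kernel, ~0 for user
;	pmap = (kernel_pmap & ~mask) | (user_pmap & mask);
;	key = user_key_mod & mask;		// key modifier forced to 0 in kernel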
5520 .globl EXT(switchSegs)
5524 lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator)
5525 lwz r9,spcFlags(r29) ; Pick up the special user state flags
5526 rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit
5527 rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit
5528 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
5529 or r2,r2,r3 ; This will be 1 if we will be using user segments
5530 li r3,0 ; Get a selection mask
5531 cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg
5532 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
5533 sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user
5534 la r19,ppUserPmap(r29) ; Point to the current user pmap
5536 ; The following line is an exercise in a generally unreadable but recompile-friendly programming practice
5537 rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key
5539 andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise
5540 and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise
5541 and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise
5542 or r8,r8,r19 ; Get the pointer to the pmap we are using
5544 beqlr ; We are staying in the same mode, do not touch segs...
5546 lwz r28,0(r8) ; Get top half of pmap address
5547 lwz r10,4(r8) ; Get bottom half
5549 stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg
5550 rlwinm r28,r28,0,1,0 ; Copy top to top
5551 stw r30,ppMapFlags(r29) ; Set the key modifier
5552 rlwimi r28,r10,0,0,31 ; Insert bottom
5554 la r10,pmapCCtl(r28) ; Point to the segment cache control
5555 la r9,pmapSegCache(r28) ; Point to the segment cache
5557 ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control
5558 rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5559 ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit
5560 bne-- ssgLock0 ; Yup, this is in use...
5562 stwcx. r16,0,r10 ; Try to set the lock
5563 bne-- ssgLock ; Did we get contention?
5565 not r11,r15 ; Invert the invalids to valids
5566 li r17,0 ; Set a mask for the SRs we are loading
5567 isync ; Make sure we are all caught up
5569 bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it...
5572 slbia ; Trash all SLB entries (except for entry 0 that is)
5573 li r17,1 ; Get SLB index to load (skip slb 0)
5574 oris r0,r0,0x8000 ; Get set for a mask
5575 b ssg64Enter ; Start on a cache line...
5579 ssgLock0: li r15,lgKillResv ; Killing field
5580 stwcx. r15,0,r15 ; Kill reservation
5582 ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls
5583 rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5584 beq++ ssgLock ; Not locked now, go try to grab it again...
5585 b ssgLock1 ; Still locked, keep checking...
5587 ; This is the 32-bit address space switch code.
5588 ; We take a reservation on the segment cache and walk through.
5589 ; For each entry, we load the specified entries and remember which
5590 ; we did with a mask. Then, we figure out which segments should be
5591 ; invalid and then see which actually are. Then we load those with the
5592 ; defined invalid VSID.
5593 ; Afterwards, we unlock the segment cache.
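;
; Schematically (a C sketch; clz32() is a hypothetical count-leading-zeros
; that returns 32 for 0, and load_sr()/set_sr() stand in for the mask and
; mtsrin work below):
;
;	uint32_t loaded = 0, v = ~cache_ctl;		// valid entries as 1 bits
;	int i;
;	while ((i = clz32(v)) < pmapSegCacheUse) {
;		v &= ~(0x80000000u >> i);		// consume this cache entry
;		loaded |= load_sr(cache[i]);		// mtsrin, note which SR
;	}
;	for (uint32_t stale = old_valid & ~loaded; stale != 0; ) {
;		int s = clz32(stale);
;		stale &= ~(0x80000000u >> s);
;		set_sr(s, invalSpace);			// load the invalid VSID
;	}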
5598 ssg32Enter: cntlzw r12,r11 ; Find the next slot in use
5599 cmplwi r12,pmapSegCacheUse ; See if we are done
5600 slwi r14,r12,4 ; Index to the cache slot
5601 lis r0,0x8000 ; Get set for a mask
5602 add r14,r14,r9 ; Point to the entry
5604 bge- ssg32Done ; All done...
5606 lwz r5,sgcESID+4(r14) ; Get the ESID part
5607 srw r2,r0,r12 ; Form a mask for the one we are loading
5608 lwz r7,sgcVSID+4(r14) ; And get the VSID bottom
5610 andc r11,r11,r2 ; Clear the bit
5611 lwz r6,sgcVSID(r14) ; And get the VSID top
5613 rlwinm r2,r5,4,28,31 ; Change the segment number to a number
5615 xor r7,r7,r30 ; Modify the key before we actually set it
5616 srw r0,r0,r2 ; Get a mask for the SR we are loading
5617 rlwinm r8,r7,19,1,3 ; Insert the keys and N bit
5618 or r17,r17,r0 ; Remember the segment
5619 rlwimi r8,r7,20,12,31 ; Insert bits 4:23 of the VSID
5620 rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents
5622 mtsrin r8,r5 ; Load the segment
5623 b ssg32Enter ; Go enter the next...
5627 ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags
5628 stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5630 lis r0,0x8000 ; Get set for a mask
5631 li r2,invalSpace ; Set the invalid address space VSID
5635 andc r16,r16,r17 ; Get list of SRs that were valid before but not now
5638 ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate
5639 cmplwi r18,16 ; Have we finished?
5640 srw r22,r0,r18 ; Get the mask bit
5641 rlwinm r23,r18,28,0,3 ; Get the segment register we need
5642 andc r16,r16,r22 ; Get rid of the guy we just did
5643 bge ssg32Really ; Yes, we are really done now...
5645 mtsrin r2,r23 ; Invalidate the SR
5646 b ssg32Inval ; Do the next...
5651 stw r17,validSegs(r29) ; Set the valid SR flags
5652 li r3,1 ; Set kernel/user transition
5656 ; This is the 64-bit address space switch code.
5657 ; First we blow away all of the SLB entries.
5659 ; Then we walk the segment cache, loading the SLB. Afterwards, we release the cache lock
5661 ; Note that because we have to treat SLBE 0 specially, we do not ever use it...
5662 ; It's a performance thing...
5667 ssg64Enter: cntlzw r12,r11 ; Find the next slot in use
5668 cmplwi r12,pmapSegCacheUse ; See if we are done
5669 slwi r14,r12,4 ; Index to the cache slot
5670 srw r16,r0,r12 ; Form a mask for the one we are loading
5671 add r14,r14,r9 ; Point to the entry
5672 andc r11,r11,r16 ; Clear the bit
5673 bge-- ssg64Done ; All done...
5675 ld r5,sgcESID(r14) ; Get the ESID part
5676 ld r6,sgcVSID(r14) ; And get the VSID part
5677 oris r5,r5,0x0800 ; Turn on the valid bit
5678 or r5,r5,r17 ; Insert the SLB slot
5679 xor r6,r6,r30 ; Modify the key before we actually set it
5680 addi r17,r17,1 ; Bump to the next slot
5681 slbmte r6,r5 ; Make that SLB entry
5682 b ssg64Enter ; Go enter the next...
5686 ssg64Done: stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5688 eqv r16,r16,r16 ; Load up with all foxes
5689 subfic r17,r17,64 ; Get the number of 1 bits we need
5691 sld r16,r16,r17 ; Get a mask for the used SLB entries
5692 li r3,1 ; Set kernel/user transition
5693 std r16,validSegs(r29) ; Set the valid SR flags
5697 ; mapSetUp - this function sets initial state for all mapping functions.
5698 ; We turn off all translations (physical), disable interruptions, and
5699 ; enter 64-bit mode if applicable.
5701 ; We also return the original MSR in r11, the feature flags in R12,
5702 ; and CR6 set up so we can do easy branches for 64-bit
5703 ; hw_clear_maps assumes r10, r9 will not be trashed.
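;
; Net effect, as a C sketch:
;
;	uint64_t old = get_msr();			// returned to caller in r11
;	uint64_t new = old & ~(MSR_VEC | MSR_FP);	// vector and FP off for good
;	new &= ~(MSR_EE | MSR_DR | MSR_IR);		// interrupts and translation off
;	if (pf64Bit) new |= MSR_SF;			// and 64-bit mode on
;	set_msr(new);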
5707 .globl EXT(mapSetUp)
5711 lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask
5712 mfsprg r12,2 ; Get feature flags
5713 ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well
5714 mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5715 mfmsr r11 ; Save the MSR
5716 mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5717 andc r11,r11,r0 ; Clear VEC and FP for good
5718 ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR
5719 li r2,1 ; Prepare for 64 bit
5720 andc r0,r11,r0 ; Clear the rest
5721 bt pfNoMSRirb,msuNoMSR ; No MSR...
5722 bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint)
5724 mtmsr r0 ; Translation and all off
5725 isync ; Toss prefetch
5730 msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
5731 mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR
5737 msuNoMSR: mr r2,r3 ; Save R3 across call
5738 mr r3,r0 ; Get the new MSR value
5739 li r0,loadMSR ; Get the MSR setter SC
5741 mr r3,r2 ; Restore R3
5742 blr ; Go back all set up...
5746 ; Guest shadow assist -- remove all guest mappings
5748 ; Remove all mappings for a guest pmap from the shadow hash table.
5751 ; r3 : address of pmap, 32-bit kernel virtual address
5753 ; Non-volatile register usage:
5754 ; r24 : host pmap's physical address
5755 ; r25 : VMM extension block's physical address
5756 ; r26 : physent address
5757 ; r27 : guest pmap's space ID number
5758 ; r28 : current hash table page index
5759 ; r29 : guest pmap's physical address
5760 ; r30 : saved msr image
5761 ; r31 : current mapping
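;
; Overall shape, as a C sketch (hash_page(), slots_per_page(), and
; next_slot() are hypothetical helpers; the PTE disconnect and physent
; unchaining are the mapInvPtexx and chain-edit sequences in the code below):
;
;	for (unsigned pg = 0; pg < GV_HPAGES; pg++) {
;		struct mapping *mp = hash_page(pg);	// page paddr from the index
;		for (unsigned s = 0; s < slots_per_page(); s++, mp = next_slot(mp)) {
;			if ((mp->mpFlags & mpgFree) || mp->mpSpace != space)
;				continue;		// not one of ours
;			if (!(mp->mpFlags & mpgDormant))
;				invalidate_pte(mp);	// active: disconnect PTE first
;			unchain_from_physent(mp);
;			mp->mpFlags = (mp->mpFlags & ~mpgFlags) | mpgFree;
;		}
;	}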
5764 .globl EXT(hw_rem_all_gv)
5768 #define graStackSize ((31-24+1)*4)+4
5769 stwu r1,-(FM_ALIGN(graStackSize)+FM_SIZE)(r1)
5770 ; Mint a new stack frame
5771 mflr r0 ; Get caller's return address
5772 mfsprg r11,2 ; Get feature flags
5773 mtcrf 0x02,r11 ; Insert feature flags into cr6
5774 stw r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5775 ; Save caller's return address
5776 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5777 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5778 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5779 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5780 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5781 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5782 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5783 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5785 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5787 bt++ pf64Bitb,gra64Salt ; Test for 64-bit machine
5788 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5789 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5790 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5791 b graStart ; Get to it
5792 gra64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5793 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5794 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5795 graStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5796 xor r29,r3,r9 ; Convert pmap_t virt->real
5797 mr r30,r11 ; Save caller's msr image
5799 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5800 bl sxlkExclusive ; Get lock exclusive
5802 lwz r3,vxsGra(r25) ; Get remove all count
5803 addi r3,r3,1 ; Increment remove all count
5804 stw r3,vxsGra(r25) ; Update remove all count
5806 li r28,0 ; r28 <- first hash page table index to search
5807 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5809 la r31,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
5810 rlwinm r11,r28,GV_PGIDX_SZ_LG2,GV_HPAGE_MASK
5811 ; Convert page index into page physical index offset
5812 add r31,r31,r11 ; Calculate page physical index entry address
5813 bt++ pf64Bitb,gra64Page ; Separate handling for 64-bit
5814 lwz r31,4(r31) ; r31 <- first slot in hash table page to examine
5815 b graLoop ; Examine all slots in this page
5816 gra64Page: ld r31,0(r31) ; r31 <- first slot in hash table page to examine
5817 b graLoop ; Examine all slots in this page
5820 graLoop: lwz r3,mpFlags(r31) ; Get mapping's flags
5821 lhz r4,mpSpace(r31) ; Get mapping's space ID number
5822 rlwinm r6,r3,0,mpgFree ; Isolate guest free mapping flag
5823 xor r4,r4,r27 ; Compare space ID number
5824 or. r0,r6,r4 ; cr0_eq <- !free && space id match
5825 bne graMiss ; Not one of ours, skip it
5827 lwz r11,vxsGraHits(r25) ; Get remove hit count
5828 addi r11,r11,1 ; Increment remove hit count
5829 stw r11,vxsGraHits(r25) ; Update remove hit count
5831 rlwinm. r0,r3,0,mpgDormant ; Is this entry dormant?
5832 bne graRemPhys ; Yes, nothing to disconnect
5834 lwz r11,vxsGraActive(r25) ; Get remove active count
5835 addi r11,r11,1 ; Increment remove active count
5836 stw r11,vxsGraActive(r25) ; Update remove active count
5838 bt++ pf64Bitb,graDscon64 ; Handle 64-bit disconnect separately
5839 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
5840 ; r31 <- mapping's physical address
5841 ; r3 -> PTE slot physical address
5842 ; r4 -> High-order 32 bits of PTE
5843 ; r5 -> Low-order 32 bits of PTE
5845 ; r7 -> PCA physical address
5846 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
5847 b graFreePTE ; Join 64-bit path to release the PTE
5848 graDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
5849 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
5850 graFreePTE: mr. r3,r3 ; Was there a valid PTE?
5851 beq- graRemPhys ; No valid PTE, we're almost done
5852 lis r0,0x8000 ; Prepare free bit for this slot
5853 srw r0,r0,r2 ; Position free bit
5854 or r6,r6,r0 ; Set it in our PCA image
5855 lwz r8,mpPte(r31) ; Get PTE pointer
5856 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
5857 stw r8,mpPte(r31) ; Save invalidated PTE pointer
5858 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
5859 stw r6,0(r7) ; Update PCA and unlock the PTEG
5862 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
5863 bl mapFindLockPN ; Find 'n' lock this page's physent
5864 mr. r26,r3 ; Got lock on our physent?
5865 beq-- graBadPLock ; No, time to bail out
5867 crset cr1_eq ; cr1_eq <- previous link is the anchor
5868 bt++ pf64Bitb,graRemove64 ; Use 64-bit version on 64-bit machine
5869 la r11,ppLink+4(r26) ; Point to chain anchor
5870 lwz r9,ppLink+4(r26) ; Get chain anchor
5871 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
5873 graRemLoop: beq- graRemoveMiss ; End of chain, this is not good
5874 cmplw r9,r31 ; Is this the mapping to remove?
5875 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
5876 bne graRemNext ; No, chain onward
5877 bt cr1_eq,graRemRetry ; Mapping to remove is chained from anchor
5878 stw r8,0(r11) ; Unchain gpv->phys mapping
5879 b graRemoved ; Exit loop
5881 lwarx r0,0,r11 ; Get previous link
5882 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
5883 stwcx. r0,0,r11 ; Update previous link
5884 bne- graRemRetry ; Lost reservation, retry
5885 b graRemoved ; Good work, let's get outta here
5887 graRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
5888 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5889 mr. r9,r8 ; Does next entry exist?
5890 b graRemLoop ; Carry on
5893 li r7,ppLFAmask ; Get mask to clean up mapping pointer
5894 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
5895 la r11,ppLink(r26) ; Point to chain anchor
5896 ld r9,ppLink(r26) ; Get chain anchor
5897 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
5898 graRem64Lp: beq-- graRemoveMiss ; End of chain, this is not good
5899 cmpld r9,r31 ; Is this the mapping to remove?
5900 ld r8,mpAlias(r9) ; Get forward chain pointer
5901 bne graRem64Nxt ; Not mapping to remove, chain on, dude
5902 bt cr1_eq,graRem64Rt ; Mapping to remove is chained from anchor
5903 std r8,0(r11) ; Unchain gpv->phys mapping
5904 b graRemoved ; Exit loop
5905 graRem64Rt: ldarx r0,0,r11 ; Get previous link
5906 and r0,r0,r7 ; Get flags
5907 or r0,r0,r8 ; Insert new forward pointer
5908 stdcx. r0,0,r11 ; Slam it back in
5909 bne-- graRem64Rt ; Lost reservation, retry
5910 b graRemoved ; Good work, let's go home
5913 la r11,mpAlias(r9) ; Point to (soon to be) previous link
5914 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5915 mr. r9,r8 ; Does next entry exist?
5916 b graRem64Lp ; Carry on
5919 mr r3,r26 ; r3 <- physent's address
5920 bl mapPhysUnlock ; Unlock the physent (and its chain of mappings)
5922 lwz r3,mpFlags(r31) ; Get mapping's flags
5923 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
5924 ori r3,r3,mpgFree ; Mark mapping free
5925 stw r3,mpFlags(r31) ; Update flags
5927 graMiss: addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping
5928 rlwinm. r0,r31,0,GV_PAGE_MASK ; End of hash table page?
5929 bne graLoop ; No, examine next slot
5930 addi r28,r28,1 ; Increment hash table page index
5931 cmplwi r28,GV_HPAGES ; End of hash table?
5932 bne graPgLoop ; Examine next hash table page
5934 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5935 bl sxlkUnlock ; Release host pmap's search lock
5937 bt++ pf64Bitb,graRtn64 ; Handle 64-bit separately
5938 mtmsr r30 ; Restore 'rupts, translation
5939 isync ; Throw a small wrench into the pipeline
5940 b graPopFrame ; Nothing to do now but pop a frame and return
5941 graRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
5943 lwz r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5944 ; Get caller's return address
5945 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
5946 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
5947 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
5948 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
5949 mtlr r0 ; Prepare return address
5950 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
5951 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
5952 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
5953 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
5954 lwz r1,0(r1) ; Pop stack frame
5955 blr ; Return to caller
5959 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
5960 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
5961 li r3,failMapping ; The BOMB, Dmitri.
5962 sc ; The hydrogen bomb.
5966 ; Guest shadow assist -- remove local guest mappings
5968 ; Remove local mappings for a guest pmap from the shadow hash table.
5971 ; r3 : address of guest pmap, 32-bit kernel virtual address
5973 ; Non-volatile register usage:
5974 ; r20 : current active map word's physical address
5975 ; r21 : current hash table page address
5976 ; r22 : updated active map word in process
5977 ; r23 : active map word in process
5978 ; r24 : host pmap's physical address
5979 ; r25 : VMM extension block's physical address
5980 ; r26 : physent address
5981 ; r27 : guest pmap's space ID number
5982 ; r28 : current active map index
5983 ; r29 : guest pmap's physical address
5984 ; r30 : saved msr image
5985 ; r31 : current mapping
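;
; The active-map scan below, as a C sketch (slot_of() is a hypothetical
; helper for the band arithmetic that turns a map word index and bit
; number back into a mapping slot address):
;
;	for (unsigned w = 0; w < GV_MAP_WORDS; w++) {
;		uint32_t map = active[w];
;		while (map != 0) {
;			int b = __builtin_clz(map);
;			map &= ~(0x80000000u >> b);	// consume this bit
;			struct mapping *mp = slot_of(w, b);
;			if ((mp->mpFlags & mpgGlobal) || mp->mpSpace != space)
;				continue;		// skip global and foreign
;			active[w] &= ~(0x80000000u >> b);	// retire from active map
;			mp->mpFlags |= mpgDormant;	// park the mapping
;			invalidate_pte(mp);		// and disconnect its PTE
;		}
;	}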
5988 .globl EXT(hw_rem_local_gv)
5990 LEXT(hw_rem_local_gv)
5992 #define grlStackSize ((31-20+1)*4)+4
5993 stwu r1,-(FM_ALIGN(grlStackSize)+FM_SIZE)(r1)
5994 ; Mint a new stack frame
5995 mflr r0 ; Get caller's return address
5996 mfsprg r11,2 ; Get feature flags
5997 mtcrf 0x02,r11 ; Insert feature flags into cr6
5998 stw r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5999 ; Save caller's return address
6000 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6001 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6002 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6003 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6004 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6005 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6006 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6007 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6008 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6009 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
6010 stw r21,FM_ARG0+0x28(r1) ; Save non-volatile r21
6011 stw r20,FM_ARG0+0x2C(r1) ; Save non-volatile r20
6013 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6015 bt++ pf64Bitb,grl64Salt ; Test for 64-bit machine
6016 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
6017 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
6018 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
6019 b grlStart ; Get to it
6020 grl64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
6021 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
6022 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
6024 grlStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
6025 xor r29,r3,r9 ; Convert pmap_t virt->real
6026 mr r30,r11 ; Save caller's msr image
6028 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
6029 bl sxlkExclusive ; Get lock exclusive
6031 li r28,0 ; r28 <- index of first active map word to search
6032 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
6033 b grlMap1st ; Examine first map word
6036 grlNextMap: stw r22,0(r21) ; Save updated map word
6037 addi r28,r28,1 ; Increment map word index
6038 cmplwi r28,GV_MAP_WORDS ; See if we're done
6039 beq grlDone ; Yup, let's get outta here
6041 grlMap1st: la r20,VMX_ACTMAP_OFFSET(r25) ; Get base of active map word array
6042 rlwinm r11,r28,GV_MAPWD_SZ_LG2,GV_MAP_MASK
6043 ; Convert map index into map index offset
6044 add r20,r20,r11 ; Calculate map array element address
6045 lwz r22,0(r20) ; Get active map word at index
6046 mr. r23,r22 ; Any active mappings indicated?
6047 beq grlNextMap ; Nope, check next word
6049 la r21,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
6050 rlwinm r11,r28,GV_MAP_SHIFT,GV_HPAGE_MASK
6051 ; Extract page index from map word index and convert
6052 ; into page physical index offset
6053 add r21,r21,r11 ; Calculate page physical index entry address
6054 bt++ pf64Bitb,grl64Page ; Separate handling for 64-bit
6055 lwz r21,4(r21) ; Get selected hash table page's address
6056 b grlLoop ; Examine all slots in this page
6057 grl64Page: ld r21,0(r21) ; Get selected hash table page's address
6058 b grlLoop ; Examine all slots in this page
6061 grlLoop: cntlzw r11,r23 ; Get next active bit lit in map word
6062 cmplwi r11,32 ; Any active mappings left in this word?
6063 lis r12,0x8000 ; Prepare mask to reset bit
6064 srw r12,r12,r11 ; Position mask bit
6065 andc r23,r23,r12 ; Reset lit bit
6066 beq grlNextMap ; No bits lit, examine next map word
6068 slwi r31,r11,GV_SLOT_SZ_LG2 ; Get slot offset in slot band from lit bit number
6069 rlwimi r31,r28,GV_BAND_SHIFT,GV_BAND_MASK
6070 ; Extract slot band number from index and insert
6071 add r31,r31,r21 ; Add hash page address yielding mapping slot address
6073 lwz r3,mpFlags(r31) ; Get mapping's flags
6074 lhz r4,mpSpace(r31) ; Get mapping's space ID number
6075 rlwinm r5,r3,0,mpgGlobal ; Extract global bit
6076 xor r4,r4,r27 ; Compare space ID number
6077 or. r4,r4,r5 ; (space id miss || global)
6078 bne grlLoop ; Not one of ours, skip it
6079 andc r22,r22,r12 ; Reset active bit corresponding to this mapping
6080 ori r3,r3,mpgDormant ; Mark entry dormant
6081 stw r3,mpFlags(r31) ; Update mapping's flags
6083 bt++ pf64Bitb,grlDscon64 ; Handle 64-bit disconnect separately
6084 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6085 ; r31 <- mapping's physical address
6086 ; r3 -> PTE slot physical address
6087 ; r4 -> High-order 32 bits of PTE
6088 ; r5 -> Low-order 32 bits of PTE
6090 ; r7 -> PCA physical address
6091 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6092 b grlFreePTE ; Join 64-bit path to release the PTE
6093 grlDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6094 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6095 grlFreePTE: mr. r3,r3 ; Was there a valid PTE?
6096 beq- grlLoop ; No valid PTE, we're done with this mapping
6097 lis r0,0x8000 ; Prepare free bit for this slot
6098 srw r0,r0,r2 ; Position free bit
6099 or r6,r6,r0 ; Set it in our PCA image
6100 lwz r8,mpPte(r31) ; Get PTE pointer
6101 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6102 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6103 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
6104 stw r6,0(r7) ; Update PCA and unlock the PTEG
6105 b grlLoop ; On to next active mapping in this map word
6107 grlDone: la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
6108 bl sxlkUnlock ; Release host pmap's search lock
6110 bt++ pf64Bitb,grlRtn64 ; Handle 64-bit separately
6111 mtmsr r30 ; Restore 'rupts, translation
6112 isync ; Throw a small wrench into the pipeline
6113 b grlPopFrame ; Nothing to do now but pop a frame and return
6114 grlRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
6116 lwz r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6117 ; Get caller's return address
6118 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6119 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6120 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6121 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6122 mtlr r0 ; Prepare return address
6123 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6124 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6125 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6126 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6127 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6128 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6129 lwz r21,FM_ARG0+0x28(r1) ; Restore non-volatile r21
6130 lwz r20,FM_ARG0+0x2C(r1) ; Restore non-volatile r20
6131 lwz r1,0(r1) ; Pop stack frame
6132 blr ; Return to caller
6136 ; Guest shadow assist -- resume a guest mapping
6138 ; Locates the specified dormant mapping, and if it exists validates it and makes it active.
6142 ; r3 : address of host pmap, 32-bit kernel virtual address
6143 ; r4 : address of guest pmap, 32-bit kernel virtual address
6144 ; r5 : host virtual address, high-order 32 bits
6145 ; r6 : host virtual address, low-order 32 bits
6146 ; r7 : guest virtual address, high-order 32 bits
6147 ; r8 : guest virtual address, low-order 32 bits
6148 ; r9 : guest mapping protection code
6150 ; Non-volatile register usage:
6151 ; r23 : VMM extension block's physical address
6152 ; r24 : physent physical address
6153 ; r25 : caller's msr image from mapSetUp
6154 ; r26 : guest mapping protection code
6155 ; r27 : host pmap physical address
6156 ; r28 : guest pmap physical address
6157 ; r29 : host virtual address
6158 ; r30 : guest virtual address
6159 ; r31 : gva->phys mapping's physical address
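;
; Control flow, in rough outline (a sketch; helper names hypothetical,
; details follow in the code):
;
;	mp = gv_hash_lookup(guest_space, gva);	// same hash as the fault path
;	if (mp == NULL) goto miss;		// nothing to resume
;	if (!(mp->mpFlags & mpgDormant))
;		invalidate_pte(mp);		// active: disconnect PTE first
;	if (!physent_has(host_space, hva, mp->mpPAddr)) {
;		delete_guest_mapping(mp);	// stale, see the note below
;		goto miss;			// caller takes the long path
;	}
;	mp->mpFlags &= ~mpgDormant;		// resume, applying new protection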
6162 .globl EXT(hw_res_map_gv)
6166 #define grsStackSize ((31-23+1)*4)+4
6168 stwu r1,-(FM_ALIGN(grsStackSize)+FM_SIZE)(r1)
6169 ; Mint a new stack frame
6170 mflr r0 ; Get caller's return address
6171 mfsprg r11,2 ; Get feature flags
6172 mtcrf 0x02,r11 ; Insert feature flags into cr6
6173 stw r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6174 ; Save caller's return address
6175 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6176 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6177 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6178 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6179 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6180 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6181 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6182 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6183 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6185 rlwinm r29,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of host vaddr
6186 rlwinm r30,r8,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6187 mr r26,r9 ; Copy guest mapping protection code
6189 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6190 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6191 bt++ pf64Bitb,grs64Salt ; Handle 64-bit machine separately
6192 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6193 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6194 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6195 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6196 srwi r11,r30,12 ; Form shadow hash:
6197 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6198 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6199 ; Form index offset from hash page number
6200 add r31,r31,r10 ; r31 <- hash page index entry
6201 lwz r31,4(r31) ; r31 <- hash page paddr
6202 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6203 ; r31 <- hash group paddr
6204 b grsStart ; Get to it
6206 grs64Salt: rldimi r29,r5,32,0 ; Insert high-order 32 bits of 64-bit host vaddr
6207 rldimi r30,r7,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6208 ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6209 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6210 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6211 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6212 srwi r11,r30,12 ; Form shadow hash:
6213 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6214 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6215 ; Form index offset from hash page number
6216 add r31,r31,r10 ; r31 <- hash page index entry
6217 ld r31,0(r31) ; r31 <- hash page paddr
6218 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6219 ; r31 <- hash group paddr
6221 grsStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6222 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6223 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6224 mr r25,r11 ; Save caller's msr image
6226 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6227 bl sxlkExclusive ; Get lock exclusive
6229 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6230 mtctr r0 ; in this group
6231 bt++ pf64Bitb,grs64Search ; Test for 64-bit machine
6233 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6234 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6235 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6236 b grs32SrchLp ; Let the search begin!
6240 mr r6,r3 ; r6 <- current mapping slot's flags
6241 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6242 mr r7,r4 ; r7 <- current mapping slot's space ID
6243 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6244 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6245 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6246 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6247 xor r7,r7,r9 ; Compare space ID
6248 or r0,r11,r7 ; r0 <- !(!free && space match)
6249 xor r8,r8,r30 ; Compare virtual address
6250 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6251 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6253 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6254 bdnz grs32SrchLp ; Iterate
6256 mr r6,r3 ; r6 <- current mapping slot's flags
6257 clrrwi r5,r5,12 ; Remove flags from virtual address
6258 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6259 xor r4,r4,r9 ; Compare space ID
6260 or r0,r11,r4 ; r0 <- !(!free && space match)
6261 xor r5,r5,r30 ; Compare virtual address
6262 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6263 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6264 b grsSrchMiss ; No joy in our hash group
6267 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6268 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6269 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6270 b grs64SrchLp ; Let the search begin!
6274 mr r6,r3 ; r6 <- current mapping slot's flags
6275 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6276 mr r7,r4 ; r7 <- current mapping slot's space ID
6277 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6278 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6279 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6280 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6281 xor r7,r7,r9 ; Compare space ID
6282 or r0,r11,r7 ; r0 <- !(!free && space match)
6283 xor r8,r8,r30 ; Compare virtual address
6284 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6285 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6287 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6288 bdnz grs64SrchLp ; Iterate
6290 mr r6,r3 ; r6 <- current mapping slot's flags
6291 clrrdi r5,r5,12 ; Remove flags from virtual address
6292 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6293 xor r4,r4,r9 ; Compare space ID
6294 or r0,r11,r4 ; r0 <- !(!free && space match)
6295 xor r5,r5,r30 ; Compare virtual address
6296 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6297 bne grsSrchMiss ; No joy in our hash group
6300 rlwinm. r0,r6,0,mpgDormant ; Is the mapping dormant?
6301 bne grsFindHost ; Yes, nothing to disconnect
6303 bt++ pf64Bitb,grsDscon64 ; Handle 64-bit disconnect separately
6304 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6305 ; r31 <- mapping's physical address
6306 ; r3 -> PTE slot physical address
6307 ; r4 -> High-order 32 bits of PTE
6308 ; r5 -> Low-order 32 bits of PTE
6310 ; r7 -> PCA physical address
6311 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6312 b grsFreePTE ; Join 64-bit path to release the PTE
6313 grsDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6314 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6315 grsFreePTE: mr. r3,r3 ; Was there a valid PTE?
6316 beq- grsFindHost ; No valid PTE, we're almost done
6317 lis r0,0x8000 ; Prepare free bit for this slot
6318 srw r0,r0,r2 ; Position free bit
6319 or r6,r6,r0 ; Set it in our PCA image
6320 lwz r8,mpPte(r31) ; Get PTE pointer
6321 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6322 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6323 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6324 stw r6,0(r7) ; Update PCA and unlock the PTEG
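//
// The PTE release above (position a free bit by slot number, merge it into
// the PCA image, invalidate the mapping's PTE pointer, then store the PCA
// word, which also unlocks the PTEG) recurs throughout this file. A rough C
// sketch, with the PCA layout as an illustrative assumption:
//
//	#include <stdint.h>
//
//	// Mark PTE slot 'slot' free in the PCA image and publish it; the
//	// store to the PCA word doubles as the PTEG unlock.
//	static inline void
//	pca_free_slot(volatile uint32_t *pca, uint32_t pca_image, int slot)
//	{
//		pca_image |= 0x80000000u >> slot;   // set this slot's free bit
//		*pca = pca_image;                   // update PCA, unlock PTEG
//	}
//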
6328 // We now have a dormant guest mapping that matches our space id and virtual address. Our next
6329 // step is to locate the host mapping that completes the guest mapping's connection to a physical
6330 // frame. The guest and host mappings must connect to the same physical frame, so they must both
6331 // be chained on the same physent. We search the physent chain for a host mapping matching our
6332 // host's space id and the host virtual address. If we succeed, we know that the entire chain
6333 // of mappings (guest virtual->host virtual->physical) is valid, so the dormant mapping can be
6334 // resumed. If we fail to find the specified host virtual->physical mapping, it is because the
6335 // host virtual or physical address has changed since the guest mapping was suspended, so it
6336 // is no longer valid and cannot be resumed -- we therefore delete the guest mappping and tell
6337 // our caller that it will have to take its long path, translating the host virtual address
6338 // through the host's skiplist and installing a new guest mapping.
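//
// In rough C, the physent chain walk described above looks like the sketch
// below (types and helper names are illustrative assumptions):
//
//	#include <stddef.h>
//	#include <stdint.h>
//
//	struct mapping {
//		uint32_t	flags;		// includes the mpType field
//		uint16_t	space;		// address space id
//		uint64_t	vaddr;		// virtual address + HW flag bits
//		struct mapping	*alias;		// next mapping on this physent
//	};
//
//	// Return the normal host mapping for (space, hva) chained on this
//	// physent, or NULL if the hva->phys link has gone stale.
//	static struct mapping *
//	physent_find_host(struct mapping *chain, uint16_t space, uint64_t hva,
//	    uint32_t type_mask, uint32_t type_normal, uint64_t hwflag_mask)
//	{
//		struct mapping *mp;
//		for (mp = chain; mp != NULL; mp = mp->alias) {
//			if ((mp->flags & type_mask) == type_normal &&
//			    mp->space == space &&
//			    (mp->vaddr & ~hwflag_mask) == hva)
//				return mp;	// whole gva->hva->phys chain valid
//		}
//		return NULL;	// host mapping moved; guest entry is stale
//	}
//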
6340 grsFindHost: lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6341 bl mapFindLockPN ; Find 'n' lock this page's physent
6342 mr. r24,r3 ; Got lock on our physent?
6343 beq-- grsBadPLock ; No, time to bail out
6345 bt++ pf64Bitb,grsPFnd64 ; 64-bit version of physent chain search
6347 lwz r9,ppLink+4(r24) ; Get first mapping on physent
6348 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
6349 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
6350 grsPELoop: mr. r12,r9 ; Got a mapping to look at?
6351 beq- grsPEMiss ; Nope, we've missed hva->phys mapping
6352 lwz r7,mpFlags(r12) ; Get mapping's flags
6353 lhz r4,mpSpace(r12) ; Get mapping's space id number
6354 lwz r5,mpVAddr+4(r12) ; Get mapping's virtual address
6355 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
6357 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6358 rlwinm r5,r5,0,~mpHWFlags ; Bye-bye unsightly flags
6359 xori r0,r0,mpNormal ; Normal mapping?
6360 xor r4,r4,r6 ; Compare w/ host space id number
6361 xor r5,r5,r29 ; Compare w/ host virtual address
6362 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6363 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6364 beq grsPEHit ; Hit, we found the host mapping
6365 b grsPELoop ; Iterate
6367 grsPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
6368 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6369 ld r9,ppLink(r24) ; Get first mapping on physent
6370 lwz r6,pmapSpace(r27) ; Get pmap's space id number
6371 andc r9,r9,r0 ; Cleanup mapping pointer
6372 grsPELp64: mr. r12,r9 ; Got a mapping to look at?
6373 beq-- grsPEMiss ; Nope, we've missed hva->phys mapping
6374 lwz r7,mpFlags(r12) ; Get mapping's flags
6375 lhz r4,mpSpace(r12) ; Get mapping's space id number
6376 ld r5,mpVAddr(r12) ; Get mapping's virtual address
6377 ld r9,mpAlias(r12) ; Next mapping physent alias chain
6378 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6379 rldicr r5,r5,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
6380 xori r0,r0,mpNormal ; Normal mapping?
6381 xor r4,r4,r6 ; Compare w/ host space id number
6382 xor r5,r5,r29 ; Compare w/ host virtual address
6383 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6384 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6385 beq grsPEHit ; Hit, we found the host mapping
6386 b grsPELp64 ; Iterate
6388 grsPEHit: lwz r0,mpVAddr+4(r31) ; Get va byte containing protection bits
6389 rlwimi r0,r26,0,mpPP ; Insert new protection bits
6390 stw r0,mpVAddr+4(r31) ; Write 'em back
6392 eieio ; Ensure previous mapping updates are visible
6393 lwz r0,mpFlags(r31) ; Get flags
6394 rlwinm r0,r0,0,~mpgDormant ; Turn off dormant flag
6395 stw r0,mpFlags(r31) ; Set updated flags, entry is now valid
6397 li r31,mapRtOK ; Indicate success
6398 b grsRelPhy ; Exit through physent lock release
6400 grsPEMiss: crset cr1_eq ; cr1_eq <- previous link is the anchor
6401 bt++ pf64Bitb,grsRemove64 ; Use 64-bit version on 64-bit machine
6402 la r11,ppLink+4(r24) ; Point to chain anchor
6403 lwz r9,ppLink+4(r24) ; Get chain anchor
6404 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6405 grsRemLoop: beq- grsPEMissMiss ; End of chain, this is not good
6406 cmplw r9,r31 ; Is this the mapping to remove?
6407 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6408 bne grsRemNext ; No, chain onward
6409 bt cr1_eq,grsRemRetry ; Mapping to remove is chained from anchor
6410 stw r8,0(r11) ; Unchain gpv->phys mapping
6411 b grsDelete ; Finish deleting mapping
6413 grsRemRetry: lwarx r0,0,r11 ; Get previous link
6414 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6415 stwcx. r0,0,r11 ; Update previous link
6416 bne- grsRemRetry ; Lost reservation, retry
6417 b grsDelete ; Finish deleting mapping
6420 grsRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6421 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6422 mr. r9,r8 ; Does next entry exist?
6423 b grsRemLoop ; Carry on
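//
// The unlink above is ordinary singly-linked list removal; cr1_eq only
// tracks whether the previous link is the chain anchor, which carries flag
// bits and so must be updated under a reservation (lwarx/stwcx.). A rough C
// equivalent, ignoring the atomics (names are illustrative assumptions):
//
//	#include <stddef.h>
//
//	struct mapping { struct mapping *alias; /* ... */ };
//
//	// Remove 'victim' from the chain headed at '*anchor'; returns 0 on
//	// success, -1 if the victim is not on the chain (the MissMiss case).
//	static int
//	chain_unlink(struct mapping **anchor, struct mapping *victim)
//	{
//		struct mapping **prev = anchor;
//		struct mapping *mp;
//		for (mp = *prev; mp != NULL; mp = *prev) {
//			if (mp == victim) {
//				*prev = mp->alias;	// unchain the mapping
//				return 0;
//			}
//			prev = &mp->alias;		// chain onward
//		}
//		return -1;
//	}
//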
6426 grsRemove64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6427 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6428 la r11,ppLink(r24) ; Point to chain anchor
6429 ld r9,ppLink(r24) ; Get chain anchor
6430 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6431 grsRem64Lp: beq-- grsPEMissMiss ; End of chain, this is not good
6432 cmpld r9,r31 ; Is this the mapping to remove?
6433 ld r8,mpAlias(r9) ; Get forward chain pointer
6434 bne grsRem64Nxt ; Not mapping to remove, chain on, dude
6435 bt cr1_eq,grsRem64Rt ; Mapping to remove is chained from anchor
6436 std r8,0(r11) ; Unchain gpv->phys mapping
6437 b grsDelete ; Finish deleting mapping
6438 grsRem64Rt: ldarx r0,0,r11 ; Get previous link
6439 and r0,r0,r7 ; Get flags
6440 or r0,r0,r8 ; Insert new forward pointer
6441 stdcx. r0,0,r11 ; Slam it back in
6442 bne-- grsRem64Rt ; Lost reservation, retry
6443 b grsDelete ; Finish deleting mapping
6447 grsRem64Nxt: la r11,mpAlias(r9) ; Point to (soon to be) previous link
6448 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6449 mr. r9,r8 ; Does next entry exist?
6450 b grsRem64Lp ; Carry on
6453 grsDelete: lwz r3,mpFlags(r31) ; Get mapping's flags
6454 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
6455 ori r3,r3,mpgFree ; Mark mapping free
6456 stw r3,mpFlags(r31) ; Update flags
6458 li r31,mapRtNotFnd ; Didn't succeed
6460 grsRelPhy: mr r3,r24 ; r3 <- physent addr
6461 bl mapPhysUnlock ; Unlock physent chain
6463 grsRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6464 bl sxlkUnlock ; Release host pmap search lock
6466 grsRtn: mr r3,r31 ; r3 <- result code
6467 bt++ pf64Bitb,grsRtn64 ; Handle 64-bit separately
6468 mtmsr r25 ; Restore 'rupts, translation
6469 isync ; Throw a small wrench into the pipeline
6470 b grsPopFrame ; Nothing to do now but pop a frame and return
6471 grsRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6472 isync ; Throw a small wrench into the pipeline
6473 grsPopFrame: lwz r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6474 ; Get caller's return address
6475 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6476 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6477 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6478 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6479 mtlr r0 ; Prepare return address
6480 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6481 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6482 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6483 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6484 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6485 lwz r1,0(r1) ; Pop stack frame
6486 blr ; Return to caller
6490 grsSrchMiss: li r31,mapRtNotFnd ; Could not locate requested mapping
6491 b grsRelPmap ; Exit through host pmap search lock release
grsBadPLock:
grsPEMissMiss:
6495 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6496 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6497 li r3,failMapping ; The BOMB, Dmitri.
6498 sc ; The hydrogen bomb.
6502 ; Guest shadow assist -- add a guest mapping
6504 ; Adds a guest mapping.
6507 ; r3 : address of host pmap, 32-bit kernel virtual address
6508 ; r4 : address of guest pmap, 32-bit kernel virtual address
6509 ; r5 : guest virtual address, high-order 32 bits
6510 ; r6 : guest virtual address, low-order 32 bits (with mpHWFlags)
6511 ; r7 : new mapping's flags
6512 ; r8 : physical address, 32-bit page number
6514 ; Non-volatile register usage:
6515 ; r22 : hash group's physical address
6516 ; r23 : VMM extension block's physical address
6517 ; r24 : mapping's flags
6518 ; r25 : caller's msr image from mapSetUp
6519 ; r26 : physent physical address
6520 ; r27 : host pmap physical address
6521 ; r28 : guest pmap physical address
6522 ; r29 : physical address, 32-bit 4k-page number
6523 ; r30 : guest virtual address
6524 ; r31 : gva->phys mapping's physical address
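//
// All of these guest-shadow routines locate a hash group the same way. A
// rough C sketch of the lookup (the index arithmetic is an illustrative
// reading of the GV_* constants, not their definitions):
//
//	#include <stdint.h>
//
//	// Hash (space id, guest vaddr) down to its hash group's paddr.
//	static uint64_t
//	gv_hash_group(const uint64_t *hash_page_idx, uint32_t space,
//	    uint64_t gva, unsigned grps_per_pg_lg2, unsigned grp_sz_lg2)
//	{
//		uint32_t hash = space ^ (uint32_t)(gva >> 12);	// spaceID ^ (vaddr >> 12)
//		uint64_t page = hash_page_idx[hash >> grps_per_pg_lg2];
//							// hash page paddr from index
//		uint32_t grp = hash & ((1u << grps_per_pg_lg2) - 1);
//							// group within that page
//		return page | ((uint64_t)grp << grp_sz_lg2);	// hash group paddr
//	}
//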
6528 .globl EXT(hw_add_map_gv)

LEXT(hw_add_map_gv)
6533 #define gadStackSize ((31-22+1)*4)+4
6535 stwu r1,-(FM_ALIGN(gadStackSize)+FM_SIZE)(r1)
6536 ; Mint a new stack frame
6537 mflr r0 ; Get caller's return address
6538 mfsprg r11,2 ; Get feature flags
6539 mtcrf 0x02,r11 ; Insert feature flags into cr6
6540 stw r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6541 ; Save caller's return address
6542 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6543 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6544 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6545 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6546 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6547 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6548 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6549 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6550 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6551 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
6553 rlwinm r30,r5,0,1,0 ; Get high-order 32 bits of guest vaddr
6554 rlwimi r30,r6,0,0,31 ; Get low-order 32 bits of guest vaddr
6555 mr r24,r7 ; Copy guest mapping's flags
6556 mr r29,r8 ; Copy target frame's physical address
6558 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6559 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6560 bt++ pf64Bitb,gad64Salt ; Test for 64-bit machine
6561 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6562 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6563 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6564 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6565 srwi r11,r30,12 ; Form shadow hash:
6566 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6567 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6568 ; Form index offset from hash page number
6569 add r22,r22,r10 ; r22 <- hash page index entry
6570 lwz r22,4(r22) ; r22 <- hash page paddr
6571 rlwimi r22,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6572 ; r22 <- hash group paddr
6573 b gadStart ; Get to it
6575 gad64Salt: ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6576 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6577 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6578 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6579 srwi r11,r30,12 ; Form shadow hash:
6580 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6581 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6582 ; Form index offset from hash page number
6583 add r22,r22,r10 ; r22 <- hash page index entry
6584 ld r22,0(r22) ; r22 <- hash page paddr
6585 insrdi r22,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6586 ; r22 <- hash group paddr
6588 gadStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6589 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6590 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6591 mr r25,r11 ; Save caller's msr image
6593 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6594 bl sxlkExclusive ; Get lock exclusive
6596 mr r31,r22 ; Prepare to search this group
6597 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6598 mtctr r0 ; in this group
6599 bt++ pf64Bitb,gad64Search ; Test for 64-bit machine
6601 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6602 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6603 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6604 clrrwi r12,r30,12 ; r12 <- virtual address we're searching for
6605 b gad32SrchLp ; Let the search begin!
6609 gad32SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
6610 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6611 mr r7,r4 ; r7 <- current mapping slot's space ID
6612 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6613 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6614 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6615 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6616 xor r7,r7,r9 ; Compare space ID
6617 or r0,r11,r7 ; r0 <- !(!free && space match)
6618 xor r8,r8,r12 ; Compare virtual address
6619 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6620 beq gadRelPmap ; Hit, let upper-level redrive sort it out
6622 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6623 bdnz gad32SrchLp ; Iterate
6625 mr r6,r3 ; r6 <- current mapping slot's flags
6626 clrrwi r5,r5,12 ; Remove flags from virtual address
6627 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6628 xor r4,r4,r9 ; Compare space ID
6629 or r0,r11,r4 ; r0 <- !(!free && space match)
6630 xor r5,r5,r12 ; Compare virtual address
6631 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6632 beq gadRelPmap ; Hit, let upper-level redrive sort it out
6633 b gadScan ; No joy in our hash group
6636 gad64Search: lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6637 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6638 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6639 clrrdi r12,r30,12 ; r12 <- virtual address we're searching for
6640 b gad64SrchLp ; Let the search begin!
6644 gad64SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
6645 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6646 mr r7,r4 ; r7 <- current mapping slot's space ID
6647 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6648 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6649 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6650 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6651 xor r7,r7,r9 ; Compare space ID
6652 or r0,r11,r7 ; r0 <- !(!free && space match)
6653 xor r8,r8,r12 ; Compare virtual address
6654 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6655 beq gadRelPmap ; Hit, let upper-level redrive sort it out
6657 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6658 bdnz gad64SrchLp ; Iterate
6660 mr r6,r3 ; r6 <- current mapping slot's flags
6661 clrrdi r5,r5,12 ; Remove flags from virtual address
6662 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6663 xor r4,r4,r9 ; Compare space ID
6664 or r0,r11,r4 ; r0 <- !(!free && space match)
6665 xor r5,r5,r12 ; Compare virtual address
6666 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6667 bne gadScan ; No joy in our hash group
6668 b gadRelPmap ; Hit, let upper-level redrive sort it out
6670 gadScan: lbz r12,mpgCursor(r22) ; Get group's cursor
6671 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6672 ; Prepare to address slot at cursor
6673 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6674 mtctr r0 ; in this group
6675 or r2,r22,r12 ; r2 <- 1st mapping to search
6676 lwz r3,mpFlags(r2) ; r3 <- 1st mapping slot's flags
6677 li r11,0 ; No dormant entries found yet
6678 b gadScanLoop ; Let the search begin!
6682 gadScanLoop: addi r12,r12,GV_SLOT_SZ ; Calculate next slot number to search
6683 rlwinm r12,r12,0,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6684 ; Trim off any carry, wrapping into slot number range
6685 mr r31,r2 ; r31 <- current mapping's address
6686 or r2,r22,r12 ; r2 <- next mapping to search
6687 mr r6,r3 ; r6 <- current mapping slot's flags
6688 lwz r3,mpFlags(r2) ; r3 <- next mapping slot's flags
6689 rlwinm. r0,r6,0,mpgFree ; Test free flag
6690 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6691 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6692 xori r0,r0,mpgDormant ; Invert dormant flag
6693 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6694 bne gadNotDorm ; Not dormant or we've already seen one
6695 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6696 gadNotDorm: bdnz gadScanLoop ; Iterate
6698 mr r31,r2 ; r31 <- final mapping's address
6699 rlwinm. r0,r6,0,mpgFree ; Test free flag in final mapping
6700 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6701 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6702 xori r0,r0,mpgDormant ; Invert dormant flag
6703 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6704 bne gadCkDormant ; Not dormant or we've already seen one
6705 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6708 gadCkDormant: mr. r31,r11 ; Get dormant mapping, if any, and test
6709 bne gadUpCursor ; Go update the cursor, we'll take the dormant entry
6712 lbz r12,mpgCursor(r22) ; Get group's cursor
6713 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6714 ; Prepare to address slot at cursor
6715 or r31,r22,r12 ; r31 <- address of mapping to steal
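//
// The scan and steal logic above amounts to a three-tier victim policy:
// prefer a free slot, else reuse the first dormant slot seen, else steal
// the active slot at the group's round-robin cursor. A rough C sketch
// (illustrative only):
//
//	// Return the victim slot index within the hash group.
//	static int
//	gv_pick_slot(const unsigned *slot_flags, int nslots, int cursor,
//	    unsigned mpg_free, unsigned mpg_dormant)
//	{
//		int dormant = -1, i;
//		for (i = 0; i < nslots; i++) {
//			int s = (cursor + i) % nslots;	// wrap within the group
//			if (slot_flags[s] & mpg_free)
//				return s;		// free slot wins outright
//			if ((slot_flags[s] & mpg_dormant) && dormant < 0)
//				dormant = s;		// remember first dormant
//		}
//		return (dormant >= 0) ? dormant : cursor; // else steal at cursor
//	}
//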
6717 bt++ pf64Bitb,gadDscon64 ; Handle 64-bit disconnect separately
6718 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6719 ; r31 <- mapping's physical address
6720 ; r3 -> PTE slot physical address
6721 ; r4 -> High-order 32 bits of PTE
6722 ; r5 -> Low-order 32 bits of PTE
6724 ; r7 -> PCA physical address
6725 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6726 b gadFreePTE ; Join 64-bit path to release the PTE
6727 gadDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6728 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6729 gadFreePTE: mr. r3,r3 ; Was there a valid PTE?
6730 beq- gadUpCursor ; No valid PTE, we're almost done
6731 lis r0,0x8000 ; Prepare free bit for this slot
6732 srw r0,r0,r2 ; Position free bit
6733 or r6,r6,r0 ; Set it in our PCA image
6734 lwz r8,mpPte(r31) ; Get PTE pointer
6735 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6736 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6737 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6738 stw r6,0(r7) ; Update PCA and unlock the PTEG
6741 gadUpCursor: rlwinm r12,r31,(32-GV_SLOT_SZ_LG2),GV_SLOT_MASK
6742 ; Recover slot number from stolen mapping's address
6743 addi r12,r12,1 ; Increment slot number
6744 rlwinm r12,r12,0,GV_SLOT_MASK ; Clip to slot number range
6745 stb r12,mpgCursor(r22) ; Update group's cursor
6747 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6748 bl mapFindLockPN ; Find 'n' lock this page's physent
6749 mr. r26,r3 ; Got lock on our physent?
6750 beq-- gadBadPLock ; No, time to bail out
6752 crset cr1_eq ; cr1_eq <- previous link is the anchor
6753 bt++ pf64Bitb,gadRemove64 ; Use 64-bit version on 64-bit machine
6754 la r11,ppLink+4(r26) ; Point to chain anchor
6755 lwz r9,ppLink+4(r26) ; Get chain anchor
6756 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6757 gadRemLoop: beq- gadPEMissMiss ; End of chain, this is not good
6758 cmplw r9,r31 ; Is this the mapping to remove?
6759 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6760 bne gadRemNext ; No, chain onward
6761 bt cr1_eq,gadRemRetry ; Mapping to remove is chained from anchor
6762 stw r8,0(r11) ; Unchain gpv->phys mapping
6763 b gadDelDone ; Finish deleting mapping
6765 gadRemRetry: lwarx r0,0,r11 ; Get previous link
6766 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6767 stwcx. r0,0,r11 ; Update previous link
6768 bne- gadRemRetry ; Lost reservation, retry
6769 b gadDelDone ; Finish deleting mapping
6771 gadRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6772 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6773 mr. r9,r8 ; Does next entry exist?
6774 b gadRemLoop ; Carry on
6777 gadRemove64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6778 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6779 la r11,ppLink(r26) ; Point to chain anchor
6780 ld r9,ppLink(r26) ; Get chain anchor
6781 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6782 gadRem64Lp: beq-- gadPEMissMiss ; End of chain, this is not good
6783 cmpld r9,r31 ; Is this the mapping to remove?
6784 ld r8,mpAlias(r9) ; Get forward chain pointer
6785 bne gadRem64Nxt ; Not mapping to remove, chain on, dude
6786 bt cr1_eq,gadRem64Rt ; Mapping to remove is chained from anchor
6787 std r8,0(r11) ; Unchain gpv->phys mapping
6788 b gadDelDone ; Finish deleting mapping
6789 gadRem64Rt: ldarx r0,0,r11 ; Get previous link
6790 and r0,r0,r7 ; Get flags
6791 or r0,r0,r8 ; Insert new forward pointer
6792 stdcx. r0,0,r11 ; Slam it back in
6793 bne-- gadRem64Rt ; Lost reservation, retry
6794 b gadDelDone ; Finish deleting mapping
6798 gadRem64Nxt: la r11,mpAlias(r9) ; Point to (soon to be) previous link
6799 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6800 mr. r9,r8 ; Does next entry exist?
6801 b gadRem64Lp ; Carry on
6804 gadDelDone: mr r3,r26 ; Get physent address
6805 bl mapPhysUnlock ; Unlock physent chain
6808 gadFillMap: lwz r12,pmapSpace(r28) ; Get guest space id number
6809 li r2,0 ; Get a zero
6810 stw r24,mpFlags(r31) ; Set mapping's flags
6811 sth r12,mpSpace(r31) ; Set mapping's space id number
6812 stw r2,mpPte(r31) ; Set mapping's pte pointer invalid
6813 stw r29,mpPAddr(r31) ; Set mapping's physical address
6814 bt++ pf64Bitb,gadVA64 ; Use 64-bit version on 64-bit machine
6815 stw r30,mpVAddr+4(r31) ; Set mapping's virtual address (w/flags)
6816 b gadChain ; Continue with chaining mapping to physent
6817 gadVA64: std r30,mpVAddr(r31) ; Set mapping's virtual address (w/flags)
6819 gadChain: mr r3,r29 ; r3 <- physical frame address
6820 bl mapFindLockPN ; Find 'n' lock this page's physent
6821 mr. r26,r3 ; Got lock on our physent?
6822 beq-- gadBadPLock ; No, time to bail out
6824 bt++ pf64Bitb,gadChain64 ; Use 64-bit version on 64-bit machine
6825 lwz r12,ppLink+4(r26) ; Get forward chain
6826 rlwinm r11,r12,0,~ppFlags ; Get physent's forward pointer sans flags
6827 rlwimi r12,r31,0,~ppFlags ; Insert new mapping, preserve physent flags
6828 stw r11,mpAlias+4(r31) ; New mapping will head chain
6829 stw r12,ppLink+4(r26) ; Point physent to new mapping
6830 b gadFinish ; All over now...
6832 gadChain64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6833 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6834 ld r12,ppLink(r26) ; Get forward chain
6835 andc r11,r12,r7 ; Get physent's forward chain pointer sans flags
6836 and r12,r12,r7 ; Isolate pointer's flags
6837 or r12,r12,r31 ; Insert new mapping's address forming pointer
6838 std r11,mpAlias(r31) ; New mapping will head chain
6839 std r12,ppLink(r26) ; Point physent to new mapping
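//
// Both chaining paths above are a head insertion that must preserve the
// physent's flag bits, which live in the link word itself. In rough C
// (illustrative only; a single 64-bit link word is assumed):
//
//	#include <stdint.h>
//
//	struct mapping { uint64_t alias; /* ... */ };
//
//	// Push a mapping (clean physical address 'new_pa') onto the physent
//	// chain whose anchor word mixes a pointer with flag bits.
//	static void
//	physent_push(uint64_t *pp_link, struct mapping *new_mp,
//	    uint64_t new_pa, uint64_t flag_mask)
//	{
//		uint64_t old = *pp_link;
//		new_mp->alias = old & ~flag_mask;	// new mapping heads chain
//		*pp_link = (old & flag_mask) | new_pa;	// point physent at it,
//							// flags preserved
//	}
//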
6841 gadFinish: eieio ; Ensure new mapping is completely visible
6843 gadRelPhy: mr r3,r26 ; r3 <- physent addr
6844 bl mapPhysUnlock ; Unlock physent chain
6846 gadRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6847 bl sxlkUnlock ; Release host pmap search lock
6849 bt++ pf64Bitb,gadRtn64 ; Handle 64-bit separately
6850 mtmsr r25 ; Restore 'rupts, translation
6851 isync ; Throw a small wrench into the pipeline
6852 b gadPopFrame ; Nothing to do now but pop a frame and return
6853 gadRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6854 isync ; Throw a small wrench into the pipeline
6855 gadPopFrame: lwz r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6856 ; Get caller's return address
6857 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6858 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6859 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6860 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6861 mtlr r0 ; Prepare return address
6862 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6863 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6864 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6865 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6866 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6867 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6868 lwz r1,0(r1) ; Pop stack frame
6869 blr ; Return to caller
gadBadPLock:
gadPEMissMiss:
6873 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6874 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6875 li r3,failMapping ; The BOMB, Dmitri.
6876 sc ; The hydrogen bomb.
6880 ; Guest shadow assist -- suspend a guest mapping
6882 ; Suspends a guest mapping.
6885 ; r3 : address of host pmap, 32-bit kernel virtual address
6886 ; r4 : address of guest pmap, 32-bit kernel virtual address
6887 ; r5 : guest virtual address, high-order 32 bits
6888 ; r6 : guest virtual address, low-order 32 bits
6890 ; Non-volatile register usage:
6891 ; r26 : VMM extension block's physical address
6892 ; r27 : host pmap physical address
6893 ; r28 : guest pmap physical address
6894 ; r29 : caller's msr image from mapSetUp
6895 ; r30 : guest virtual address
6896 ; r31 : gva->phys mapping's physical address
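//
// In outline: find the active gva->phys mapping, tear down any hardware PTE
// it holds, and mark the software mapping dormant so a later resume
// (hw_res_map_gv) can revalidate it in place. A rough C sketch (the helper
// below stands in for mapInvPte32/mapInvPte64 and is an assumption):
//
//	struct mapping { unsigned flags; /* ... */ };
//
//	extern void pte_invalidate(struct mapping *mp);	// disconnect the PTE
//
//	static void
//	gv_suspend(struct mapping *mp, unsigned mpg_dormant)
//	{
//		pte_invalidate(mp);		// invalidate, gather ref/change
//		mp->flags |= mpg_dormant;	// mark entry dormant
//	}
//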
6900 .globl EXT(hw_susp_map_gv)
6902 LEXT(hw_susp_map_gv)
6904 #define gsuStackSize ((31-26+1)*4)+4
6906 stwu r1,-(FM_ALIGN(gsuStackSize)+FM_SIZE)(r1)
6907 ; Mint a new stack frame
6908 mflr r0 ; Get caller's return address
6909 mfsprg r11,2 ; Get feature flags
6910 mtcrf 0x02,r11 ; Insert feature flags into cr6
6911 stw r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6912 ; Save caller's return address
6913 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6914 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6915 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6916 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6917 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6918 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6920 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6922 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6923 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6924 bt++ pf64Bitb,gsu64Salt ; Test for 64-bit machine
6926 lwz r26,pmapVmmExtPhys+4(r3) ; r26 <- VMM pmap extension block paddr
6927 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6928 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6929 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6930 srwi r11,r30,12 ; Form shadow hash:
6931 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6932 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6933 ; Form index offset from hash page number
6934 add r31,r31,r10 ; r31 <- hash page index entry
6935 lwz r31,4(r31) ; r31 <- hash page paddr
6936 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6937 ; r31 <- hash group paddr
6938 b gsuStart ; Get to it
6939 gsu64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6940 ld r26,pmapVmmExtPhys(r3) ; r26 <- VMM pmap extension block paddr
6941 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6942 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6943 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6944 srwi r11,r30,12 ; Form shadow hash:
6945 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6946 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6947 ; Form index offset from hash page number
6948 add r31,r31,r10 ; r31 <- hash page index entry
6949 ld r31,0(r31) ; r31 <- hash page paddr
6950 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6951 ; r31 <- hash group paddr
6953 gsuStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6954 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6955 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6956 mr r29,r11 ; Save caller's msr image
6958 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6959 bl sxlkExclusive ; Get lock exclusive
6961 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6962 mtctr r0 ; in this group
6963 bt++ pf64Bitb,gsu64Search ; Test for 64-bit machine
6965 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6966 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6967 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6968 b gsu32SrchLp ; Let the search begin!
6972 gsu32SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
6973 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6974 mr r7,r4 ; r7 <- current mapping slot's space ID
6975 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6976 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6977 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6978 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6979 xor r7,r7,r9 ; Compare space ID
6980 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6981 xor r8,r8,r30 ; Compare virtual address
6982 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6983 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6985 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6986 bdnz gsu32SrchLp ; Iterate
6988 mr r6,r3 ; r6 <- current mapping slot's flags
6989 clrrwi r5,r5,12 ; Remove flags from virtual address
6990 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6991 xor r4,r4,r9 ; Compare space ID
6992 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6993 xor r5,r5,r30 ; Compare virtual address
6994 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6995 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6996 b gsuSrchMiss ; No joy in our hash group
6999 gsu64Search: lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7000 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7001 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7002 b gsu64SrchLp ; Let the search begin!
7006 gsu64SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
7007 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7008 mr r7,r4 ; r7 <- current mapping slot's space ID
7009 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7010 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7011 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7012 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7013 xor r7,r7,r9 ; Compare space ID
7014 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7015 xor r8,r8,r30 ; Compare virtual address
7016 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7017 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
7019 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7020 bdnz gsu64SrchLp ; Iterate
7022 mr r6,r3 ; r6 <- current mapping slot's flags
7023 clrrdi r5,r5,12 ; Remove flags from virtual address
7024 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7025 xor r4,r4,r9 ; Compare space ID
7026 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7027 xor r5,r5,r30 ; Compare virtual address
7028 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7029 bne gsuSrchMiss ; No joy in our hash group
7032 gsuSrchHit: bt++ pf64Bitb,gsuDscon64 ; Handle 64-bit disconnect separately
7033 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
7034 ; r31 <- mapping's physical address
7035 ; r3 -> PTE slot physical address
7036 ; r4 -> High-order 32 bits of PTE
7037 ; r5 -> Low-order 32 bits of PTE
7039 ; r7 -> PCA physical address
7040 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
7041 b gsuFreePTE ; Join 64-bit path to release the PTE
7042 gsuDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
7043 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
7044 gsuFreePTE: mr. r3,r3 ; Was there a valid PTE?
7045 beq- gsuNoPTE ; No valid PTE, we're almost done
7046 lis r0,0x8000 ; Prepare free bit for this slot
7047 srw r0,r0,r2 ; Position free bit
7048 or r6,r6,r0 ; Set it in our PCA image
7049 lwz r8,mpPte(r31) ; Get PTE pointer
7050 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
7051 stw r8,mpPte(r31) ; Save invalidated PTE pointer
7052 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
7053 stw r6,0(r7) ; Update PCA and unlock the PTEG
7055 gsuNoPTE: lwz r3,mpFlags(r31) ; Get mapping's flags
7056 ori r3,r3,mpgDormant ; Mark entry dormant
7057 stw r3,mpFlags(r31) ; Save updated flags
7058 eieio ; Ensure update is visible when we unlock
7061 gsuSrchMiss: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
7062 bl sxlkUnlock ; Release host pmap search lock
7064 bt++ pf64Bitb,gsuRtn64 ; Handle 64-bit separately
7065 mtmsr r29 ; Restore 'rupts, translation
7066 isync ; Throw a small wrench into the pipeline
7067 b gsuPopFrame ; Nothing to do now but pop a frame and return
7068 gsuRtn64: mtmsrd r29 ; Restore 'rupts, translation, 32-bit mode
7069 isync ; Throw a small wrench into the pipeline
7070 gsuPopFrame: lwz r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7071 ; Get caller's return address
7072 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7073 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7074 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7075 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7076 mtlr r0 ; Prepare return address
7077 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7078 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7079 lwz r1,0(r1) ; Pop stack frame
7080 blr ; Return to caller
7083 ; Guest shadow assist -- test guest mapping reference and change bits
7085 ; Locates the specified guest mapping, and if it exists gathers its reference
7086 ; and change bits, optionally resetting them.
7089 ; r3 : address of host pmap, 32-bit kernel virtual address
7090 ; r4 : address of guest pmap, 32-bit kernel virtual address
7091 ; r5 : guest virtual address, high-order 32 bits
7092 ; r6 : guest virtual address, low-order 32 bits
7093 ; r7 : reset boolean
7095 ; Non-volatile register usage:
7096 ; r24 : VMM extension block's physical address
7097 ; r25 : return code (w/reference and change bits)
7098 ; r26 : reset boolean
7099 ; r27 : host pmap physical address
7100 ; r28 : guest pmap physical address
7101 ; r29 : caller's msr image from mapSetUp
7102 ; r30 : guest virtual address
7103 ; r31 : gva->phys mapping's physical address
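//
// The result packs the mapping's reference (R) and change (C) bits; when
// the reset boolean is set they are cleared in both the software mapping
// and the PTE image before the PTE is revalidated. A rough C sketch
// (illustrative only):
//
//	#include <stdint.h>
//
//	// Gather R/C from the low-order PTE word; optionally clear them in
//	// the mapping's copy and in the PTE image.
//	static uint32_t
//	gather_rc(uint32_t *pte_lo, uint32_t *map_rc, int reset, uint32_t rc_mask)
//	{
//		uint32_t rc = *pte_lo & rc_mask;	// copy RC bits into result
//		if (reset) {
//			*map_rc &= ~rc_mask;		// clear mapping copy of RC
//			*pte_lo &= ~rc_mask;		// clear PTE copy of RC
//		}
//		return rc;
//	}
//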
7107 .globl EXT(hw_test_rc_gv)

LEXT(hw_test_rc_gv)
7111 #define gtdStackSize ((31-24+1)*4)+4
7113 stwu r1,-(FM_ALIGN(gtdStackSize)+FM_SIZE)(r1)
7114 ; Mint a new stack frame
7115 mflr r0 ; Get caller's return address
7116 mfsprg r11,2 ; Get feature flags
7117 mtcrf 0x02,r11 ; Insert feature flags into cr6
7118 stw r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7119 ; Save caller's return address
7120 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7121 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7122 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7123 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7124 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7125 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7126 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7127 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
7129 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7131 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7132 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
7134 bt++ pf64Bitb,gtd64Salt ; Test for 64-bit machine
7136 lwz r24,pmapVmmExtPhys+4(r3) ; r24 <- VMM pmap extension block paddr
7137 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
7138 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
7139 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7140 srwi r11,r30,12 ; Form shadow hash:
7141 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7142 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7143 ; Form index offset from hash page number
7144 add r31,r31,r10 ; r31 <- hash page index entry
7145 lwz r31,4(r31) ; r31 <- hash page paddr
7146 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7147 ; r31 <- hash group paddr
7148 b gtdStart ; Get to it
7150 gtd64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7151 ld r24,pmapVmmExtPhys(r3) ; r24 <- VMM pmap extension block paddr
7152 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
7153 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
7154 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7155 srwi r11,r30,12 ; Form shadow hash:
7156 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7157 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7158 ; Form index offset from hash page number
7159 add r31,r31,r10 ; r31 <- hash page index entry
7160 ld r31,0(r31) ; r31 <- hash page paddr
7161 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7162 ; r31 <- hash group paddr
7164 gtdStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
7165 xor r28,r4,r28 ; Convert guest pmap_t virt->real
7166 mr r26,r7 ; Save reset boolean
7167 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7168 mr r29,r11 ; Save caller's msr image
7170 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7171 bl sxlkExclusive ; Get lock exclusive
7173 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7174 mtctr r0 ; in this group
7175 bt++ pf64Bitb,gtd64Search ; Test for 64-bit machine
7177 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7178 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7179 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7180 b gtd32SrchLp ; Let the search begin!
7184 gtd32SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
7185 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7186 mr r7,r4 ; r7 <- current mapping slot's space ID
7187 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7188 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7189 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7190 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7191 xor r7,r7,r9 ; Compare space ID
7192 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7193 xor r8,r8,r30 ; Compare virtual address
7194 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7195 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7197 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7198 bdnz gtd32SrchLp ; Iterate
7200 mr r6,r3 ; r6 <- current mapping slot's flags
7201 clrrwi r5,r5,12 ; Remove flags from virtual address
7202 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7203 xor r4,r4,r9 ; Compare space ID
7204 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7205 xor r5,r5,r30 ; Compare virtual address
7206 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7207 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7208 b gtdSrchMiss ; No joy in our hash group
7211 gtd64Search: lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7212 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7213 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7214 b gtd64SrchLp ; Let the search begin!
7218 gtd64SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
7219 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7220 mr r7,r4 ; r7 <- current mapping slot's space ID
7221 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7222 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7223 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7224 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7225 xor r7,r7,r9 ; Compare space ID
7226 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7227 xor r8,r8,r30 ; Compare virtual address
7228 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7229 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7231 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7232 bdnz gtd64SrchLp ; Iterate
7234 mr r6,r3 ; r6 <- current mapping slot's flags
7235 clrrdi r5,r5,12 ; Remove flags from virtual address
7236 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7237 xor r4,r4,r9 ; Compare space ID
7238 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7239 xor r5,r5,r30 ; Compare virtual address
7240 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7241 bne gtdSrchMiss ; No joy in our hash group
7244 gtdSrchHit: bt++ pf64Bitb,gtdDo64 ; Split for 64 bit
7246 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
7248 cmplwi cr1,r26,0 ; Do we want to clear RC?
7249 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7250 mr. r3,r3 ; Was there a previously valid PTE?
7251 li r0,lo16(mpR|mpC) ; Get bits to clear
7253 and r25,r5,r0 ; Copy RC bits into result
7254 beq++ cr1,gtdNoClr32 ; Nope...
7256 andc r12,r12,r0 ; Clear mapping copy of RC
7257 andc r5,r5,r0 ; Clear PTE copy of RC
7258 sth r12,mpVAddr+6(r31) ; Set the new RC in mapping
7260 gtdNoClr32: beq-- gtdNoOld32 ; No previously valid PTE...
7262 sth r5,6(r3) ; Store updated RC in PTE
7263 eieio ; Make sure we do not reorder
7264 stw r4,0(r3) ; Revalidate the PTE
7266 eieio ; Make sure all updates come first
7267 stw r6,0(r7) ; Unlock PCA
7269 gtdNoOld32: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7270 bl sxlkUnlock ; Unlock the search list
7271 b gtdR32 ; Join common...
7276 gtdDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
7278 cmplwi cr1,r26,0 ; Do we want to clear RC?
7279 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7280 mr. r3,r3 ; Was there a previously valid PTE?
7281 li r0,lo16(mpR|mpC) ; Get bits to clear
7283 and r25,r5,r0 ; Copy RC bits into result
7284 beq++ cr1,gtdNoClr64 ; Nope...
7286 andc r12,r12,r0 ; Clear mapping copy of RC
7287 andc r5,r5,r0 ; Clear PTE copy of RC
7288 sth r12,mpVAddr+6(r31) ; Set the new RC
7290 gtdNoClr64: beq-- gtdNoOld64 ; Nope, no previous PTE...
7292 sth r5,14(r3) ; Store updated RC
7293 eieio ; Make sure we do not reorder
7294 std r4,0(r3) ; Revalidate the PTE
7296 eieio ; Make sure all updates come first
7297 stw r6,0(r7) ; Unlock PCA
7299 gtdNoOld64: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7300 bl sxlkUnlock ; Unlock the search list
7301 b gtdR64 ; Join common...
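//
// Note the store ordering on both revalidate paths above: the RC halfword
// is stored into the (still invalid) PTE, an eieio orders it before the
// store that sets the PTE's valid word, and a second eieio orders
// everything before the PCA store that unlocks the PTEG. In rough C, with
// eieio() standing in for the barrier (illustrative only):
//
//	#include <stdint.h>
//
//	extern void eieio(void);	// stand-in for the PPC eieio barrier
//
//	static void
//	pte_revalidate(volatile uint16_t *pte_rc, uint16_t rc,
//	    volatile uint32_t *pte_w0, uint32_t w0,
//	    volatile uint32_t *pca, uint32_t pca_image)
//	{
//		*pte_rc = rc;		// store updated RC in the PTE
//		eieio();		// make sure we do not reorder
//		*pte_w0 = w0;		// revalidate the PTE
//		eieio();		// all updates precede the unlock
//		*pca = pca_image;	// unlock PCA
//	}
//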
7304 gtdSrchMiss: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7305 bl sxlkUnlock ; Unlock the search list
7306 li r25,mapRtNotFnd ; Get ready to return not found
7307 bt++ pf64Bitb,gtdR64 ; Test for 64-bit machine
7309 gtdR32: mtmsr r29 ; Restore caller's msr image
7310 isync ; Throw a small wrench into the pipeline
7311 b gtdEpilog ; Join common epilog
7313 gtdR64: mtmsrd r29 ; Restore caller's msr image
7314 isync ; Throw a small wrench into the pipeline
7315 gtdEpilog: lwz r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7316 ; Get caller's return address
7317 mr r3,r25 ; Get return code
7318 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7319 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7320 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7321 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7322 mtlr r0 ; Prepare return address
7323 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7324 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7325 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7326 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
7327 lwz r1,0(r1) ; Pop stack frame
7328 blr ; Return to caller
7331 ; Guest shadow assist -- convert guest to host virtual address
7333 ; Locates the specified guest mapping, and if it exists locates the
7334 ; first mapping belonging to its host on the physical chain and returns
7335 ; its virtual address.
7337 ; Note that if there are multiple mappings belonging to this host
7338 ; chained to the physent to which the guest mapping is chained, then
7339 ; host virtual aliases exist for this physical address. If host aliases
7340 ; exist, then we select the first on the physent chain, making it
7341 ; unpredictable which of the two or more possible host virtual addresses
7345 ; r3 : address of guest pmap, 32-bit kernel virtual address
7346 ; r4 : guest virtual address, high-order 32 bits
7347 ; r5 : guest virtual address, low-order 32 bits
7349 ; Non-volatile register usage:
7350 ; r24 : physent physical address
7351 ; r25 : VMM extension block's physical address
7352 ; r26 : host virtual address
7353 ; r27 : host pmap physical address
7354 ; r28 : guest pmap physical address
7355 ; r29 : caller's msr image from mapSetUp
7356 ; r30 : guest virtual address
7357 ; r31 : gva->phys mapping's physical address
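//
// Once the guest mapping is found, the translation reduces to the physent
// walk below: take the first normal mapping in the host's space and return
// its vaddr, or all-ones (the routine's 64-bit -1) on a miss. Rough C
// sketch (types and names are illustrative assumptions):
//
//	#include <stddef.h>
//	#include <stdint.h>
//
//	struct mapping {
//		uint32_t	flags;
//		uint16_t	space;
//		uint64_t	vaddr;
//		struct mapping	*alias;
//	};
//
//	static uint64_t
//	gva_to_hva_tail(struct mapping *chain, uint16_t host_space,
//	    uint32_t type_mask, uint32_t type_normal, uint64_t hwflag_mask)
//	{
//		struct mapping *mp;
//		for (mp = chain; mp != NULL; mp = mp->alias) {
//			if ((mp->flags & type_mask) == type_normal &&
//			    mp->space == host_space)
//				return mp->vaddr & ~hwflag_mask; // first host alias
//		}
//		return ~0ULL;		// no host mapping: return 64-bit -1
//	}
//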
7361 .globl EXT(hw_gva_to_hva)

LEXT(hw_gva_to_hva)
7365 #define gthStackSize ((31-24+1)*4)+4
7367 stwu r1,-(FM_ALIGN(gthStackSize)+FM_SIZE)(r1)
7368 ; Mint a new stack frame
7369 mflr r0 ; Get caller's return address
7370 mfsprg r11,2 ; Get feature flags
7371 mtcrf 0x02,r11 ; Insert feature flags into cr6
7372 stw r0,(FM_ALIGN(gthStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7373 ; Save caller's return address
7374 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7375 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7376 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7377 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7378 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7379 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7380 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7381 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
7383 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7385 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7386 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7388 bt++ pf64Bitb,gth64Salt ; Test for 64-bit machine
7390 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
7391 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7392 lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
7393 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7394 srwi r11,r30,12 ; Form shadow hash:
7395 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7396 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7397 ; Form index offset from hash page number
7398 add r31,r31,r10 ; r31 <- hash page index entry
7399 lwz r31,4(r31) ; r31 <- hash page paddr
7400 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7401 ; r31 <- hash group paddr
7402 b gthStart ; Get to it
7404 gth64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7405 ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
7406 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7407 ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
7408 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7409 srwi r11,r30,12 ; Form shadow hash:
7410 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7411 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7412 ; Form index offset from hash page number
7413 add r31,r31,r10 ; r31 <- hash page index entry
7414 ld r31,0(r31) ; r31 <- hash page paddr
7415 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7416 ; r31 <- hash group paddr
7418 gthStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7419 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7420 mr r29,r11 ; Save caller's msr image
7422 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7423 bl sxlkExclusive ; Get lock exclusive
7425 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7426 mtctr r0 ; in this group
7427 bt++ pf64Bitb,gth64Search ; Test for 64-bit machine
7429 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7430 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7431 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7432 b gth32SrchLp ; Let the search begin!
7436 gth32SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
7437 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7438 mr r7,r4 ; r7 <- current mapping slot's space ID
7439 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7440 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7441 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7442 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7443 xor r7,r7,r9 ; Compare space ID
7444 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7445 xor r8,r8,r30 ; Compare virtual address
7446 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7447 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7449 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7450 bdnz gth32SrchLp ; Iterate
7452 mr r6,r3 ; r6 <- current mapping slot's flags
7453 clrrwi r5,r5,12 ; Remove flags from virtual address
7454 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7455 xor r4,r4,r9 ; Compare space ID
7456 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7457 xor r5,r5,r30 ; Compare virtual address
7458 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7459 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7460 b gthSrchMiss ; No joy in our hash group
7463 gth64Search: lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7464 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7465 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7466 b gth64SrchLp ; Let the search begin!
7470 gth64SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
7471 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7472 mr r7,r4 ; r7 <- current mapping slot's space ID
7473 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7474 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7475 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7476 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7477 xor r7,r7,r9 ; Compare space ID
7478 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7479 xor r8,r8,r30 ; Compare virtual address
7480 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7481 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7483 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7484 bdnz gth64SrchLp ; Iterate
7486 mr r6,r3 ; r6 <- current mapping slot's flags
7487 clrrdi r5,r5,12 ; Remove flags from virtual address
7488 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7489 xor r4,r4,r9 ; Compare space ID
7490 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7491 xor r5,r5,r30 ; Compare virtual address
7492 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7493 bne gthSrchMiss ; No joy in our hash group
7495 gthSrchHit: lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
7496 bl mapFindLockPN ; Find 'n' lock this page's physent
7497 mr. r24,r3 ; Got lock on our physent?
7498 beq-- gthBadPLock ; No, time to bail out
7500 bt++ pf64Bitb,gthPFnd64 ; 64-bit version of physent chain search
7502 lwz r9,ppLink+4(r24) ; Get first mapping on physent
7503 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
7504 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
7505 gthPELoop: mr. r12,r9 ; Got a mapping to look at?
7506 beq- gthPEMiss ; Nope, we've missed hva->phys mapping
7507 lwz r7,mpFlags(r12) ; Get mapping's flags
7508 lhz r4,mpSpace(r12) ; Get mapping's space id number
7509 lwz r26,mpVAddr+4(r12) ; Get mapping's virtual address
7510 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
7512 rlwinm r0,r7,0,mpType ; Isolate mapping's type
7513 rlwinm r26,r26,0,~mpHWFlags ; Bye-bye unsightly flags
7514 xori r0,r0,mpNormal ; Normal mapping?
7515 xor r4,r4,r6 ; Compare w/ host space id number
7516 or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
7517 beq gthPEHit ; Hit, we found the host mapping
7518 b gthPELoop ; Iterate
7520 gthPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
7521 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
7522 ld r9,ppLink(r24) ; Get first mapping on physent
7523 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
7524 andc r9,r9,r0 ; Cleanup mapping pointer
7525 gthPELp64: mr. r12,r9 ; Got a mapping to look at?
7526 beq-- gthPEMiss ; Nope, we've missed hva->phys mapping
7527 lwz r7,mpFlags(r12) ; Get mapping's flags
7528 lhz r4,mpSpace(r12) ; Get mapping's space id number
7529 ld r26,mpVAddr(r12) ; Get mapping's virtual address
7530 ld r9,mpAlias(r12) ; Next mapping physent alias chain
7531 rlwinm r0,r7,0,mpType ; Isolate mapping's type
7532 rldicr r26,r26,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
7533 xori r0,r0,mpNormal ; Normal mapping?
7534 xor r4,r4,r6 ; Compare w/ host space id number
7535 or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
7536 beq gthPEHit ; Hit, we found the host mapping
7537 b gthPELp64 ; Iterate
7540 gthPEMiss: mr r3,r24 ; Get physent's address
7541 bl mapPhysUnlock ; Unlock physent chain
7543 gthSrchMiss: la r3,pmapSXlk(r27) ; Get host pmap search lock address
7544 bl sxlkUnlock ; Release host pmap search lock
7545 li r3,-1 ; Return 64-bit -1
7547 bt++ pf64Bitb,gthEpi64 ; Take 64-bit exit
7548 b gthEpi32 ; Take 32-bit exit
7551 gthPEHit: mr r3,r24 ; Get physent's address
7552 bl mapPhysUnlock ; Unlock physent chain
7553 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7554 bl sxlkUnlock ; Release host pmap search lock
7556 bt++ pf64Bitb,gthR64 ; Test for 64-bit machine
7558 gthR32: li r3,0 ; High-order 32 bits host virtual address
7559 mr r4,r26 ; Low-order 32 bits host virtual address
7560 gthEpi32: mtmsr r29 ; Restore caller's msr image
7561 isync ; Throw a small wrench into the pipeline
7562 b gthEpilog ; Join common epilog
7565 gthR64: srdi r3,r26,32 ; High-order 32 bits host virtual address
7566 clrldi r4,r26,32 ; Low-order 32 bits host virtual address
7567 gthEpi64: mtmsrd r29 ; Restore caller's msr image
7568 isync ; Throw a small wrench into the pipeline
7569 gthEpilog: lwz r0,(FM_ALIGN(gthStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7570 ; Get caller's return address
7571 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7572 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7573 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7574 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7575 mtlr r0 ; Prepare return address
7576 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7577 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7578 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7579 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
7580 lwz r1,0(r1) ; Pop stack frame
7581 blr ; Return to caller
7584 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
7585 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
7586 li r3,failMapping ; The BOMB, Dmitri.
7587 sc ; The hydrogen bomb.
7591 ; Guest shadow assist -- find a guest mapping
7593 ; Locates the specified guest mapping, and if it exists returns a copy of it in the caller-supplied copy area.
7597 ; r3 : address of guest pmap, 32-bit kernel virtual address
7598 ; r4 : guest virtual address, high-order 32 bits
7599 ; r5 : guest virtual address, low-order 32 bits
7600 ; r6 : 32 byte copy area, 32-bit kernel virtual address
7602 ; Non-volatile register usage:
7603 ; r25 : VMM extension block's physical address
7604 ; r26 : copy area virtual address
7605 ; r27 : host pmap physical address
7606 ; r28 : guest pmap physical address
7607 ; r29 : caller's msr image from mapSetUp
7608 ; r30 : guest virtual address
7609 ; r31 : gva->phys mapping's physical address
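//
// On a hit the routine simply snapshots the mapping into the caller's
// 32-byte copy area; a rough C sketch (illustrative only):
//
//	#include <string.h>
//
//	static void
//	gv_copy_mapping(void *copy_area, const void *mapping)
//	{
//		memcpy(copy_area, mapping, 32);	// copy the mapping as found
//	}
//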
7613 .globl EXT(hw_find_map_gv)
7615 LEXT(hw_find_map_gv)
7617 #define gfmStackSize ((31-25+1)*4)+4
7619 stwu r1,-(FM_ALIGN(gfmStackSize)+FM_SIZE)(r1)
7620 ; Mint a new stack frame
7621 mflr r0 ; Get caller's return address
7622 mfsprg r11,2 ; Get feature flags
7623 mtcrf 0x02,r11 ; Insert feature flags into cr6
7624 stw r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7625 ; Save caller's return address
7626 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7627 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7628 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7629 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7630 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7631 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7632 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7634 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7635 mr r26,r6 ; Save copy area vaddr
7637 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7638 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7640 bt++ pf64Bitb,gfm64Salt ; Test for 64-bit machine
7642 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
7643 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7644 lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
7645 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7646 srwi r11,r30,12 ; Form shadow hash:
7647 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7648 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7649 ; Form index offset from hash page number
7650 add r31,r31,r10 ; r31 <- hash page index entry
7651 lwz r31,4(r31) ; r31 <- hash page paddr
7652 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7653 ; r31 <- hash group paddr
7654 b gfmStart ; Get to it
7656 gfm64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7657 ld r25,pmapVmmExtPhys(r3) ; r24 <- VMM pmap extension block paddr
7658 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7659 ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
7660 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7661 srwi r11,r30,12 ; Form shadow hash:
7662 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7663 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7664 ; Form index offset from hash page number
7665 add r31,r31,r10 ; r31 <- hash page index entry
7666 ld r31,0(r31) ; r31 <- hash page paddr
7667 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7668 ; r31 <- hash group paddr
7670 gfmStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7671 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7672 mr r29,r11 ; Save caller's msr image
7674 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7675 bl sxlkExclusive ; Get lock exclusive
7677 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7678 mtctr r0 ; in this group
7679 bt++ pf64Bitb,gfm64Search ; Test for 64-bit machine
7681 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7682 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7683 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7684 b gfm32SrchLp ; Let the search begin!
gfm32SrchLp: mr     r6,r3                       ; r6 <- current mapping slot's flags
            lwz     r3,mpFlags+GV_SLOT_SZ(r31)  ; r3 <- next mapping slot's flags
            mr      r7,r4                       ; r7 <- current mapping slot's space ID
            lhz     r4,mpSpace+GV_SLOT_SZ(r31)  ; r4 <- next mapping slot's space ID
            clrrwi  r8,r5,12                    ; r8 <- current mapping slot's virtual addr w/o flags
            lwz     r5,mpVAddr+4+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
            andi.   r11,r6,mpgFree+mpgDormant   ; Isolate guest free and dormant flags
            xor     r7,r7,r9                    ; Compare space ID
            or      r0,r11,r7                   ; r0 <- !(!free && !dormant && space match)
            xor     r8,r8,r30                   ; Compare virtual address
            or.     r0,r0,r8                    ; cr0_eq <- !free && !dormant && space match && virtual addr match
            beq     gfmSrchHit                  ; Join common path on hit (r31 points to guest mapping)

            addi    r31,r31,GV_SLOT_SZ          ; r31 <- next mapping slot
            bdnz    gfm32SrchLp                 ; Iterate

            mr      r6,r3                       ; r6 <- current mapping slot's flags
            clrrwi  r5,r5,12                    ; Remove flags from virtual address
            andi.   r11,r6,mpgFree+mpgDormant   ; Isolate guest free and dormant flags
            xor     r4,r4,r9                    ; Compare space ID
            or      r0,r11,r4                   ; r0 <- !(!free && !dormant && space match)
            xor     r5,r5,r30                   ; Compare virtual address
            or.     r0,r0,r5                    ; cr0_eq <- !free && !dormant && space match && virtual addr match
            beq     gfmSrchHit                  ; Join common path on hit (r31 points to guest mapping)
            b       gfmSrchMiss                 ; No joy in our hash group
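;   The four-way test above needs only a single compare because every miss
;   condition is ORed into one word.  A C sketch of one iteration
;   (illustrative; the mpgFree/mpgDormant values are placeholders for the
;   real flag bits defined in the pmap headers):
;
;       #define mpgFree    0x0001   /* placeholder bit values */
;       #define mpgDormant 0x0002
;
;       int slot_matches(uint32_t flags, uint32_t slot_space,
;                        uint64_t slot_va, uint32_t space_id, uint64_t gva)
;       {
;           uint32_t miss = (flags & (mpgFree | mpgDormant))  /* free/dormant   */
;                         | (slot_space ^ space_id);          /* space mismatch */
;           uint64_t va   = (slot_va & ~0xFFFULL) ^ gva;      /* vaddr mismatch */
;           return (miss | va) == 0;                          /* hit iff all 0  */
;       }
;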
gfm64Search: lwz    r3,mpFlags(r31)             ; r3 <- 1st mapping slot's flags
            lhz     r4,mpSpace(r31)             ; r4 <- 1st mapping slot's space ID
            ld      r5,mpVAddr(r31)             ; r5 <- 1st mapping slot's virtual address
            b       gfm64SrchLp                 ; Let the search begin!

gfm64SrchLp: mr     r6,r3                       ; r6 <- current mapping slot's flags
            lwz     r3,mpFlags+GV_SLOT_SZ(r31)  ; r3 <- next mapping slot's flags
            mr      r7,r4                       ; r7 <- current mapping slot's space ID
            lhz     r4,mpSpace+GV_SLOT_SZ(r31)  ; r4 <- next mapping slot's space ID
            clrrdi  r8,r5,12                    ; r8 <- current mapping slot's virtual addr w/o flags
            ld      r5,mpVAddr+GV_SLOT_SZ(r31)  ; r5 <- next mapping slot's virtual addr
            andi.   r11,r6,mpgFree+mpgDormant   ; Isolate guest free and dormant flags
            xor     r7,r7,r9                    ; Compare space ID
            or      r0,r11,r7                   ; r0 <- !(!free && !dormant && space match)
            xor     r8,r8,r30                   ; Compare virtual address
            or.     r0,r0,r8                    ; cr0_eq <- !free && !dormant && space match && virtual addr match
            beq     gfmSrchHit                  ; Join common path on hit (r31 points to guest mapping)

            addi    r31,r31,GV_SLOT_SZ          ; r31 <- next mapping slot
            bdnz    gfm64SrchLp                 ; Iterate

            mr      r6,r3                       ; r6 <- current mapping slot's flags
            clrrdi  r5,r5,12                    ; Remove flags from virtual address
            andi.   r11,r6,mpgFree+mpgDormant   ; Isolate guest free and dormant flags
            xor     r4,r4,r9                    ; Compare space ID
            or      r0,r11,r4                   ; r0 <- !(!free && !dormant && space match)
            xor     r5,r5,r30                   ; Compare virtual address
            or.     r0,r0,r5                    ; cr0_eq <- !free && !dormant && space match && virtual addr match
            bne     gfmSrchMiss                 ; No joy in our hash group
gfmSrchHit: lwz     r5,0(r31)                   ; Fetch 32 bytes of mapping from physical
            lwz     r6,4(r31)                   ;  +4
            lwz     r7,8(r31)                   ;  +8
            lwz     r8,12(r31)                  ;  +12
            lwz     r9,16(r31)                  ;  +16
            lwz     r10,20(r31)                 ;  +20
            lwz     r11,24(r31)                 ;  +24
            lwz     r12,28(r31)                 ;  +28

            li      r31,mapRtOK                 ; Return found mapping

            la      r3,pmapSXlk(r27)            ; Get host pmap search lock address
            bl      sxlkUnlock                  ; Release host pmap search lock

            bt++    pf64Bitb,gfmEpi64           ; Test for 64-bit machine

gfmEpi32:   mtmsr   r29                         ; Restore caller's msr image
            isync                               ; A small wrench
            b       gfmEpilog                   ;  and a larger bubble

gfmEpi64:   mtmsrd  r29                         ; Restore caller's msr image

gfmEpilog:  mr.     r3,r31                      ; Copy/test mapping address
            beq     gfmNotFound                 ; Skip copy if no mapping found

            stw     r5,0(r26)                   ; Store 32 bytes of mapping into virtual
            stw     r6,4(r26)                   ;  +4
            stw     r7,8(r26)                   ;  +8
            stw     r8,12(r26)                  ;  +12
            stw     r9,16(r26)                  ;  +16
            stw     r10,20(r26)                 ;  +20
            stw     r11,24(r26)                 ;  +24
            stw     r12,28(r26)                 ;  +28
gfmNotFound: lwz    r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
                                                ; Get caller's return address
            lwz     r31,FM_ARG0+0x00(r1)        ; Restore non-volatile r31
            lwz     r30,FM_ARG0+0x04(r1)        ; Restore non-volatile r30
            lwz     r29,FM_ARG0+0x08(r1)        ; Restore non-volatile r29
            lwz     r28,FM_ARG0+0x0C(r1)        ; Restore non-volatile r28
            mtlr    r0                          ; Prepare return address
            lwz     r27,FM_ARG0+0x10(r1)        ; Restore non-volatile r27
            lwz     r26,FM_ARG0+0x14(r1)        ; Restore non-volatile r26
            lwz     r25,FM_ARG0+0x18(r1)        ; Restore non-volatile r25
            lwz     r1,0(r1)                    ; Pop stack frame
            blr                                 ; Return to caller

gfmSrchMiss: li     r31,mapRtNotFnd             ; Indicate mapping not found
            la      r3,pmapSXlk(r27)            ; Get host pmap search lock address
            bl      sxlkUnlock                  ; Release host pmap search lock
            bt++    pf64Bitb,gfmEpi64           ; Take 64-bit exit
            b       gfmEpi32                    ; Take 32-bit exit
;
;   Guest shadow assist -- change guest page protection
;
;   Locates the specified guest mapping, and if it is active, changes its
;   protection.
;
;   Parameters:
;       r3 : address of guest pmap, 32-bit kernel virtual address
;       r4 : guest virtual address, high-order 32 bits
;       r5 : guest virtual address, low-order 32 bits
;       r6 : guest mapping protection code
;
;   Non-volatile register usage:
;       r25 : caller's msr image from mapSetUp
;       r26 : guest mapping protection code
;       r27 : host pmap physical address
;       r28 : guest pmap physical address
;       r29 : VMM extension block's physical address
;       r30 : guest virtual address
;       r31 : gva->phys mapping's physical address
;
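;   The protection change itself (at gcpSetKey below) is a read-modify-write
;   of the flag word that shares storage with the mapping's vaddr.  A C
;   sketch, assuming a hypothetical mpPP mask for the PP bits; the PTE must
;   already have been invalidated, as the code below ensures:
;
;       void gcp_set_key(volatile uint32_t *mpVAddr_lo, uint32_t prot)
;       {
;           uint32_t va = *mpVAddr_lo;
;           va = (va & ~mpPP) | (prot & mpPP);  /* insert new PP bits */
;           *mpVAddr_lo = va;                   /* write 'em back     */
;           __asm__ volatile("eieio");          /* order before reuse */
;       }
;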
            .globl  EXT(hw_protect_gv)

LEXT(hw_protect_gv)

#define gcpStackSize ((31-24+1)*4)+4

            stwu    r1,-(FM_ALIGN(gcpStackSize)+FM_SIZE)(r1)
                                                ; Mint a new stack frame
            mflr    r0                          ; Get caller's return address
            mfsprg  r11,2                       ; Get feature flags
            mtcrf   0x02,r11                    ; Insert feature flags into cr6
            stw     r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
                                                ; Save caller's return address
            stw     r31,FM_ARG0+0x00(r1)        ; Save non-volatile r31
            stw     r30,FM_ARG0+0x04(r1)        ; Save non-volatile r30
            stw     r29,FM_ARG0+0x08(r1)        ; Save non-volatile r29
            stw     r28,FM_ARG0+0x0C(r1)        ; Save non-volatile r28
            stw     r27,FM_ARG0+0x10(r1)        ; Save non-volatile r27
            stw     r26,FM_ARG0+0x14(r1)        ; Save non-volatile r26
            stw     r25,FM_ARG0+0x18(r1)        ; Save non-volatile r25

            rlwinm  r30,r5,0,0xFFFFF000         ; Clean up low-order 32 bits of guest vaddr
            mr      r26,r6                      ; Copy guest mapping protection code

            lwz     r11,pmapVmmExt(r3)          ; r11 <- VMM pmap extension block vaddr
            lwz     r9,pmapSpace(r3)            ; r9 <- guest space ID number
            bt++    pf64Bitb,gcp64Salt          ; Handle 64-bit machine separately
            lwz     r29,pmapVmmExtPhys+4(r3)    ; r29 <- VMM pmap extension block paddr
            lwz     r27,vmxHostPmapPhys+4(r11)  ; r27 <- host pmap paddr
            lwz     r28,pmapvr+4(r3)            ; Get 32-bit virt<->real guest pmap conversion salt
            la      r31,VMX_HPIDX_OFFSET(r11)   ; r31 <- base of hash page physical index
            srwi    r11,r30,12                  ; Form shadow hash:
            xor     r11,r11,r9                  ;   spaceID ^ (vaddr >> 12)
            rlwinm  r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
                                                ; Form index offset from hash page number
            add     r31,r31,r10                 ; r31 <- hash page index entry
            lwz     r31,4(r31)                  ; r31 <- hash page paddr
            rlwimi  r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
                                                ; r31 <- hash group paddr
            b       gcpStart                    ; Get to it

gcp64Salt:  rldimi  r30,r4,32,0                 ; Insert high-order 32 bits of 64-bit guest vaddr
            ld      r29,pmapVmmExtPhys(r3)      ; r29 <- VMM pmap extension block paddr
            ld      r27,vmxHostPmapPhys(r11)    ; r27 <- host pmap paddr
            ld      r28,pmapvr(r3)              ; Get 64-bit virt<->real guest pmap conversion salt
            la      r31,VMX_HPIDX_OFFSET(r11)   ; r31 <- base of hash page physical index
            srwi    r11,r30,12                  ; Form shadow hash:
            xor     r11,r11,r9                  ;   spaceID ^ (vaddr >> 12)
            rlwinm  r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
                                                ; Form index offset from hash page number
            add     r31,r31,r10                 ; r31 <- hash page index entry
            ld      r31,0(r31)                  ; r31 <- hash page paddr
            insrdi  r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
                                                ; r31 <- hash group paddr

gcpStart:   xor     r28,r3,r28                  ; Convert guest pmap_t virt->real
            bl      EXT(mapSetUp)               ; Disable 'rupts, translation, maybe enter 64-bit mode
            mr      r25,r11                     ; Save caller's msr image

            la      r3,pmapSXlk(r27)            ; r3 <- host pmap's search lock address
            bl      sxlkExclusive               ; Get lock exclusive

            li      r0,(GV_SLOTS - 1)           ; Prepare to iterate over mapping slots
            mtctr   r0                          ;  in this group
            bt++    pf64Bitb,gcp64Search        ; Test for 64-bit machine

            lwz     r3,mpFlags(r31)             ; r3 <- 1st mapping slot's flags
            lhz     r4,mpSpace(r31)             ; r4 <- 1st mapping slot's space ID
            lwz     r5,mpVAddr+4(r31)           ; r5 <- 1st mapping slot's virtual address
            b       gcp32SrchLp                 ; Let the search begin!
gcp32SrchLp: mr     r6,r3                       ; r6 <- current mapping slot's flags
            lwz     r3,mpFlags+GV_SLOT_SZ(r31)  ; r3 <- next mapping slot's flags
            mr      r7,r4                       ; r7 <- current mapping slot's space ID
            lhz     r4,mpSpace+GV_SLOT_SZ(r31)  ; r4 <- next mapping slot's space ID
            clrrwi  r8,r5,12                    ; r8 <- current mapping slot's virtual addr w/o flags
            lwz     r5,mpVAddr+4+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
            andi.   r11,r6,mpgFree+mpgDormant   ; Isolate guest free and dormant flags
            xor     r7,r7,r9                    ; Compare space ID
            or      r0,r11,r7                   ; r0 <- free || dormant || !space match
            xor     r8,r8,r30                   ; Compare virtual address
            or.     r0,r0,r8                    ; cr0_eq <- !free && !dormant && space match && virtual addr match
            beq     gcpSrchHit                  ; Join common path on hit (r31 points to guest mapping)

            addi    r31,r31,GV_SLOT_SZ          ; r31 <- next mapping slot
            bdnz    gcp32SrchLp                 ; Iterate

            mr      r6,r3                       ; r6 <- current mapping slot's flags
            clrrwi  r5,r5,12                    ; Remove flags from virtual address
            andi.   r11,r6,mpgFree+mpgDormant   ; Isolate guest free and dormant flags
            xor     r4,r4,r9                    ; Compare space ID
            or      r0,r11,r4                   ; r0 <- free || dormant || !space match
            xor     r5,r5,r30                   ; Compare virtual address
            or.     r0,r0,r5                    ; cr0_eq <- !free && !dormant && space match && virtual addr match
            beq     gcpSrchHit                  ; Join common path on hit (r31 points to guest mapping)
            b       gcpSrchMiss                 ; No joy in our hash group
gcp64Search: lwz    r3,mpFlags(r31)             ; r3 <- 1st mapping slot's flags
            lhz     r4,mpSpace(r31)             ; r4 <- 1st mapping slot's space ID
            ld      r5,mpVAddr(r31)             ; r5 <- 1st mapping slot's virtual address
            b       gcp64SrchLp                 ; Let the search begin!

gcp64SrchLp: mr     r6,r3                       ; r6 <- current mapping slot's flags
            lwz     r3,mpFlags+GV_SLOT_SZ(r31)  ; r3 <- next mapping slot's flags
            mr      r7,r4                       ; r7 <- current mapping slot's space ID
            lhz     r4,mpSpace+GV_SLOT_SZ(r31)  ; r4 <- next mapping slot's space ID
            clrrdi  r8,r5,12                    ; r8 <- current mapping slot's virtual addr w/o flags
            ld      r5,mpVAddr+GV_SLOT_SZ(r31)  ; r5 <- next mapping slot's virtual addr
            andi.   r11,r6,mpgFree+mpgDormant   ; Isolate guest free and dormant flags
            xor     r7,r7,r9                    ; Compare space ID
            or      r0,r11,r7                   ; r0 <- free || dormant || !space match
            xor     r8,r8,r30                   ; Compare virtual address
            or.     r0,r0,r8                    ; cr0_eq <- !free && !dormant && space match && virtual addr match
            beq     gcpSrchHit                  ; Join common path on hit (r31 points to guest mapping)

            addi    r31,r31,GV_SLOT_SZ          ; r31 <- next mapping slot
            bdnz    gcp64SrchLp                 ; Iterate

            mr      r6,r3                       ; r6 <- current mapping slot's flags
            clrrdi  r5,r5,12                    ; Remove flags from virtual address
            andi.   r11,r6,mpgFree+mpgDormant   ; Isolate guest free and dormant flags
            xor     r4,r4,r9                    ; Compare space ID
            or      r0,r11,r4                   ; r0 <- free || dormant || !space match
            xor     r5,r5,r30                   ; Compare virtual address
            or.     r0,r0,r5                    ; cr0_eq <- !free && !dormant && space match && virtual addr match
            bne     gcpSrchMiss                 ; No joy in our hash group
gcpSrchHit: bt++    pf64Bitb,gcpDscon64         ; Handle 64-bit disconnect separately
            bl      mapInvPte32                 ; Disconnect PTE, invalidate, gather ref and change
                                                ; r31 <- mapping's physical address
                                                ; r3  -> PTE slot physical address
                                                ; r4  -> High-order 32 bits of PTE
                                                ; r5  -> Low-order 32 bits of PTE
                                                ; r6  -> PCA image
                                                ; r7  -> PCA physical address
            rlwinm  r2,r3,29,29,31              ; Get PTE's slot number in the PTEG (8-byte PTEs)
            b       gcpFreePTE                  ; Join 64-bit path to release the PTE
gcpDscon64: bl      mapInvPte64                 ; Disconnect PTE, invalidate, gather ref and change
            rlwinm  r2,r3,28,29,31              ; Get PTE's slot number in the PTEG (16-byte PTEs)
gcpFreePTE: mr.     r3,r3                       ; Was there a valid PTE?
            beq-    gcpSetKey                   ; No valid PTE, we're almost done
            lis     r0,0x8000                   ; Prepare free bit for this slot
            srw     r0,r0,r2                    ; Position free bit
            or      r6,r6,r0                    ; Set it in our PCA image
            lwz     r8,mpPte(r31)               ; Get PTE pointer
            rlwinm  r8,r8,0,~mpHValid           ; Make the pointer invalid
            stw     r8,mpPte(r31)               ; Save invalidated PTE pointer
            eieio                               ; Synchronize all previous updates (mapInvPtexx didn't)
            stw     r6,0(r7)                    ; Update PCA and unlock the PTEG

gcpSetKey:  lwz     r0,mpVAddr+4(r31)           ; Get va word containing protection bits
            rlwimi  r0,r26,0,mpPP               ; Insert new protection bits
            stw     r0,mpVAddr+4(r31)           ; Write 'em back
            eieio                               ; Ensure previous mapping updates are visible
            li      r31,mapRtOK                 ; I'm a success

gcpRelPmap: la      r3,pmapSXlk(r27)            ; r3 <- host pmap search lock phys addr
            bl      sxlkUnlock                  ; Release host pmap search lock

            mr      r3,r31                      ; r3 <- result code
            bt++    pf64Bitb,gcpRtn64           ; Handle 64-bit separately
            mtmsr   r25                         ; Restore 'rupts, translation
            isync                               ; Throw a small wrench into the pipeline
            b       gcpPopFrame                 ; Nothing to do now but pop a frame and return
gcpRtn64:   mtmsrd  r25                         ; Restore 'rupts, translation, 32-bit mode
gcpPopFrame: lwz    r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
                                                ; Get caller's return address
            lwz     r31,FM_ARG0+0x00(r1)        ; Restore non-volatile r31
            lwz     r30,FM_ARG0+0x04(r1)        ; Restore non-volatile r30
            lwz     r29,FM_ARG0+0x08(r1)        ; Restore non-volatile r29
            lwz     r28,FM_ARG0+0x0C(r1)        ; Restore non-volatile r28
            mtlr    r0                          ; Prepare return address
            lwz     r27,FM_ARG0+0x10(r1)        ; Restore non-volatile r27
            lwz     r26,FM_ARG0+0x14(r1)        ; Restore non-volatile r26
            lwz     r25,FM_ARG0+0x18(r1)        ; Restore non-volatile r25
            lwz     r1,0(r1)                    ; Pop stack frame
            blr                                 ; Return to caller

gcpSrchMiss: li     r31,mapRtNotFnd             ; Could not locate requested mapping
            b       gcpRelPmap                  ; Exit through host pmap search lock release
;
;   Find the physent based on a physical page and try to lock it (but not too hard)
;   Note that this table always has an entry with a 0 table pointer at the end
;
;   R3 contains ppnum on entry
;   R3 is 0 if no entry was found
;   R3 is physent if found
;   cr0_eq is true if lock was obtained or there was no entry to lock
;   cr0_eq is false if there was an entry and it was locked
;
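;   A C sketch of the scan below (illustrative; the struct mirrors the
;   mrPhysTab/mrStart/mrEnd fields used by the assembly, and indexing a
;   uint64_t pointer gives the same 8-byte physent stride as the
;   "slwi r4,r4,3" below):
;
;       struct mem_region { uint64_t *mrPhysTab; uint32_t mrStart, mrEnd; };
;
;       uint64_t *find_physent(struct mem_region *mr, uint32_t ppnum)
;       {
;           for (; mr->mrPhysTab != 0; mr++)     /* 0 pointer ends the table */
;               if (ppnum >= mr->mrStart && ppnum <= mr->mrEnd)
;                   return mr->mrPhysTab + (ppnum - mr->mrStart);
;           return 0;                            /* no bank covers this page */
;       }
;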
            lis     r9,hi16(EXT(pmap_mem_regions))  ; Point to the start of the region table
            mr      r2,r3                       ; Save our target
            ori     r9,r9,lo16(EXT(pmap_mem_regions))   ; Point to the start of the region table

mapFindPhz: lwz     r3,mrPhysTab(r9)            ; Get the actual table address
            lwz     r5,mrStart(r9)              ; Get start of table entry
            lwz     r0,mrEnd(r9)                ; Get end of table entry
            addi    r9,r9,mrSize                ; Point to the next slot
            cmplwi  cr2,r3,0                    ; Are we at the end of the table?
            cmplw   r2,r5                       ; See if we are in this table
            cmplw   cr1,r2,r0                   ; Check end also
            sub     r4,r2,r5                    ; Calculate index to physical entry
            beq--   cr2,mapFindNo               ; Leave if we did not find an entry...
            cror    cr0_lt,cr0_lt,cr1_gt        ; Set CR0_LT if it is NOT this entry
            slwi    r4,r4,3                     ; Get offset to physical entry

            blt--   mapFindPhz                  ; Did not find it...

            add     r3,r3,r4                    ; Point right to the slot

mapFindOv:  lwz     r2,0(r3)                    ; Get the lock contents right now
            rlwinm. r0,r2,0,0,0                 ; Is it locked?
            bnelr--                             ; Yes it is...

            lwarx   r2,0,r3                     ; Get the lock
            rlwinm. r0,r2,0,0,0                 ; Is it locked?
            oris    r0,r2,0x8000                ; Set the lock bit
            bne--   mapFindKl                   ; It is locked, go get rid of reservation and leave...

            stwcx.  r0,0,r3                     ; Try to stuff it back...
            bne--   mapFindOv                   ; Collision, try again...
            isync                               ; Clear any speculations
            blr                                 ; Leave...

mapFindKl:  li      r2,lgKillResv               ; Killing field
            stwcx.  r2,0,r2                     ; Trash reservation...
            crclr   cr0_eq                      ; Make sure we do not think we got the lock
            blr                                 ; Leave...

mapFindNo:  crset   cr0_eq                      ; Make sure that we set this
            li      r3,0                        ; Show that we did not find it
            blr                                 ; Leave...
;
;   pmapCacheLookup - This function will look up an entry in the pmap segment cache.
;
;   How the pmap cache lookup works:
;
;   We use a combination of three things: a mask of valid entries, a sub-tag, and the
;   ESID (aka the "tag").  The mask indicates which of the cache slots actually contain
;   an entry.  The sub-tag is a 16 entry 4 bit array that contains the low order 4 bits
;   of the ESID, i.e., bits 32:35 of the effective address for 64-bit and 0:3 for
;   32-bit.  The cache entry contains the full 36 bit ESID.
;
;   The purpose of the sub-tag is to limit the number of searches necessary when looking
;   for an existing cache entry.  Because there are 16 slots in the cache, we could end up
;   searching all 16 if a match is not found.
;
;   Essentially, we will search only the slots that have a valid entry and whose sub-tag
;   matches.  More than likely, we will eliminate almost all of the searches.
;
;   Entry:
;       R3 = pmap address
;       R4 = ESID high half
;       R5 = ESID low half
;
;   Exit:
;       R3 = pmap cache slot if found, 0 if not
;       R10 = pmapCCtl address
;       R11 = pmapCCtl image
;       pmapCCtl locked on exit
;
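;   The screen below is a SWAR trick: replicate the 4-bit sub-tag across a
;   word, XNOR it against eight packed sub-tags at once, then fold each
;   nibble's four equality bits into a single bit.  A C sketch of one
;   32-bit half (illustrative; the assembly interleaves two such words to
;   cover all 16 slots):
;
;       uint32_t subtag_hits(uint32_t packed_subtags, uint32_t tag4)
;       {
;           uint32_t t = tag4 & 0xF;
;           t |= t << 4;  t |= t << 8;  t |= t << 16;  /* tag in all 8 nibbles  */
;           uint32_t m = ~(packed_subtags ^ t);        /* 0xF where nibble hits */
;           m &= m << 1;                               /* AND pairs of bits...  */
;           m &= m << 2;                               /* ...then all four      */
;           return m & 0x88888888;                     /* one hit bit per slot  */
;       }
;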
pmapCacheLookup:
            la      r10,pmapCCtl(r3)            ; Point to the segment cache control

pmapCacheLookuq:
            lwarx   r11,0,r10                   ; Get the segment cache control value
            rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb  ; Is it already locked?
            ori     r0,r11,lo16(pmapCCtlLck)    ; Turn on the lock bit
            bne--   pmapCacheLookur             ; Yes, it is locked, go wait...
            stwcx.  r0,0,r10                    ; Try to take the lock
            bne--   pmapCacheLookuq             ; Someone else just stuffed it, try again...

            isync                               ; Make sure we get reservation first
            lwz     r9,pmapSCSubTag(r3)         ; Get the high part of the sub-tag
            rlwimi  r5,r5,28,4,7                ; Copy sub-tag just to right of itself (XX------)
            lwz     r10,pmapSCSubTag+4(r3)      ; And the bottom half
            rlwimi  r5,r5,24,8,15               ; Copy doubled sub-tag to right of itself (XXXX----)
            lis     r8,0x8888                   ; Get some eights
            rlwimi  r5,r5,16,16,31              ; Copy quadrupled sub-tags to the right
            ori     r8,r8,0x8888                ; Fill the rest with eights

            eqv     r10,r10,r5                  ; Get 0xF where we hit in bottom half
            eqv     r9,r9,r5                    ; Get 0xF where we hit in top half

            rlwinm  r2,r10,1,0,30               ; Shift over 1
            rlwinm  r0,r9,1,0,30                ; Shift over 1
            and     r2,r2,r10                   ; AND the even/odd pair into the even
            and     r0,r0,r9                    ; AND the even/odd pair into the even
            rlwinm  r10,r2,2,0,28               ; Shift over 2
            rlwinm  r9,r0,2,0,28                ; Shift over 2
            and     r10,r2,r10                  ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
            and     r9,r0,r9                    ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...

            and     r10,r10,r8                  ; Clear out extras
            and     r9,r9,r8                    ; Clear out extras

            rlwinm  r0,r10,3,1,28               ; Slide adjacent next to each other
            rlwinm  r2,r9,3,1,28                ; Slide adjacent next to each other
            or      r10,r0,r10                  ; Merge them
            or      r9,r2,r9                    ; Merge them
            rlwinm  r0,r10,6,2,26               ; Slide adjacent pairs next to each other
            rlwinm  r2,r9,6,2,26                ; Slide adjacent pairs next to each other
            or      r10,r0,r10                  ; Merge them
            or      r9,r2,r9                    ; Merge them
            rlwimi  r10,r10,12,4,7              ; Stick in the low-order adjacent quad
            rlwimi  r9,r9,12,4,7                ; Stick in the low-order adjacent quad
            not     r6,r11                      ; Turn invalid into valid
            rlwimi  r9,r10,24,8,15              ; Merge in the adjacent octs giving a hit mask

            la      r10,pmapSegCache(r3)        ; Point at the cache slots
            and.    r6,r9,r6                    ; Get mask of valid and hit
            li      r0,0                        ; Clear
            li      r3,0                        ; Assume not found
            oris    r0,r0,0x8000                ; Start a mask
            beqlr++                             ; Leave, should usually be no hits...

pclNextEnt: cntlzw  r5,r6                       ; Find an in use one
            cmplwi  cr1,r5,pmapSegCacheUse      ; Did we find one?
            rlwinm  r7,r5,4,0,27                ; Index to the cache entry
            srw     r2,r0,r5                    ; Get validity mask bit
            add     r7,r7,r10                   ; Point to the cache slot
            andc    r6,r6,r2                    ; Clear the validity bit we just tried
            bgelr-- cr1                         ; Leave if there are no more to check...

            lwz     r5,sgcESID(r7)              ; Get the top half

            cmplw   r5,r4                       ; Only need to check top because sub-tag is the entire other half

            bne++   pclNextEnt                  ; Nope, try again...

            mr      r3,r7                       ; Point to the slot
            blr                                 ; Leave...

pmapCacheLookur:
            li      r11,lgKillResv              ; The killing spot
            stwcx.  r11,0,r11                   ; Kill the reservation

pmapCacheLookus:
            lwz     r11,pmapCCtl(r3)            ; Get the segment cache control
            rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb  ; Is it already locked?
            beq++   pmapCacheLookup             ; Nope...
            b       pmapCacheLookus             ; Yup, keep waiting...
;
;   mapMergeRC -- Given a physical mapping address in R31, locate its
;   connected PTE (if any) and merge the PTE referenced and changed bits
;   into the mapping and physent.
;
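;   The merge is two read-modify-writes: a plain one into the mapping (the
;   PCA lock protects it) and an atomic one into the master copy in the
;   physent.  A C sketch of the atomic half, with a GCC builtin standing in
;   for the lwarx/stwcx. retry loop at mMmrgRC; rc_positioned is the PTE's
;   R/C pair already shifted to the physent's bit positions (illustrative):
;
;       void merge_rc(volatile uint32_t *physent_word, uint32_t rc_positioned)
;       {
;           /* one atomic OR == the whole lwarx/or/stwcx./bne-- loop */
;           __atomic_fetch_or(physent_word, rc_positioned, __ATOMIC_RELAXED);
;       }
;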
            lwz     r0,mpPte(r31)               ; Grab the PTE offset
            mfsdr1  r7                          ; Get the pointer to the hash table
            lwz     r5,mpVAddr+4(r31)           ; Grab the virtual address
            rlwinm  r10,r7,0,0,15               ; Clean up the hash table base
            andi.   r3,r0,mpHValid              ; Is there a possible PTE?
            srwi    r7,r0,4                     ; Convert to PCA units
            rlwinm  r7,r7,0,0,29                ; Clean up PCA offset
            mflr    r2                          ; Save the return
            subfic  r7,r7,-4                    ; Convert to -4 based negative index
            add     r7,r10,r7                   ; Point to the PCA directly
            beqlr--                             ; There was no PTE to start with...

            bl      mapLockPteg                 ; Lock the PTEG

            lwz     r0,mpPte(r31)               ; Grab the PTE offset
            mtlr    r2                          ; Restore the LR
            andi.   r3,r0,mpHValid              ; Is there a possible PTE?
            beq-    mMPUnlock                   ; There is no PTE, someone took it so just unlock and leave...

            rlwinm  r3,r0,0,0,30                ; Clear the valid bit
            add     r3,r3,r10                   ; Point to actual PTE
            lwz     r5,4(r3)                    ; Get the real part of the PTE
            srwi    r10,r5,12                   ; Change physical address to a ppnum

mMNmerge:   lbz     r11,mpFlags+1(r31)          ; Get the offset to the physical entry table
            lwz     r0,mpVAddr+4(r31)           ; Get the flags part of the field
            lis     r8,hi16(EXT(pmap_mem_regions))  ; Get the top of the region table
            ori     r8,r8,lo16(EXT(pmap_mem_regions))   ; Get the bottom of the region table
            rlwinm  r11,r11,2,24,29             ; Mask index bits and convert to byte offset
            add     r11,r11,r8                  ; Point to the bank table
            lwz     r2,mrPhysTab(r11)           ; Get the physical table bank pointer
            lwz     r11,mrStart(r11)            ; Get the start of bank
            rlwimi  r0,r5,0,mpRb-32,mpCb-32     ; Copy in the RC
            addi    r2,r2,4                     ; Offset to last half of field
            stw     r0,mpVAddr+4(r31)           ; Set the new RC into the field
            sub     r11,r10,r11                 ; Get the index into the table
            rlwinm  r11,r11,3,0,28              ; Get offset to the physent

mMmrgRC:    lwarx   r10,r11,r2                  ; Get the master RC
            rlwinm  r0,r5,27,ppRb-32,ppCb-32    ; Position the new RC
            or      r0,r0,r10                   ; Merge in the new RC
            stwcx.  r0,r11,r2                   ; Try to stick it back
            bne--   mMmrgRC                     ; Try again if we collided...
            eieio                               ; Commit all updates

mMPUnlock:  stw     r6,0(r7)                    ; Unlock PTEG
            blr                                 ; Return
;
;   64-bit version of mapMergeRC
;
            lwz     r0,mpPte(r31)               ; Grab the PTE offset
            ld      r5,mpVAddr(r31)             ; Grab the virtual address
            mfsdr1  r7                          ; Get the pointer to the hash table
            rldicr  r10,r7,0,45                 ; Clean up the hash table base
            andi.   r3,r0,mpHValid              ; Is there a possible PTE?
            srdi    r7,r0,5                     ; Convert to PCA units
            rldicr  r7,r7,0,61                  ; Clean up PCA
            subfic  r7,r7,-4                    ; Convert to -4 based negative index
            mflr    r2                          ; Save the return
            add     r7,r10,r7                   ; Point to the PCA directly
            beqlr--                             ; There was no PTE to start with...

            bl      mapLockPteg                 ; Lock the PTEG

            lwz     r0,mpPte(r31)               ; Grab the PTE offset again
            mtlr    r2                          ; Restore the LR
            andi.   r3,r0,mpHValid              ; Is there a possible PTE?
            beq--   mMPUnlock                   ; There is no PTE, someone took it so just unlock and leave...

            rlwinm  r3,r0,0,0,30                ; Clear the valid bit
            add     r3,r3,r10                   ; Point to the actual PTE
            ld      r5,8(r3)                    ; Get the real part
            srdi    r10,r5,12                   ; Change physical address to a ppnum
            b       mMNmerge                    ; Join the common 32-64-bit code...
;
;   This routine, given a mapping, will find and lock the PTEG.
;   If mpPte does not point to a PTE (checked before and after lock), it will unlock the
;   PTEG and return.  In this case R3 will contain 0, R4 will be undefined,
;   and R5 will hold the low 12 bits of mpVAddr.
;
;   If the mapping is still valid, we will invalidate the PTE and merge
;   the RC bits into the physent and also save them into the mapping.
;
;   We then return with R3 pointing to the PTE slot, R4 is the
;   top of the PTE and R5 is the bottom.  R6 contains the PCA.
;   R7 points to the PCA entry.
;
;   Note that we should NEVER be called on a block or special mapping.
;   We could do many bad things.
;
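;   The invalidate protocol below, sketched in C (illustrative only:
;   spin_lock() is a hypothetical stand-in for the lwarx/stwcx. loop on
;   tlbieLock, and the tlbsync step is skipped on uniprocessors):
;
;       void invalidate_pte32(volatile uint32_t *pte, uint32_t ea,
;                             volatile uint32_t *tlbie_lock, int is_mp)
;       {
;           pte[0] &= ~0x80000000u;         /* clear V in PTE word 0        */
;           __asm__ volatile("sync");       /* everyone sees the invalidate */
;           spin_lock(tlbie_lock);          /* one tlbie at a time          */
;           __asm__ volatile("tlbie %0" :: "r"(ea));
;           if (is_mp)
;               __asm__ volatile("eieio; tlbsync; sync");
;           *tlbie_lock = 0;                /* clear the tlbie lock         */
;       }
;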
mapInvPte32:
            lwz     r0,mpPte(r31)               ; Grab the PTE offset
            mfsdr1  r7                          ; Get the pointer to the hash table
            lwz     r5,mpVAddr+4(r31)           ; Grab the virtual address
            rlwinm  r10,r7,0,0,15               ; Clean up the hash table base
            andi.   r3,r0,mpHValid              ; Is there a possible PTE?
            srwi    r7,r0,4                     ; Convert to PCA units
            rlwinm  r7,r7,0,0,29                ; Clean up PCA offset
            mflr    r2                          ; Save the return
            subfic  r7,r7,-4                    ; Convert to -4 based negative index
            add     r7,r10,r7                   ; Point to the PCA directly
            beqlr--                             ; There was no PTE to start with...

            bl      mapLockPteg                 ; Lock the PTEG

            lwz     r0,mpPte(r31)               ; Grab the PTE offset
            mtlr    r2                          ; Restore the LR
            andi.   r3,r0,mpHValid              ; Is there a possible PTE?
            beq-    mIPUnlock                   ; There is no PTE, someone took it so just unlock and leave...

            rlwinm  r3,r0,0,0,30                ; Clear the valid bit
            add     r3,r3,r10                   ; Point to actual PTE
            lwz     r4,0(r3)                    ; Get the top of the PTE

            li      r8,tlbieLock                ; Get the TLBIE lock
            rlwinm  r0,r4,0,1,31                ; Clear the valid bit
            stw     r0,0(r3)                    ; Invalidate the PTE

            sync                                ; Make sure everyone sees the invalidate

mITLBIE32:  lwarx   r0,0,r8                     ; Get the TLBIE lock
            mfsprg  r2,2                        ; Get feature flags
            mr.     r0,r0                       ; Is it locked?
            li      r0,1                        ; Get our lock word
            bne-    mITLBIE32                   ; It is locked, go wait...

            stwcx.  r0,0,r8                     ; Try to get it
            bne-    mITLBIE32                   ; We was beat...

            rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
            li      r0,0                        ; Lock clear value

            tlbie   r5                          ; Invalidate it everywhere

            beq-    mINoTS32                    ; Can not have MP on this machine...

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up
            sync                                ; Make sure of it all

mINoTS32:   stw     r0,tlbieLock(0)             ; Clear the tlbie lock
            lwz     r5,4(r3)                    ; Get the real part
            srwi    r10,r5,12                   ; Change physical address to a ppnum

mINmerge:   lbz     r11,mpFlags+1(r31)          ; Get the offset to the physical entry table
            lwz     r0,mpVAddr+4(r31)           ; Get the flags part of the field
            lis     r8,hi16(EXT(pmap_mem_regions))  ; Get the top of the region table
            ori     r8,r8,lo16(EXT(pmap_mem_regions))   ; Get the bottom of the region table
            rlwinm  r11,r11,2,24,29             ; Mask index bits and convert to byte offset
            add     r11,r11,r8                  ; Point to the bank table
            lwz     r2,mrPhysTab(r11)           ; Get the physical table bank pointer
            lwz     r11,mrStart(r11)            ; Get the start of bank
            rlwimi  r0,r5,0,mpRb-32,mpCb-32     ; Copy in the RC
            addi    r2,r2,4                     ; Offset to last half of field
            stw     r0,mpVAddr+4(r31)           ; Set the new RC into the field
            sub     r11,r10,r11                 ; Get the index into the table
            rlwinm  r11,r11,3,0,28              ; Get offset to the physent

mImrgRC:    lwarx   r10,r11,r2                  ; Get the master RC
            rlwinm  r0,r5,27,ppRb-32,ppCb-32    ; Position the new RC
            or      r0,r0,r10                   ; Merge in the new RC
            stwcx.  r0,r11,r2                   ; Try to stick it back
            bne--   mImrgRC                     ; Try again if we collided...

            blr                                 ; Leave with the PCA still locked up...

mIPUnlock:  eieio                               ; Make sure all updates come first

            stw     r6,0(r7)                    ; Unlock
            blr                                 ; Leave...
mapInvPte64:
            lwz     r0,mpPte(r31)               ; Grab the PTE offset
            ld      r5,mpVAddr(r31)             ; Grab the virtual address
            mfsdr1  r7                          ; Get the pointer to the hash table
            rldicr  r10,r7,0,45                 ; Clean up the hash table base
            andi.   r3,r0,mpHValid              ; Is there a possible PTE?
            srdi    r7,r0,5                     ; Convert to PCA units
            rldicr  r7,r7,0,61                  ; Clean up PCA
            subfic  r7,r7,-4                    ; Convert to -4 based negative index
            mflr    r2                          ; Save the return
            add     r7,r10,r7                   ; Point to the PCA directly
            beqlr--                             ; There was no PTE to start with...

            bl      mapLockPteg                 ; Lock the PTEG

            lwz     r0,mpPte(r31)               ; Grab the PTE offset again
            mtlr    r2                          ; Restore the LR
            andi.   r3,r0,mpHValid              ; Is there a possible PTE?
            beq--   mIPUnlock                   ; There is no PTE, someone took it so just unlock and leave...

            rlwinm  r3,r0,0,0,30                ; Clear the valid bit
            add     r3,r3,r10                   ; Point to the actual PTE
            ld      r4,0(r3)                    ; Get the top of the PTE

            li      r8,tlbieLock                ; Get the TLBIE lock
            rldicr  r0,r4,0,62                  ; Clear the valid bit
            std     r0,0(r3)                    ; Invalidate the PTE

            rldicr  r2,r4,16,35                 ; Shift the AVPN over to match VPN
            sync                                ; Make sure everyone sees the invalidate
            rldimi  r2,r5,0,36                  ; Cram in the page portion of the EA

mITLBIE64:  lwarx   r0,0,r8                     ; Get the TLBIE lock
            mr.     r0,r0                       ; Is it locked?
            li      r0,1                        ; Get our lock word
            bne--   mITLBIE64a                  ; It is locked, toss reservation and wait...

            stwcx.  r0,0,r8                     ; Try to get it
            bne--   mITLBIE64                   ; We was beat...

            rldicl  r2,r2,0,16                  ; Clear bits 0:15 because we are under orders

            li      r0,0                        ; Lock clear value

            tlbie   r2                          ; Invalidate it everywhere

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up
            ptesync                             ; Wait for quiet again

            stw     r0,tlbieLock(0)             ; Clear the tlbie lock

            ld      r5,8(r3)                    ; Get the real part
            srdi    r10,r5,12                   ; Change physical address to a ppnum
            b       mINmerge                    ; Join the common 32-64-bit code...

mITLBIE64a: li      r5,lgKillResv               ; Killing field
            stwcx.  r5,0,r5                     ; Kill reservation

mITLBIE64b: lwz     r0,0(r8)                    ; Get the TLBIE lock
            mr.     r0,r0                       ; Is it locked?
            beq++   mITLBIE64                   ; Nope, try again...
            b       mITLBIE64b                  ; Yup, wait for it...
;
;   mapLockPteg - Locks a PTEG
;   R7 points to PCA entry
;   R6 contains PCA on return
;
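;   In C, with a GCC builtin standing in for the lwarx/stwcx. pair
;   (illustrative; PCA_LOCK is a placeholder for the PCAlock bit, and the
;   real code also backs off through lgKillResv to drop a dangling
;   reservation while it waits):
;
;       #define PCA_LOCK 0x00000001u        /* placeholder bit */
;
;       uint32_t lock_pteg(volatile uint32_t *pca)
;       {
;           for (;;) {
;               uint32_t old = *pca;        /* peek without reserving */
;               if (!(old & PCA_LOCK) &&
;                   __atomic_compare_exchange_n(pca, &old, old | PCA_LOCK,
;                       0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
;                   return old;             /* caller gets the PCA image */
;           }
;       }
;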
mapLockPteg:
            lwarx   r6,0,r7                     ; Pick up the PCA
            rlwinm. r0,r6,0,PCAlockb,PCAlockb   ; Is the PTEG locked?
            ori     r0,r6,PCAlock               ; Set the lock bit
            bne--   mLSkill                     ; It is locked...

            stwcx.  r0,0,r7                     ; Try to lock the PTEG
            bne--   mapLockPteg                 ; We collided...

            isync                               ; Nostradamus lied
            blr                                 ; Leave...

mLSkill:    li      r6,lgKillResv               ; Get killing field
            stwcx.  r6,0,r6                     ; Kill it

mapLockPteh:
            lwz     r6,0(r7)                    ; Pick up the PCA
            rlwinm. r0,r6,0,PCAlockb,PCAlockb   ; Is the PTEG locked?
            beq++   mapLockPteg                 ; Nope, try again...
            b       mapLockPteh                 ; Yes, wait for it...
;
;   The mapSelSlot function selects a PTEG slot to use.  As input, it expects R6
;   to contain the PCA.  When it returns, R3 contains 0 if an unoccupied slot was
;   selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
;   R4 returns the slot index.
;
;   CR7 also indicates that we have a block mapping.
;
;   The PTEG allocation controls are a bit map of the state of the PTEG.
;   PCAfree indicates that the PTE slot is empty.
;   PCAauto means that it comes from an autogen area.  These
;   guys do not keep track of reference and change and are actually "wired".
;   They are easy to maintain.  PCAsteal
;   is a sliding position mask used to "randomize" PTE slot stealing.  All 4 of these
;   fields fit in a single word and are loaded and stored under control of the
;   PTEG control area lock (PCAlock).
;
;   Note that PCAauto does not contribute to the steal calculations at all.  Originally
;   it did, autogens were second in priority.  This can result in a pathological
;   case where an instruction can not make forward progress, or one PTE slot
;   thrashes.
;
;   Note that the PCA must be locked when we get here.
;
;   Physically, the fields are arranged:
;
;   At entry, R6 contains new unlocked PCA image (real PCA is locked and untouched)
;
;   At exit:
;       R3 = 0 - no steal
;       R3 = 1 - steal regular
;       R3 = 2 - steal autogen
;       R4 contains slot number
;       R6 contains updated PCA image
;
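;   A behavioral C sketch of the selection (illustrative; the real code
;   does this branch-free on the packed PCA word, and it also clears the
;   victim's autogen bit unless the new mapping is a block):
;
;       int sel_slot(uint8_t *free, uint8_t autogen, uint8_t *steal, int *slot)
;       {
;           int s;
;           if (*free) {                               /* unoccupied slot?  */
;               s = __builtin_clz((uint32_t)*free << 24);
;               *free &= ~(0x80 >> s);                 /* mark it in use    */
;               *slot = s;
;               return 0;                              /* no steal          */
;           }
;           s = __builtin_clz((uint32_t)*steal << 24); /* victim slot       */
;           *steal = (uint8_t)((*steal >> 1) | (*steal << 7)); /* slide     */
;           *slot = s;
;           return (autogen & (0x80 >> s)) ? 2 : 1;    /* autogen : regular */
;       }
;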
mapSelSlot: lis     r10,0                       ; Clear autogen mask
            li      r9,0                        ; Start a mask
            beq     cr7,mSSnotblk               ; Skip if this is not a block mapping
            ori     r10,r10,lo16(0xFFFF)        ; Make sure we mark a block mapping (autogen)

mSSnotblk:  rlwinm  r11,r6,16,24,31             ; Isolate just the steal mask
            oris    r9,r9,0x8000                ; Get a mask
            cntlzw  r4,r6                       ; Find a slot or steal one
            ori     r9,r9,lo16(0x8000)          ; Ensure that we have 0x80008000
            rlwinm  r4,r4,0,29,31               ; Isolate bit position
            rlwimi  r11,r11,8,16,23             ; Get set to march a 1 back into top of 8 bit rotate
            srw     r2,r9,r4                    ; Get mask to isolate selected inuse and autogen flags
            srwi    r11,r11,1                   ; Slide steal mask right
            and     r8,r6,r2                    ; Isolate the old in use and autogen bits
            andc    r6,r6,r2                    ; Allocate the slot and also clear autogen flag
            addi    r0,r8,0x7F00                ; Push autogen flag to bit 16
            and     r2,r2,r10                   ; Keep the autogen part if autogen
            addis   r8,r8,0xFF00                ; Push in use to bit 0 and invert
            or      r6,r6,r2                    ; Add in the new autogen bit
            rlwinm  r0,r0,17,31,31              ; Get a 1 if the old was autogenned (always 0 if not in use)
            rlwinm  r8,r8,1,31,31               ; Isolate old in use
            rlwimi  r6,r11,16,8,15              ; Stick the new steal slot in

            add     r3,r0,r8                    ; Get 0 if no steal, 1 if steal normal, 2 if steal autogen
            blr                                 ; Return
;
;   Shared/Exclusive locks
;
;   A shared/exclusive lock allows multiple shares of a lock to be taken
;   but only one exclusive.  A shared lock can be "promoted" to exclusive
;   when it is the only share.  If there are multiple sharers, the lock
;   must be "converted".  A promotion drops the share and gains exclusive as
;   an atomic operation.  If anyone else has a share, the operation fails.
;   A conversion first drops the share and then takes an exclusive lock.
;
;   We will want to add a timeout to this eventually.
;
;   R3 is set to 0 for success, non-zero for failure
;
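;   The lock word itself is just a high "exclusive" bit over a share count.
;   A C sketch of promote and unlock, with GCC builtins standing in for the
;   lwarx/stwcx. loops (illustrative; returns 0 on success like the
;   assembly):
;
;       #define SXLK_EXCL 0x80000000u
;
;       int sxlk_promote(volatile uint32_t *lk)
;       {
;           uint32_t expect = 1;                /* exactly one share: ours */
;           return __atomic_compare_exchange_n(lk, &expect, SXLK_EXCL,
;               0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) ? 0 : 1;
;       }
;
;       void sxlk_unlock(volatile uint32_t *lk)
;       {
;           if (*lk & SXLK_EXCL)                /* we hold it exclusively  */
;               __atomic_store_n(lk, 0, __ATOMIC_RELEASE);
;           else                                /* drop one share          */
;               __atomic_fetch_sub(lk, 1, __ATOMIC_RELEASE);
;       }
;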
;
;   Convert a share into an exclusive
;

            lis     r0,0x8000                   ; Get the locked lock image
#if 0
            mflr    r0                          ; (TEST/DEBUG)
            oris    r0,r0,0x8000                ; (TEST/DEBUG)
#endif

sxlkCTry:   lwarx   r2,0,r3                     ; Get the lock word
            cmplwi  r2,1                        ; Does it just have our share?
            subi    r2,r2,1                     ; Drop our share in case we do not get it
            bne--   sxlkCnotfree                ; No, we need to unlock...
            stwcx.  r0,0,r3                     ; Try to take it exclusively
            bne--   sxlkCTry                    ; Collision, try again...

            isync                               ; Toss anything younger than us
            li      r3,0                        ; Set RC
            blr                                 ; Leave...

sxlkCnotfree:
            stwcx.  r2,0,r3                     ; Try to drop our share...
            bne--   sxlkCTry                    ; Try again if we collided...
            b       sxlkExclusive               ; Go take it exclusively...
;
;   Promote shared to exclusive
;

            lis     r0,0x8000                   ; Get the locked lock image
#if 0
            mflr    r0                          ; (TEST/DEBUG)
            oris    r0,r0,0x8000                ; (TEST/DEBUG)
#endif

sxlkPTry:   lwarx   r2,0,r3                     ; Get the lock word
            cmplwi  r2,1                        ; Does it just have our share?
            bne--   sxlkPkill                   ; No, just fail (R3 is non-zero)...
            stwcx.  r0,0,r3                     ; Try to take it exclusively
            bne--   sxlkPTry                    ; Collision, try again...

            isync                               ; Toss anything younger than us
            li      r3,0                        ; Set RC
            blr                                 ; Leave...

sxlkPkill:  li      r2,lgKillResv               ; Point to killing field
            stwcx.  r2,0,r2                     ; Kill reservation
            blr                                 ; Leave, R3 is already non-zero...
;
;   Take lock exclusively
;

sxlkExclusive:
            lis     r0,0x8000                   ; Get the locked lock image
#if 0
            mflr    r0                          ; (TEST/DEBUG)
            oris    r0,r0,0x8000                ; (TEST/DEBUG)
#endif

sxlkXTry:   lwarx   r2,0,r3                     ; Get the lock word
            mr.     r2,r2                       ; Is it locked?
            bne--   sxlkXWait                   ; Yes...
            stwcx.  r0,0,r3                     ; Try to take it
            bne--   sxlkXTry                    ; Collision, try again...

            isync                               ; Toss anything younger than us
            li      r3,0                        ; Set RC
            blr                                 ; Leave...

sxlkXWait:  li      r2,lgKillResv               ; Point to killing field
            stwcx.  r2,0,r2                     ; Kill reservation

sxlkXWaiu:  lwz     r2,0(r3)                    ; Get the lock again
            mr.     r2,r2                       ; Is it free yet?
            beq++   sxlkXTry                    ; Yup...
            b       sxlkXWaiu                   ; Hang around a bit more...
;
;   Take a share of the lock
;

sxlkShared: lwarx   r2,0,r3                     ; Get the lock word
            rlwinm. r0,r2,0,0,0                 ; Is it locked exclusively?
            addi    r2,r2,1                     ; Up the share count
            bne--   sxlkSWait                   ; Yes...
            stwcx.  r2,0,r3                     ; Try to take it
            bne--   sxlkShared                  ; Collision, try again...

            isync                               ; Toss anything younger than us
            li      r3,0                        ; Set RC
            blr                                 ; Leave...

sxlkSWait:  li      r2,lgKillResv               ; Point to killing field
            stwcx.  r2,0,r2                     ; Kill reservation

sxlkSWaiu:  lwz     r2,0(r3)                    ; Get the lock again
            rlwinm. r0,r2,0,0,0                 ; Is it locked exclusively?
            beq++   sxlkShared                  ; Nope...
            b       sxlkSWaiu                   ; Hang around a bit more...
;
;   Unlock either exclusive or shared.
;

sxlkUnlock: eieio                               ; Make sure we order our stores out

sxlkUnTry:  lwarx   r2,0,r3                     ; Get the lock
            rlwinm. r0,r2,0,0,0                 ; Do we hold it exclusively?
            subi    r2,r2,1                     ; Remove our share if we have one
            li      r0,0                        ; Clear this
            bne--   sxlkUExclu                  ; We hold exclusive...

            stwcx.  r2,0,r3                     ; Try to lose our share
            bne--   sxlkUnTry                   ; Collision...
            blr                                 ; Leave...

sxlkUExclu: stwcx.  r0,0,r3                     ; Unlock and release reservation
            beqlr++                             ; Leave if ok...
            b       sxlkUnTry                   ; Could not store, try over...
            .globl  EXT(fillPage)

LEXT(fillPage)

            mfsprg  r0,2                        ; Get feature flags
            mtcrf   0x02,r0                     ; move pf64Bit to cr

            rlwinm  r4,r4,0,1,0                 ; Copy fill to top of 64-bit register
            lis     r2,0x0200                   ; Get vec
            mr      r6,r4                       ; Copy the fill
            ori     r2,r2,0x2000                ; Get FP
            mr      r7,r4                       ; Copy the fill
            mfmsr   r5                          ; Get the MSR
            mr      r8,r4                       ; Copy the fill
            andc    r5,r5,r2                    ; Clear out permanent turn-offs
            mr      r9,r4                       ; Copy the fill
            ori     r2,r2,0x8030                ; Clear IR, DR and EE
            mr      r10,r4                      ; Copy the fill
            andc    r0,r5,r2                    ; Kill them
            mr      r11,r4                      ; Copy the fill
            mr      r12,r4                      ; Copy the fill
            bt++    pf64Bitb,fpSF1              ; skip if 64-bit (only they take the hint)

            slwi    r3,r3,12                    ; Make into a physical address
            mtmsr   r0                          ; Interrupts and translation off
            isync                               ; Hang on tight

            li      r2,4096/32                  ; Get number of cache lines

fp32again:  dcbz    0,r3                        ; Clear
            addic.  r2,r2,-1                    ; Count down
            stw     r4,0(r3)                    ; Fill
            stw     r6,4(r3)                    ; Fill
            stw     r7,8(r3)                    ; Fill
            stw     r8,12(r3)                   ; Fill
            stw     r9,16(r3)                   ; Fill
            stw     r10,20(r3)                  ; Fill
            stw     r11,24(r3)                  ; Fill
            stw     r12,28(r3)                  ; Fill
            addi    r3,r3,32                    ; Point next
            bgt+    fp32again                   ; Keep going

            mtmsr   r5                          ; Restore all
            isync                               ; Hang on tight
            blr                                 ; Leave...
fpSF1:      li      r2,1                        ; Get a 1
            sldi    r2,r2,63                    ; Get 64-bit bit
            or      r0,r0,r2                    ; Turn on 64-bit
            sldi    r3,r3,12                    ; Make into a physical address

            mtmsrd  r0                          ; Interrupts and translation off
            isync                               ; Hang on tight

            li      r2,4096/128                 ; Get number of cache lines

fp64again:  dcbz128 0,r3                        ; Clear
            addic.  r2,r2,-1                    ; Count down
            std     r4,0(r3)                    ; Fill
            std     r6,8(r3)                    ; Fill
            std     r7,16(r3)                   ; Fill
            std     r8,24(r3)                   ; Fill
            std     r9,32(r3)                   ; Fill
            std     r10,40(r3)                  ; Fill
            std     r11,48(r3)                  ; Fill
            std     r12,56(r3)                  ; Fill
            std     r4,64+0(r3)                 ; Fill
            std     r6,64+8(r3)                 ; Fill
            std     r7,64+16(r3)                ; Fill
            std     r8,64+24(r3)                ; Fill
            std     r9,64+32(r3)                ; Fill
            std     r10,64+40(r3)               ; Fill
            std     r11,64+48(r3)               ; Fill
            std     r12,64+56(r3)               ; Fill
            addi    r3,r3,128                   ; Point next
            bgt+    fp64again                   ; Keep going

            mtmsrd  r5                          ; Restore all
            isync                               ; Hang on tight
            blr                                 ; Leave...
            lis     r11,hi16(EXT(mapdebug))
            ori     r11,r11,lo16(EXT(mapdebug))

mLxx:       rlwinm  r0,r12,0,MSR_DR_BIT+1,MSR_DR_BIT-1
            .globl  EXT(checkBogus)

LEXT(checkBogus)

            blr                                 ; No-op normally