2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
24 #include <db_machine_commands.h>
27 #include <mach_debug.h>
29 #include <ppc/proc_reg.h>
30 #include <ppc/exception.h>
31 #include <ppc/Performance.h>
32 #include <ppc/exception.h>
33 #include <mach/ppc/vm_param.h>
40 ; +--------+--------+--------+--------+--------+--------+--------+--------+
41 ; |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - EA
42 ; +--------+--------+--------+--------+--------+--------+--------+--------+
46 ; +--------+--------+--------+
47 ; |//////BB|BBBBBBBB|BBBB////| - SID - base
48 ; +--------+--------+--------+
52 ; +--------+--------+--------+
53 ; |////////|11111111|111111//| - SID - copy 1
54 ; +--------+--------+--------+
58 ; +--------+--------+--------+
59 ; |////////|//222222|22222222| - SID - copy 2
60 ; +--------+--------+--------+
64 ; +--------+--------+--------+
65 ; |//////33|33333333|33//////| - SID - copy 3 - not needed
66 ; +--------+--------+--------+ for 65 bit VPN
70 ; +--------+--------+--------+--------+--------+--------+--------+
71 ; |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////| - SID Hash - this is all
72 ; +--------+--------+--------+--------+--------+--------+--------+ SID copies ORed
75 ; +--------+--------+--------+--------+--------+--------+--------+
76 ; |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////| - Shifted high order EA
77 ; +--------+--------+--------+--------+--------+--------+--------+ left shifted "segment"
84 ; +--------+--------+--------+--------+--------+--------+--------+
85 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////| - VSID - SID Hash XORed
86 ; +--------+--------+--------+--------+--------+--------+--------+ with shifted EA
88 ; 0 0 1 2 3 4 4 5 6 7 7
89 ; 0 8 6 4 2 0 8 6 4 2 9
90 ; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
91 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - VPN
92 ; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
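;
; Illustrative C sketch of the VSID/VPN formation pictured above (not part of this
; file; the shift amounts and field widths below are placeholders, not the exact
; layout): shifted copies of the address space ID are ORed together, the result is
; XORed with the shifted high-order ("segment") bits of the EA to form the VSID,
; and the VSID concatenated with the EA page index yields the VPN used for hashing.
;
;   typedef unsigned long long u64;
;
;   u64 make_vpn(u64 ea, u64 space_id)
;   {
;       u64 sid_hash = space_id | (space_id << 14) | (space_id << 28);  /* ORed SID copies       */
;       u64 segment  = ea >> 28;                                        /* shifted high-order EA */
;       u64 vsid     = sid_hash ^ segment;                              /* VSID                  */
;       u64 page     = (ea >> 12) & 0xFFFF;                             /* 16-bit EA page index  */
;       return (vsid << 16) | page;                                     /* VPN                   */
;   }
;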
96 /* addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping
98 * Maps a page or block into a pmap
100 * Returns 0 if add worked or the vaddr of the first overlap if not
102 * Make mapping - not block or I/O - note: this is low-level, upper should remove duplicates
104 * 1) bump mapping busy count
106 * 3) find mapping full path - finds all possible list previous elements
107 * 4) upgrade pmap to exclusive
108 * 5) add mapping to search list
114 * 11) drop mapping busy count
117 * Make mapping - block or I/O - note: this is low-level, upper should remove duplicates
119 * 1) bump mapping busy count
121 * 3) find mapping full path - finds all possible list previous elements
122 * 4) upgrade pmap to exclusive
123 * 5) add mapping to search list
125 * 7) drop mapping busy count
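;
; Hedged C outline of the add path described above (illustrative only; the C
; signatures for the sxlk*/mapSearchFull/mapInsert helpers are assumptions made
; for this sketch, and the overlap/result handling is simplified):
;
;   #define mapRtBadLkC 1ULL                               /* stand-in failure code */
;   struct pmap;  struct mapping;
;   extern int  sxlkShared(struct pmap *);                 /* 0 on success */
;   extern int  sxlkPromote(struct pmap *);                /* 0 on success */
;   extern int  sxlkConvert(struct pmap *);                /* 0 on success */
;   extern void sxlkUnlock(struct pmap *);
;   extern struct mapping *mapSearchFull(struct pmap *, unsigned long long va);
;   extern void mapInsert(struct pmap *, struct mapping *);
;
;   unsigned long long hw_add_map_outline(struct pmap *pmap, struct mapping *mp,
;                                         unsigned long long va)
;   {
;       int exclusive = 0;
;       if (sxlkShared(pmap) != 0) return mapRtBadLkC;      /* shared search lock        */
;       for (;;) {
;           struct mapping *hit = mapSearchFull(pmap, va);  /* full-path search          */
;           if (hit != 0) { sxlkUnlock(pmap); return va; }  /* overlap: report the vaddr */
;           if (exclusive || sxlkPromote(pmap) == 0) break; /* we now hold it exclusive  */
;           if (sxlkConvert(pmap) != 0) return mapRtBadLkC; /* convert drops the lock... */
;           exclusive = 1;                                  /* ...so we must rescan      */
;       }
;       mapInsert(pmap, mp);                                /* add to the search list    */
;       sxlkUnlock(pmap);
;       return 0;                                           /* success                   */
;   }
;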
130 .globl EXT(hw_add_map)
134 stwu r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
135 mflr r0 ; Save the link register
136 stw r17,FM_ARG0+0x00(r1) ; Save a register
137 stw r18,FM_ARG0+0x04(r1) ; Save a register
138 stw r19,FM_ARG0+0x08(r1) ; Save a register
139 mfsprg r19,2 ; Get feature flags
140 stw r20,FM_ARG0+0x0C(r1) ; Save a register
141 stw r21,FM_ARG0+0x10(r1) ; Save a register
142 mtcrf 0x02,r19 ; move pf64Bit cr6
143 stw r22,FM_ARG0+0x14(r1) ; Save a register
144 stw r23,FM_ARG0+0x18(r1) ; Save a register
145 stw r24,FM_ARG0+0x1C(r1) ; Save a register
146 stw r25,FM_ARG0+0x20(r1) ; Save a register
147 stw r26,FM_ARG0+0x24(r1) ; Save a register
148 stw r27,FM_ARG0+0x28(r1) ; Save a register
149 stw r28,FM_ARG0+0x2C(r1) ; Save a register
150 stw r29,FM_ARG0+0x30(r1) ; Save a register
151 stw r30,FM_ARG0+0x34(r1) ; Save a register
152 stw r31,FM_ARG0+0x38(r1) ; Save a register
153 stw r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
156 lwz r11,pmapFlags(r3) ; Get pmaps flags
157 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
158 bne hamPanic ; Call not valid for guest shadow assist pmap
161 rlwinm r11,r4,0,0,19 ; Round down to get mapping block address
162 mr r28,r3 ; Save the pmap
163 mr r31,r4 ; Save the mapping
164 bt++ pf64Bitb,hamSF1 ; skip if 64-bit (only they take the hint)
165 lwz r20,pmapvr+4(r3) ; Get conversion mask for pmap
166 lwz r21,mbvrswap+4(r11) ; Get conversion mask for mapping
170 hamSF1: ld r20,pmapvr(r3) ; Get conversion mask for pmap
171 ld r21,mbvrswap(r11) ; Get conversion mask for mapping
173 hamSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
175 mr r17,r11 ; Save the MSR
176 xor r28,r28,r20 ; Convert the pmap to physical addressing
177 xor r31,r31,r21 ; Convert the mapping to physical addressing
179 la r3,pmapSXlk(r28) ; Point to the pmap search lock
180 bl sxlkShared ; Go get a shared lock on the mapping lists
181 mr. r3,r3 ; Did we get the lock?
182 lwz r24,mpFlags(r31) ; Pick up the flags
183 bne-- hamBadLock ; Nope...
185 li r21,0 ; Remember that we have the shared lock
188 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
189 ; here so that we will know the previous elements so we can dequeue them
193 hamRescan: lwz r4,mpVAddr(r31) ; Get the new vaddr top half
194 lwz r5,mpVAddr+4(r31) ; Get the new vaddr bottom half
195 mr r3,r28 ; Pass in pmap to search
196 lhz r23,mpBSize(r31) ; Get the block size for later
197 mr r29,r4 ; Save top half of vaddr for later
198 mr r30,r5 ; Save bottom half of vaddr for later
200 bl EXT(mapSearchFull) ; Go see if we can find it
202 li r22,lo16(0x800C) ; Get 0xFFFF800C
203 rlwinm r0,r24,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
204 addi r23,r23,1 ; Get actual length
205 rlwnm r22,r22,r0,27,31 ; Rotate to get 12 or 25
206 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
207 slw r9,r23,r22 ; Isolate the low part
208 rlwnm r22,r23,r22,22,31 ; Extract the high order
209 addic r23,r9,-4096 ; Get the length to the last page
210 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
211 addme r22,r22 ; Do high order as well...
212 mr. r3,r3 ; Did we find a mapping here?
213 or r0,r30,r0 ; Fill high word of 64-bit with 1s so we will properly carry
214 bne-- hamOverlay ; We found a mapping, this is no good, can not double map...
216 addc r9,r0,r23 ; Add size to get last page in new range
217 or. r0,r4,r5 ; Are we beyond the end?
218 adde r8,r29,r22 ; Add the rest of the length on
219 rlwinm r9,r9,0,0,31 ; Clean top half of sum
220 beq++ hamFits ; We are at the end...
222 cmplw cr1,r9,r5 ; Is the bottom part of our end less?
223 cmplw r8,r4 ; Is our end before the next (top part)
224 crand cr0_eq,cr0_eq,cr1_lt ; Is the second half less and the first half equal?
225 cror cr0_eq,cr0_eq,cr0_lt ; Or is the top half less
227 bf-- cr0_eq,hamOverlay ; No, we do not fit: there is an overlay...
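;
; In plain C, the addc/adde and cmplw/crand sequence just above computes (the real
; code keeps the 64-bit quantities split across 32-bit register pairs):
;
;   static int fits_before_next(unsigned long long new_start, unsigned long long byte_len,
;                               unsigned long long next_start)
;   {
;       unsigned long long last_page = new_start + (byte_len - 4096);  /* last page of new range */
;       return last_page < next_start;                                 /* nonzero -> no overlay  */
;   }
;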
230 ; Here we try to convert to an exclusive lock. This will fail if someone else
233 hamFits: mr. r21,r21 ; Do we already have the exclusive lock?
234 la r3,pmapSXlk(r28) ; Point to the pmap search lock
236 bne-- hamGotX ; We already have the exclusive...
238 bl sxlkPromote ; Try to promote shared to exclusive
239 mr. r3,r3 ; Could we?
240 beq++ hamGotX ; Yeah...
243 ; Since we could not promote our lock, we need to convert it to exclusive.
244 ; That means that we drop the shared lock and wait to get it
245 ; exclusive. Since we release the lock, we need to do the look up
249 la r3,pmapSXlk(r28) ; Point to the pmap search lock
250 bl sxlkConvert ; Convert shared to exclusive
251 mr. r3,r3 ; Could we?
252 bne-- hamBadLock ; Nope, we must have timed out...
254 li r21,1 ; Remember that we have the exclusive lock
255 b hamRescan ; Go look again...
259 hamGotX: mr r3,r28 ; Get the pmap to insert into
260 mr r4,r31 ; Point to the mapping
261 bl EXT(mapInsert) ; Insert the mapping into the list
263 rlwinm r11,r24,mpPcfgb+2,mpPcfg>>6 ; Get the index into the page config table
264 lhz r8,mpSpace(r31) ; Get the address space
265 lwz r11,lgpPcfg(r11) ; Get the page config
266 mfsdr1 r7 ; Get the hash table base/bounds
267 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
269 andi. r0,r24,mpType ; Is this a normal mapping?
271 rlwimi r8,r8,14,4,17 ; Double address space
272 rlwinm r9,r30,0,4,31 ; Clear segment
273 rlwinm r10,r30,18,14,17 ; Shift EA[32:35] down to correct spot in VSID (actually shift up 14)
274 rlwimi r8,r8,28,0,3 ; Get the last nybble of the hash
275 rlwimi r10,r29,18,0,13 ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
276 rlwinm r7,r7,0,16,31 ; Isolate length mask (or count)
277 addi r4,r4,1 ; Bump up the mapped page count
278 srw r9,r9,r11 ; Isolate just the page index
279 xor r10,r10,r8 ; Calculate the low 32 bits of the VSID
280 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
281 xor r9,r9,r10 ; Get the hash to the PTEG
283 bne-- hamDoneNP ; Not a normal mapping, therefore, no physent...
285 bl mapPhysFindLock ; Go find and lock the physent
287 bt++ pf64Bitb,ham64 ; This is 64-bit...
289 lwz r11,ppLink+4(r3) ; Get the alias chain pointer
290 rlwinm r7,r7,16,0,15 ; Get the PTEG wrap size
291 slwi r9,r9,6 ; Make PTEG offset
292 ori r7,r7,0xFFC0 ; Stick in the bottom part
293 rlwinm r12,r11,0,~ppFlags ; Clean it up
294 and r9,r9,r7 ; Wrap offset into table
295 mr r4,r31 ; Set the link to install
296 stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
297 stw r12,mpAlias+4(r31) ; Move to the mapping
298 bl mapPhyCSet32 ; Install the link
299 b hamDone ; Go finish up...
303 ham64: li r0,ppLFAmask ; Get mask to clean up alias pointer
304 subfic r7,r7,46 ; Get number of leading zeros
305 eqv r4,r4,r4 ; Get all ones
306 ld r11,ppLink(r3) ; Get the alias chain pointer
307 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
308 srd r4,r4,r7 ; Get the wrap mask
309 sldi r9,r9,7 ; Change hash to PTEG offset
310 andc r11,r11,r0 ; Clean out the lock and flags
311 and r9,r9,r4 ; Wrap to PTEG
313 stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
314 std r11,mpAlias(r31) ; Set the alias pointer in the mapping
316 bl mapPhyCSet64 ; Install the link
318 hamDone: bl mapPhysUnlock ; Unlock the physent chain
320 hamDoneNP: la r3,pmapSXlk(r28) ; Point to the pmap search lock
321 bl sxlkUnlock ; Unlock the search list
323 mr r3,r31 ; Get the mapping pointer
324 bl mapDropBusy ; Drop the busy count
326 li r3,0 ; Set successful return
327 li r4,0 ; Set successful return
329 hamReturn: bt++ pf64Bitb,hamR64 ; Yes...
331 mtmsr r17 ; Restore enables/translation/etc.
333 b hamReturnC ; Join common...
335 hamR64: mtmsrd r17 ; Restore enables/translation/etc.
338 hamReturnC: lwz r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return
339 lwz r17,FM_ARG0+0x00(r1) ; Restore a register
340 lwz r18,FM_ARG0+0x04(r1) ; Restore a register
341 lwz r19,FM_ARG0+0x08(r1) ; Restore a register
342 lwz r20,FM_ARG0+0x0C(r1) ; Restore a register
343 mtlr r0 ; Restore the return
344 lwz r21,FM_ARG0+0x10(r1) ; Restore a register
345 lwz r22,FM_ARG0+0x14(r1) ; Restore a register
346 lwz r23,FM_ARG0+0x18(r1) ; Restore a register
347 lwz r24,FM_ARG0+0x1C(r1) ; Restore a register
348 lwz r25,FM_ARG0+0x20(r1) ; Restore a register
349 lwz r26,FM_ARG0+0x24(r1) ; Restore a register
350 lwz r27,FM_ARG0+0x28(r1) ; Restore a register
351 lwz r28,FM_ARG0+0x2C(r1) ; Restore a register
352 lwz r29,FM_ARG0+0x30(r1) ; Restore a register
353 lwz r30,FM_ARG0+0x34(r1) ; Restore a register
354 lwz r31,FM_ARG0+0x38(r1) ; Restore a register
355 lwz r1,0(r1) ; Pop the stack
362 hamOverlay: lwz r22,mpFlags(r3) ; Get the overlay flags
363 li r0,mpC|mpR ; Get a mask to turn off RC bits
364 lwz r23,mpFlags(r31) ; Get the requested flags
365 lwz r20,mpVAddr(r3) ; Get the overlay address
366 lwz r8,mpVAddr(r31) ; Get the requested address
367 lwz r21,mpVAddr+4(r3) ; Get the overlay address
368 lwz r9,mpVAddr+4(r31) ; Get the requested address
369 lhz r10,mpBSize(r3) ; Get the overlay length
370 lhz r11,mpBSize(r31) ; Get the requested length
371 lwz r24,mpPAddr(r3) ; Get the overlay physical address
372 lwz r25,mpPAddr(r31) ; Get the requested physical address
373 andc r21,r21,r0 ; Clear RC bits
374 andc r9,r9,r0 ; Clear RC bits
376 la r3,pmapSXlk(r28) ; Point to the pmap search lock
377 bl sxlkUnlock ; Unlock the search list
379 rlwinm. r0,r22,0,mpRIPb,mpRIPb ; Are we in the process of removing this one?
380 mr r3,r20 ; Save the top of the colliding address
381 rlwinm r4,r21,0,0,19 ; Save the bottom of the colliding address
383 bne++ hamRemv ; Removing, go say so so we help...
385 cmplw r20,r8 ; High part of vaddr the same?
386 cmplw cr1,r21,r9 ; Low part?
387 crand cr5_eq,cr0_eq,cr1_eq ; Remember if same
389 cmplw r10,r11 ; Size the same?
390 cmplw cr1,r24,r25 ; Physical address?
391 crand cr5_eq,cr5_eq,cr0_eq ; Remember
392 crand cr5_eq,cr5_eq,cr1_eq ; Remember if same
394 xor r23,r23,r22 ; Compare mapping flag words
395 andi. r23,r23,mpType|mpPerm ; Are mapping types and attributes the same?
396 crand cr5_eq,cr5_eq,cr0_eq ; Merge in final check
397 bf-- cr5_eq,hamSmash ; This is not the same, so we return a smash...
399 ori r4,r4,mapRtMapDup ; Set duplicate
400 b hamReturn ; And leave...
402 hamRemv: ori r4,r4,mapRtRemove ; We are in the process of removing the collision
403 b hamReturn ; Come back yall...
405 hamSmash: ori r4,r4,mapRtSmash ; Tell caller that it has some clean up to do
406 b hamReturn ; Join common epilog code
410 hamBadLock: li r3,0 ; Set lock time out error code
411 li r4,mapRtBadLk ; Set lock time out error code
412 b hamReturn ; Leave....
414 hamPanic: lis r0,hi16(Choke) ; System abend
415 ori r0,r0,lo16(Choke) ; System abend
416 li r3,failMapping ; Show that we failed some kind of mapping thing
423 * mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
425 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
426 * a 64-bit quantity, it is a long long so it is in R4 and R5.
428 * We return the virtual address of the removed mapping as a
431 * Note that this is designed to be called from 32-bit mode with a stack.
433 * We disable translation and all interruptions here. This keeps us
434 * from having to worry about a deadlock due to having anything locked
435 * and needing it to process a fault.
437 * Note that this must be done with both interruptions off and VM off
439 * Remove mapping via pmap, regular page, no pte
442 * 2) find mapping full path - finds all possible list previous elements
443 * 3) upgrade pmap to exclusive
444 * 4) bump mapping busy count
445 * 5) remove mapping from search list
448 * 8) remove from physent
450 * 10) drop mapping busy count
451 * 11) drain mapping busy count
454 * Remove mapping via pmap, regular page, with pte
457 * 2) find mapping full path - finds all possible list previous elements
458 * 3) upgrade lock to exclusive
459 * 4) bump mapping busy count
461 * 6) invalidate pte and tlbie
462 * 7) atomic merge rc into physent
464 * 9) remove mapping from search list
467 * 12) remove from physent
469 * 14) drop mapping busy count
470 * 15) drain mapping busy count
473 * Remove mapping via pmap, I/O or block
476 * 2) find mapping full path - finds all possible list previous elements
477 * 3) upgrade lock to exclusive
478 * 4) bump mapping busy count
479 * 5) mark remove-in-progress
480 * 6) check and bump remove chunk cursor if needed
482 * 8) if something to invalidate, go to step 11
485 * 10) return with mapRtRemove to force higher level to call again
488 * 12) invalidate ptes, no tlbie
490 * 14) repeat 11 - 13 for all pages in chunk
491 * 15) if not final chunk, go to step 9
492 * 16) invalidate tlb entries for the whole block map but no more than the full tlb
493 * 17) lock pmap share
494 * 18) find mapping full path - finds all possible list previous elements
495 * 19) upgrade lock to exclusive
496 * 20) remove mapping from search list
497 * 21) drop mapping busy count
498 * 22) drain mapping busy count
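;
; Hedged C outline of the chunked block/I-O removal protocol above (helper names
; and constants here are invented for illustration; the real work is in the
; hrmBlock32/hrmBlock64 code below):
;
;   struct pmap;
;   struct mapping { unsigned int flags, blkRemCur, pages; };
;   extern void sxlkUnlock(struct pmap *);
;   extern void invalidate_chunk_ptes(struct mapping *, unsigned int cursor);  /* hypothetical */
;   extern void tlbie_block_range(struct mapping *);                           /* hypothetical */
;   extern void relock_find_and_dequeue(struct pmap *, struct mapping *);      /* hypothetical */
;   #define mpRIPc       0x1u      /* stand-in for mpRIP              */
;   #define remChunk     256u      /* stand-in chunk size, in pages   */
;   #define mapRtRemoveC 3         /* stand-in "call me again" result */
;
;   int hw_rem_block_outline(struct pmap *pmap, struct mapping *mp)
;   {
;       mp->flags |= mpRIPc;                       /* mark remove-in-progress             */
;       unsigned int cur = mp->blkRemCur;          /* chunk cursor lives in the mapping   */
;       mp->blkRemCur = cur + remChunk;            /* claim the next chunk                */
;       sxlkUnlock(pmap);                          /* drop search lock while invalidating */
;       invalidate_chunk_ptes(mp, cur);            /* invalidate autogen PTEs, no tlbie   */
;       if (cur + remChunk <= mp->pages)           /* more chunks left to do?             */
;           return mapRtRemoveC;                   /* yes: caller must call again         */
;       tlbie_block_range(mp);                     /* final chunk: flush TLBs for block   */
;       relock_find_and_dequeue(pmap, mp);         /* re-find, upgrade, unlink, unbusy    */
;       return 0;
;   }
;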
503 .globl EXT(hw_rem_map)
508 ; NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE
509 ; THE HW_PURGE_* ROUTINES ALSO
512 #define hrmStackSize ((31-15+1)*4)+4
513 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
514 mflr r0 ; Save the link register
515 stw r15,FM_ARG0+0x00(r1) ; Save a register
516 stw r16,FM_ARG0+0x04(r1) ; Save a register
517 stw r17,FM_ARG0+0x08(r1) ; Save a register
518 stw r18,FM_ARG0+0x0C(r1) ; Save a register
519 stw r19,FM_ARG0+0x10(r1) ; Save a register
520 mfsprg r19,2 ; Get feature flags
521 stw r20,FM_ARG0+0x14(r1) ; Save a register
522 stw r21,FM_ARG0+0x18(r1) ; Save a register
523 mtcrf 0x02,r19 ; move pf64Bit cr6
524 stw r22,FM_ARG0+0x1C(r1) ; Save a register
525 stw r23,FM_ARG0+0x20(r1) ; Save a register
526 stw r24,FM_ARG0+0x24(r1) ; Save a register
527 stw r25,FM_ARG0+0x28(r1) ; Save a register
528 stw r26,FM_ARG0+0x2C(r1) ; Save a register
529 stw r27,FM_ARG0+0x30(r1) ; Save a register
530 stw r28,FM_ARG0+0x34(r1) ; Save a register
531 stw r29,FM_ARG0+0x38(r1) ; Save a register
532 stw r30,FM_ARG0+0x3C(r1) ; Save a register
533 stw r31,FM_ARG0+0x40(r1) ; Save a register
534 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
535 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
538 lwz r11,pmapFlags(r3) ; Get pmaps flags
539 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
540 bne hrmPanic ; Call not valid for guest shadow assist pmap
543 bt++ pf64Bitb,hrmSF1 ; skip if 64-bit (only they take the hint)
544 lwz r9,pmapvr+4(r3) ; Get conversion mask
547 hrmSF1: ld r9,pmapvr(r3) ; Get conversion mask
550 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
552 xor r28,r3,r9 ; Convert the pmap to physical addressing
555 ; Here is where we join in from the hw_purge_* routines
558 hrmJoin: lwz r3,pmapFlags(r28) ; Get pmap's flags
559 mfsprg r19,2 ; Get feature flags again (for alternate entries)
561 mr r17,r11 ; Save the MSR
562 mr r29,r4 ; Top half of vaddr
563 mr r30,r5 ; Bottom half of vaddr
565 rlwinm. r3,r3,0,pmapVMgsaa ; Is guest shadow assist active?
566 bne-- hrmGuest ; Yes, handle specially
568 la r3,pmapSXlk(r28) ; Point to the pmap search lock
569 bl sxlkShared ; Go get a shared lock on the mapping lists
570 mr. r3,r3 ; Did we get the lock?
571 bne-- hrmBadLock ; Nope...
574 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
575 ; here so that we will know the previous elements so we can dequeue them
576 ; later. Note: we get back mpFlags in R7.
579 mr r3,r28 ; Pass in pmap to search
580 mr r4,r29 ; High order of address
581 mr r5,r30 ; Low order of address
582 bl EXT(mapSearchFull) ; Go see if we can find it
584 andi. r0,r7,mpPerm ; Mapping marked permanent?
585 crmove cr5_eq,cr0_eq ; Remember permanent marking
586 mr r20,r7 ; Remember mpFlags
587 mr. r31,r3 ; Did we? (And remember mapping address for later)
588 mr r15,r4 ; Save top of next vaddr
589 mr r16,r5 ; Save bottom of next vaddr
590 beq-- hrmNotFound ; Nope, not found...
592 bf-- cr5_eq,hrmPerm ; This one can't be removed...
594 ; Here we try to promote to an exclusive lock. This will fail if someone else
598 la r3,pmapSXlk(r28) ; Point to the pmap search lock
599 bl sxlkPromote ; Try to promote shared to exclusive
600 mr. r3,r3 ; Could we?
601 beq++ hrmGotX ; Yeah...
604 ; Since we could not promote our lock, we need to convert it to exclusive.
605 ; That means that we drop the shared lock and wait to get it
606 ; exclusive. Since we release the lock, we need to do the look up
610 la r3,pmapSXlk(r28) ; Point to the pmap search lock
611 bl sxlkConvert ; Convert shared to exclusive
612 mr. r3,r3 ; Could we?
613 bne-- hrmBadLock ; Nope, we must have timed out...
615 mr r3,r28 ; Pass in pmap to search
616 mr r4,r29 ; High order of address
617 mr r5,r30 ; Low order of address
618 bl EXT(mapSearchFull) ; Rescan the list
620 andi. r0,r7,mpPerm ; Mapping marked permanent?
621 crmove cr5_eq,cr0_eq ; Remember permanent marking
622 mr. r31,r3 ; Did we lose it when we converted?
623 mr r20,r7 ; Remember mpFlags
624 mr r15,r4 ; Save top of next vaddr
625 mr r16,r5 ; Save bottom of next vaddr
626 beq-- hrmNotFound ; Yeah, we did, someone tossed it for us...
628 bf-- cr5_eq,hrmPerm ; This one can't be removed...
631 ; We have an exclusive lock on the mapping chain. And we
632 ; also have the busy count bumped in the mapping so it can
636 hrmGotX: mr r3,r31 ; Get the mapping
637 bl mapBumpBusy ; Bump up the busy count
640 ; Invalidate any PTEs associated with this
641 ; mapping (more than one if a block) and accumulate the reference
644 ; Here is also where we need to split 32- and 64-bit processing
647 lwz r21,mpPte(r31) ; Grab the offset to the PTE
648 rlwinm r23,r29,0,1,0 ; Copy high order vaddr to high if 64-bit machine
649 mfsdr1 r29 ; Get the hash table base and size
651 rlwinm r0,r20,0,mpType ; Isolate mapping type
652 cmplwi cr5,r0,mpBlock ; Remember whether this is a block mapping
653 cmplwi r0,mpMinSpecial ; cr0_lt <- not a special mapping type
655 rlwinm r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
656 ori r2,r2,0xFFFF ; Get mask to clean out hash table base (works for both 32- and 64-bit)
657 cmpwi cr1,r0,0 ; Have we made a PTE for this yet?
658 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
659 crorc cr0_eq,cr1_eq,cr0_lt ; No need to look at PTE if none or a special mapping
660 rlwimi r23,r30,0,0,31 ; Insert low under high part of address
661 andc r29,r29,r2 ; Clean up hash table base
662 li r22,0 ; Clear this on out (also sets RC to 0 if we bail)
663 mr r30,r23 ; Move the now merged vaddr to the correct register
664 add r26,r29,r21 ; Point to the PTEG slot
666 bt++ pf64Bitb,hrmSplit64 ; Go do 64-bit version...
668 rlwinm r9,r21,28,4,29 ; Convert PTEG to PCA entry
669 beq- cr5,hrmBlock32 ; Go treat block specially...
670 subfic r9,r9,-4 ; Get the PCA entry offset
671 bt- cr0_eq,hrmPysDQ32 ; Skip next if no possible PTE...
672 add r7,r9,r29 ; Point to the PCA slot
674 bl mapLockPteg ; Go lock up the PTEG (Note: we need to save R6 to set PCA)
676 lwz r21,mpPte(r31) ; Get the quick pointer again
677 lwz r5,0(r26) ; Get the top of PTE
679 rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
680 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
681 rlwinm r5,r5,0,1,31 ; Turn off valid bit in PTE
682 stw r21,mpPte(r31) ; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake)
683 beq- hrmUlckPCA32 ; Pte is gone, no need to invalidate...
685 stw r5,0(r26) ; Invalidate the PTE
687 li r9,tlbieLock ; Get the TLBIE lock
689 sync ; Make sure the invalid PTE is actually in memory
691 hrmPtlb32: lwarx r5,0,r9 ; Get the TLBIE lock
692 mr. r5,r5 ; Is it locked?
693 li r5,1 ; Get locked indicator
694 bne- hrmPtlb32 ; It is locked, go spin...
695 stwcx. r5,0,r9 ; Try to get it
696 bne- hrmPtlb32 ; We was beat...
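;
; The lwarx/stwcx. retry loop above is the usual PowerPC spinlock acquire; its
; portable shape in C11 (illustrative only) is a weak compare-and-swap loop that
; swings the lock word from 0 (free) to 1 (held):
;
;   #include <stdatomic.h>
;   static void tlbie_lock_acquire(atomic_uint *lock)
;   {
;       unsigned int expected;
;       do {
;           expected = 0;                          /* only takeable when it reads free */
;       } while (!atomic_compare_exchange_weak(lock, &expected, 1u));
;   }
;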
698 rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
700 tlbie r30 ; Invalidate all corresponding TLB entries
702 beq- hrmNTlbs ; Jump if we can not do a TLBSYNC....
704 eieio ; Make sure that the tlbie happens first
705 tlbsync ; Wait for everyone to catch up
706 sync ; Make sure of it all
708 hrmNTlbs: li r0,0 ; Clear this
709 rlwinm r2,r21,29,29,31 ; Get slot number (8 byte entries)
710 stw r0,tlbieLock(0) ; Clear the tlbie lock
711 lis r0,0x8000 ; Get bit for slot 0
712 eieio ; Make sure those RC bits have been stashed in the PTE
714 srw r0,r0,r2 ; Get the allocation hash mask
715 lwz r22,4(r26) ; Get the latest reference and change bits
716 or r6,r6,r0 ; Show that this slot is free
719 eieio ; Make sure all updates come first
720 stw r6,0(r7) ; Unlock the PTEG
723 ; Now, it is time to remove the mapping and unlock the chain.
724 ; But first, we need to make sure no one else is using this
725 ; mapping so we drain the busy now
728 hrmPysDQ32: mr r3,r31 ; Point to the mapping
729 bl mapDrainBusy ; Go wait until mapping is unused
731 mr r3,r28 ; Get the pmap to remove from
732 mr r4,r31 ; Point to the mapping
733 bl EXT(mapRemove) ; Remove the mapping from the list
735 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
736 rlwinm r0,r20,0,mpType ; Isolate mapping type
737 cmplwi cr1,r0,mpMinSpecial ; cr1_lt <- not a special mapping type
738 la r3,pmapSXlk(r28) ; Point to the pmap search lock
739 subi r4,r4,1 ; Drop down the mapped page count
740 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
741 bl sxlkUnlock ; Unlock the search list
743 bf-- cr1_lt,hrmRetn32 ; This one has no real memory associated with it so we are done...
745 bl mapPhysFindLock ; Go find and lock the physent
747 lwz r9,ppLink+4(r3) ; Get first mapping
749 mr r4,r22 ; Get the RC bits we just got
750 bl mapPhysMerge ; Go merge the RC bits
752 rlwinm r9,r9,0,~ppFlags ; Clear the flags from the mapping pointer
754 cmplw r9,r31 ; Are we the first on the list?
755 bne- hrmNot1st ; Nope...
758 lwz r4,mpAlias+4(r31) ; Get our new forward pointer
759 stw r9,mpAlias+4(r31) ; Make sure we are off the chain
760 bl mapPhyCSet32 ; Go set the physent link and preserve flags
762 b hrmPhyDQd ; Join up and unlock it all...
766 hrmPerm: li r8,-4096 ; Get the value we need to round down to a page
767 and r8,r8,r31 ; Get back to a page
768 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
770 la r3,pmapSXlk(r28) ; Point to the pmap search lock
771 bl sxlkUnlock ; Unlock the search list
773 xor r3,r31,r8 ; Flip mapping address to virtual
774 ori r3,r3,mapRtPerm ; Set permanent mapping error
777 hrmBadLock: li r3,mapRtBadLk ; Set bad lock
781 la r3,pmapSXlk(r28) ; Point to the pmap search lock
782 bl sxlkUnlock ; Unlock the search list
785 mr r3,r31 ; Point to the mapping
786 bl mapDropBusy ; Drop the busy here since we need to come back
787 li r3,mapRtRemove ; Say we are still removing this
793 la r3,pmapSXlk(r28) ; Point to the pmap search lock
794 bl sxlkUnlock ; Unlock the search list
795 li r3,mapRtNotFnd ; No mapping found
797 hrmErRtn: bt++ pf64Bitb,hrmSF1z ; skip if 64-bit (only they take the hint)
799 mtmsr r17 ; Restore enables/translation/etc.
801 b hrmRetnCmn ; Join the common return code...
803 hrmSF1z: mtmsrd r17 ; Restore enables/translation/etc.
805 b hrmRetnCmn ; Join the common return code...
809 hrmNot1st: mr. r8,r9 ; Remember and test current node
810 beq- hrmPhyDQd ; Could not find our node, someone must have unmapped us...
811 lwz r9,mpAlias+4(r9) ; Chain to the next
812 cmplw r9,r31 ; Is this us?
813 bne- hrmNot1st ; Not us...
815 lwz r9,mpAlias+4(r9) ; Get our forward pointer
816 stw r9,mpAlias+4(r8) ; Unchain us
820 hrmPhyDQd: bl mapPhysUnlock ; Unlock the physent chain
822 hrmRetn32: rlwinm r8,r31,0,0,19 ; Find start of page
823 mr r3,r31 ; Copy the pointer to the mapping
824 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
825 bl mapDrainBusy ; Go wait until mapping is unused
827 xor r3,r31,r8 ; Flip mapping address to virtual
829 mtmsr r17 ; Restore enables/translation/etc.
832 hrmRetnCmn: lwz r6,FM_ARG0+0x44(r1) ; Get address to save next mapped vaddr
833 lwz r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
834 lwz r17,FM_ARG0+0x08(r1) ; Restore a register
835 lwz r18,FM_ARG0+0x0C(r1) ; Restore a register
836 mr. r6,r6 ; Should we pass back the "next" vaddr?
837 lwz r19,FM_ARG0+0x10(r1) ; Restore a register
838 lwz r20,FM_ARG0+0x14(r1) ; Restore a register
839 mtlr r0 ; Restore the return
841 rlwinm r16,r16,0,0,19 ; Clean to a page boundary
842 beq hrmNoNextAdr ; Do not pass back the next vaddr...
843 stw r15,0(r6) ; Pass back the top of the next vaddr
844 stw r16,4(r6) ; Pass back the bottom of the next vaddr
847 lwz r15,FM_ARG0+0x00(r1) ; Restore a register
848 lwz r16,FM_ARG0+0x04(r1) ; Restore a register
849 lwz r21,FM_ARG0+0x18(r1) ; Restore a register
850 rlwinm r3,r3,0,0,31 ; Clear top of register if 64-bit
851 lwz r22,FM_ARG0+0x1C(r1) ; Restore a register
852 lwz r23,FM_ARG0+0x20(r1) ; Restore a register
853 lwz r24,FM_ARG0+0x24(r1) ; Restore a register
854 lwz r25,FM_ARG0+0x28(r1) ; Restore a register
855 lwz r26,FM_ARG0+0x2C(r1) ; Restore a register
856 lwz r27,FM_ARG0+0x30(r1) ; Restore a register
857 lwz r28,FM_ARG0+0x34(r1) ; Restore a register
858 lwz r29,FM_ARG0+0x38(r1) ; Restore a register
859 lwz r30,FM_ARG0+0x3C(r1) ; Restore a register
860 lwz r31,FM_ARG0+0x40(r1) ; Restore a register
861 lwz r1,0(r1) ; Pop the stack
865 ; Here is where we come when all is lost. Somehow, we failed a mapping function
866 ; that must work... All hope is gone. Alas, we die.......
869 hrmPanic: lis r0,hi16(Choke) ; System abend
870 ori r0,r0,lo16(Choke) ; System abend
871 li r3,failMapping ; Show that we failed some kind of mapping thing
876 ; Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
877 ; in the range. Then, if we did not finish, return a code indicating that we need to
878 ; be called again. Eventually, we will finish and then, we will do a TLBIE for each
879 ; PTEG up to the point where we have cleared it all (64 for 32-bit architecture)
881 ; A potential speed up is that we stop the invalidate loop once we have walked through
882 ; the hash table once. This really is not worth the trouble because we need to have
883 ; mapped 1/2 of physical RAM in an individual block. Way unlikely.
885 ; We should rethink this and see if we think it will be faster to check PTE and
886 ; only invalidate the specific PTE rather than all block map PTEs in the PTEG.
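;
; What the mtcrf + per-slot bf/stw ladder below amounts to, in C (illustrative;
; "autogen" is the PCA byte holding one bit per PTE slot, with 0x80 = slot 0):
;
;   static void invalidate_autogen_slots(unsigned char autogen, unsigned int pteg[16])
;   {
;       for (int slot = 0; slot < 8; slot++)       /* eight 8-byte PTEs per 32-bit PTEG  */
;           if (autogen & (0x80u >> slot))         /* this slot holds an autogen PTE     */
;               pteg[slot * 2] = 0;                /* zero word 0: the valid bit is gone */
;   }
;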
891 hrmBlock32: lis r29,0xD000 ; Get shift to 32MB bsu
892 rlwinm r24,r20,mpBSub+1+2,29,29 ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
893 lhz r25,mpBSize(r31) ; Get the number of pages in block
894 lhz r23,mpSpace(r31) ; Get the address space hash
895 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
896 rlwnm r29,r29,r24,28,31 ; Rotate to get 0 or 13
897 addi r25,r25,1 ; Account for zero-based counting
898 ori r0,r20,mpRIP ; Turn on the remove in progress flag
899 slw r25,r25,r29 ; Adjust for 32MB if needed
900 mfsdr1 r29 ; Get the hash table base and size
901 rlwinm r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb ; Get high order of hash
902 subi r25,r25,1 ; Convert back to zero-based counting
903 lwz r27,mpVAddr+4(r31) ; Get the base vaddr
904 sub r4,r25,r9 ; Get number of pages left
905 cmplw cr1,r9,r25 ; Have we already hit the end?
906 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
907 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
908 rlwinm r26,r29,16,7,15 ; Get the hash table size
909 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
910 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
911 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
912 cmpwi cr7,r2,0 ; Remember if we have finished
913 slwi r0,r9,12 ; Make cursor into page offset
914 or r24,r24,r23 ; Get full hash
915 and r4,r4,r2 ; If more than a chunk, bring this back to 0
916 rlwinm r29,r29,0,0,15 ; Isolate the hash table base
917 add r27,r27,r0 ; Adjust vaddr to start of current chunk
918 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
920 bgt- cr1,hrmEndInSight ; Someone is already doing the last hunk...
922 la r3,pmapSXlk(r28) ; Point to the pmap search lock
923 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
924 bl sxlkUnlock ; Unlock the search list while we are invalidating
926 rlwinm r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb ; Isolate the segment
927 rlwinm r30,r27,26,6,25 ; Shift vaddr to PTEG offset (and remember VADDR in R27)
928 xor r24,r24,r8 ; Get the proper VSID
929 rlwinm r21,r27,26,10,25 ; Shift page index to PTEG offset (and remember VADDR in R27)
930 ori r26,r26,lo16(0xFFC0) ; Stick in the rest of the length
931 rlwinm r22,r4,6,10,25 ; Shift size to PTEG offset
932 rlwinm r24,r24,6,0,25 ; Shift hash to PTEG units
933 add r22,r22,r30 ; Get end address (in PTEG units)
935 hrmBInv32: rlwinm r23,r30,0,10,25 ; Isolate just the page index
936 xor r23,r23,r24 ; Hash it
937 and r23,r23,r26 ; Wrap it into the table
938 rlwinm r3,r23,28,4,29 ; Change to PCA offset
939 subfic r3,r3,-4 ; Get the PCA entry offset
940 add r7,r3,r29 ; Point to the PCA slot
941 cmplw cr5,r30,r22 ; Check if we reached the end of the range
942 addi r30,r30,64 ; bump to the next vaddr
944 bl mapLockPteg ; Lock the PTEG
946 rlwinm. r4,r6,16,0,7 ; Position, save, and test block mappings in PCA
947 add r5,r23,r29 ; Point to the PTEG
948 li r0,0 ; Set an invalid PTE value
949 beq+ hrmBNone32 ; No block map PTEs in this PTEG...
950 mtcrf 0x80,r4 ; Set CRs to select PTE slots
951 mtcrf 0x40,r4 ; Set CRs to select PTE slots
953 bf 0,hrmSlot0 ; No autogen here
954 stw r0,0x00(r5) ; Invalidate PTE
956 hrmSlot0: bf 1,hrmSlot1 ; No autogen here
957 stw r0,0x08(r5) ; Invalidate PTE
959 hrmSlot1: bf 2,hrmSlot2 ; No autogen here
960 stw r0,0x10(r5) ; Invalidate PTE
962 hrmSlot2: bf 3,hrmSlot3 ; No autogen here
963 stw r0,0x18(r5) ; Invalidate PTE
965 hrmSlot3: bf 4,hrmSlot4 ; No autogen here
966 stw r0,0x20(r5) ; Invalidate PTE
968 hrmSlot4: bf 5,hrmSlot5 ; No autogen here
969 stw r0,0x28(r5) ; Invalidate PTE
971 hrmSlot5: bf 6,hrmSlot6 ; No autogen here
972 stw r0,0x30(r5) ; Invalidate PTE
974 hrmSlot6: bf 7,hrmSlot7 ; No autogen here
975 stw r0,0x38(r5) ; Invalidate PTE
977 hrmSlot7: rlwinm r0,r4,16,16,23 ; Move in use to autogen
978 or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
979 andc r6,r6,r0 ; Turn off all the old autogen bits
981 hrmBNone32: eieio ; Make sure all updates come first
983 stw r6,0(r7) ; Unlock and set the PCA
985 bne+ cr5,hrmBInv32 ; Go invalidate the next...
987 bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...
989 mr r3,r31 ; Copy the pointer to the mapping
990 bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one
992 sync ; Make sure memory is consistent
994 subi r5,r25,63 ; Subtract TLB size from page count (note we are 0 based here)
995 li r6,63 ; Assume full invalidate for now
996 srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
997 andc r6,r6,r5 ; Clear max if we have less to do
998 and r5,r25,r5 ; Clear count if we have more than max
999 lwz r27,mpVAddr+4(r31) ; Get the base vaddr again
1000 li r7,tlbieLock ; Get the TLBIE lock
1001 or r5,r5,r6 ; Get number of TLBIEs needed
1003 hrmBTLBlck: lwarx r2,0,r7 ; Get the TLBIE lock
1004 mr. r2,r2 ; Is it locked?
1005 li r2,1 ; Get our lock value
1006 bne- hrmBTLBlck ; It is locked, go wait...
1007 stwcx. r2,0,r7 ; Try to get it
1008 bne- hrmBTLBlck ; We was beat...
1010 hrmBTLBi: addic. r5,r5,-1 ; See if we did them all
1011 tlbie r27 ; Invalidate it everywhere
1012 addi r27,r27,0x1000 ; Up to the next page
1013 bge+ hrmBTLBi ; Make sure we have done it all...
1015 rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
1016 li r2,0 ; Lock clear value
1018 sync ; Make sure all is quiet
1019 beq- hrmBNTlbs ; Jump if we can not do a TLBSYNC....
1021 eieio ; Make sure that the tlbie happens first
1022 tlbsync ; Wait for everyone to catch up
1023 sync ; Wait for quiet again
1025 hrmBNTlbs: stw r2,tlbieLock(0) ; Clear the tlbie lock
1027 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1028 bl sxlkShared ; Go get a shared lock on the mapping lists
1029 mr. r3,r3 ; Did we get the lock?
1030 bne- hrmPanic ; Nope...
1032 lwz r4,mpVAddr(r31) ; High order of address
1033 lwz r5,mpVAddr+4(r31) ; Low order of address
1034 mr r3,r28 ; Pass in pmap to search
1035 mr r29,r4 ; Save this in case we need it (only needed if promote fails)
1036 mr r30,r5 ; Save this in case we need it (only needed if promote fails)
1037 bl EXT(mapSearchFull) ; Go see if we can find it
1039 mr. r3,r3 ; Did we? (And remember mapping address for later)
1040 mr r15,r4 ; Save top of next vaddr
1041 mr r16,r5 ; Save bottom of next vaddr
1042 beq- hrmPanic ; Nope, not found...
1044 cmplw r3,r31 ; Same mapping?
1045 bne- hrmPanic ; Not good...
1047 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1048 bl sxlkPromote ; Try to promote shared to exclusive
1049 mr. r3,r3 ; Could we?
1050 mr r3,r31 ; Restore the mapping pointer
1051 beq+ hrmBDone1 ; Yeah...
1053 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1054 bl sxlkConvert ; Convert shared to exclusive
1055 mr. r3,r3 ; Could we?
1056 bne-- hrmPanic ; Nope, we must have timed out...
1058 mr r3,r28 ; Pass in pmap to search
1059 mr r4,r29 ; High order of address
1060 mr r5,r30 ; Low order of address
1061 bl EXT(mapSearchFull) ; Rescan the list
1063 mr. r3,r3 ; Did we lose it when we converted?
1064 mr r15,r4 ; Save top of next vaddr
1065 mr r16,r5 ; Save bottom of next vaddr
1066 beq-- hrmPanic ; Yeah, we did, someone tossed it for us...
1068 hrmBDone1: bl mapDrainBusy ; Go wait until mapping is unused
1070 mr r3,r28 ; Get the pmap to remove from
1071 mr r4,r31 ; Point to the mapping
1072 bl EXT(mapRemove) ; Remove the mapping from the list
1074 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1075 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1076 subi r4,r4,1 ; Drop down the mapped page count
1077 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1078 bl sxlkUnlock ; Unlock the search list
1080 b hrmRetn32 ; We are all done, get out...
1083 ; Here we handle the 64-bit version of hw_rem_map
1088 hrmSplit64: rlwinm r9,r21,27,5,29 ; Convert PTEG to PCA entry
1089 beq-- cr5,hrmBlock64 ; Go treat block specially...
1090 subfic r9,r9,-4 ; Get the PCA entry offset
1091 bt-- cr0_eq,hrmPysDQ64 ; Skip next if no possible PTE...
1092 add r7,r9,r29 ; Point to the PCA slot
1094 bl mapLockPteg ; Go lock up the PTEG
1096 lwz r21,mpPte(r31) ; Get the quick pointer again
1097 ld r5,0(r26) ; Get the top of PTE
1099 rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
1100 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
1101 sldi r23,r5,16 ; Shift AVPN up to EA format
1102 // **** Need to adjust above shift based on the page size - large pages need to shift a bit more
1103 rldicr r5,r5,0,62 ; Clear the valid bit
1104 rldimi r23,r30,0,36 ; Insert the page portion of the VPN
1105 stw r21,mpPte(r31) ; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake)
1106 beq-- hrmUlckPCA64 ; Pte is gone, no need to invalidate...
1108 std r5,0(r26) ; Invalidate the PTE
1110 li r9,tlbieLock ; Get the TLBIE lock
1112 sync ; Make sure the invalid PTE is actually in memory
1114 hrmPtlb64: lwarx r5,0,r9 ; Get the TLBIE lock
1115 rldicl r23,r23,0,16 ; Clear bits 0:15 cause they say to
1116 mr. r5,r5 ; Is it locked?
1117 li r5,1 ; Get locked indicator
1118 bne-- hrmPtlb64w ; It is locked, go spin...
1119 stwcx. r5,0,r9 ; Try to get it
1120 bne-- hrmPtlb64 ; We was beat...
1122 tlbie r23 ; Invalidate all corresponding TLB entries
1124 eieio ; Make sure that the tlbie happens first
1125 tlbsync ; Wait for everyone to catch up
1127 ptesync ; Make sure of it all
1128 li r0,0 ; Clear this
1129 rlwinm r2,r21,28,29,31 ; Get slot number (16 byte entries)
1130 stw r0,tlbieLock(0) ; Clear the tlbie lock
1131 oris r0,r0,0x8000 ; Assume slot 0
1133 srw r0,r0,r2 ; Get slot mask to deallocate
1135 lwz r22,12(r26) ; Get the latest reference and change bits
1136 or r6,r6,r0 ; Make the guy we killed free
1139 eieio ; Make sure all updates come first
1141 stw r6,0(r7) ; Unlock and change the PCA
1143 hrmPysDQ64: mr r3,r31 ; Point to the mapping
1144 bl mapDrainBusy ; Go wait until mapping is unused
1146 mr r3,r28 ; Get the pmap to remove from
1147 mr r4,r31 ; Point to the mapping
1148 bl EXT(mapRemove) ; Remove the mapping from the list
1150 rlwinm r0,r20,0,mpType ; Isolate mapping type
1151 cmplwi cr1,r0,mpMinSpecial ; cr1_lt <- not a special mapping type
1152 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1153 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1154 subi r4,r4,1 ; Drop down the mapped page count
1155 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1156 bl sxlkUnlock ; Unlock the search list
1158 bf-- cr1_lt,hrmRetn64 ; This one has no real memory associated with it so we are done...
1160 bl mapPhysFindLock ; Go find and lock the physent
1162 li r0,ppLFAmask ; Get mask to clean up mapping pointer
1163 ld r9,ppLink(r3) ; Get first mapping
1164 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1165 mr r4,r22 ; Get the RC bits we just got
1167 bl mapPhysMerge ; Go merge the RC bits
1169 andc r9,r9,r0 ; Clean up the mapping pointer
1171 cmpld r9,r31 ; Are we the first on the list?
1172 bne-- hrmNot1st64 ; Nope...
1175 ld r4,mpAlias(r31) ; Get our forward pointer
1177 std r9,mpAlias(r31) ; Make sure we are off the chain
1178 bl mapPhyCSet64 ; Go set the physent link and preserve flags
1180 b hrmPhyDQd64 ; Join up and unlock it all...
1182 hrmPtlb64w: li r5,lgKillResv ; Point to some spare memory
1183 stwcx. r5,0,r5 ; Clear the pending reservation
1186 hrmPtlb64x: lwz r5,0(r9) ; Do a regular load to avoid taking reservation
1187 mr. r5,r5 ; is it locked?
1188 beq++ hrmPtlb64 ; Nope...
1189 b hrmPtlb64x ; Sniff some more...
1194 mr. r8,r9 ; Remember and test current node
1195 beq-- hrmPhyDQd64 ; Could not find our node...
1196 ld r9,mpAlias(r9) ; Chain to the next
1197 cmpld r9,r31 ; Is this us?
1198 bne-- hrmNot1st64 ; Not us...
1200 ld r9,mpAlias(r9) ; Get our forward pointer
1201 std r9,mpAlias(r8) ; Unchain us
1206 bl mapPhysUnlock ; Unlock the physent chain
1208 hrmRetn64: rldicr r8,r31,0,51 ; Find start of page
1209 mr r3,r31 ; Copy the pointer to the mapping
1210 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
1211 bl mapDrainBusy ; Go wait until mapping is unused
1213 xor r3,r31,r8 ; Flip mapping address to virtual
1215 mtmsrd r17 ; Restore enables/translation/etc.
1218 b hrmRetnCmn ; Join the common return path...
1222 ; Check hrmBlock32 for comments.
1227 hrmBlock64: lis r29,0xD000 ; Get shift to 32MB bsu
1228 rlwinm r10,r20,mpBSub+1+2,29,29 ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
1229 lhz r24,mpSpace(r31) ; Get the address space hash
1230 lhz r25,mpBSize(r31) ; Get the number of pages in block
1231 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
1232 rlwnm r29,r29,r10,28,31 ; Rotate to get 0 or 13
1233 addi r25,r25,1 ; Account for zero-based counting
1234 ori r0,r20,mpRIP ; Turn on the remove in progress flag
1235 slw r25,r25,r29 ; Adjust for 32MB if needed
1236 mfsdr1 r29 ; Get the hash table base and size
1237 ld r27,mpVAddr(r31) ; Get the base vaddr
1238 subi r25,r25,1 ; Convert back to zero-based counting
1239 rlwinm r5,r29,0,27,31 ; Isolate the size
1240 sub r4,r25,r9 ; Get number of pages left
1241 cmplw cr1,r9,r25 ; Have we already hit the end?
1242 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
1243 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
1244 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
1245 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
1246 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
1247 cmpwi cr7,r2,0 ; Remember if we are doing the last chunk
1248 and r4,r4,r2 ; If more than a chunk, bring this back to 0
1249 srdi r27,r27,12 ; Change address into page index
1250 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
1251 add r27,r27,r9 ; Adjust vaddr to start of current chunk
1253 bgt-- cr1,hrmEndInSight ; Someone is already doing the last hunk...
1255 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1256 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
1257 bl sxlkUnlock ; Unlock the search list while we are invalidating
1259 rlwimi r24,r24,14,4,17 ; Insert a copy of space hash
1260 eqv r26,r26,r26 ; Get all foxes here
1261 rldimi r24,r24,28,8 ; Make a couple copies up higher
1262 rldicr r29,r29,0,47 ; Isolate just the hash table base
1263 subfic r5,r5,46 ; Get number of leading zeros
1264 srd r26,r26,r5 ; Shift the size bits over
1265 mr r30,r27 ; Get start of chunk to invalidate
1266 rldicr r26,r26,0,56 ; Make length in PTEG units
1267 add r22,r4,r30 ; Get end page number
1269 hrmBInv64: srdi r0,r30,2 ; Shift page index over to form ESID
1270 rldicr r0,r0,0,49 ; Clean all but segment portion
1271 rlwinm r2,r30,0,16,31 ; Get the current page index
1272 xor r0,r0,r24 ; Form VSID
1273 xor r8,r2,r0 ; Hash the vaddr
1274 sldi r8,r8,7 ; Make into PTEG offset
1275 and r23,r8,r26 ; Wrap into the hash table
1276 rlwinm r3,r23,27,5,29 ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here)
1277 subfic r3,r3,-4 ; Get the PCA entry offset
1278 add r7,r3,r29 ; Point to the PCA slot
1280 cmplw cr5,r30,r22 ; Have we reached the end of the range?
1282 bl mapLockPteg ; Lock the PTEG
1284 rlwinm. r4,r6,16,0,7 ; Extract the block mappings in this here PTEG and see if there are any
1285 add r5,r23,r29 ; Point to the PTEG
1286 li r0,0 ; Set an invalid PTE value
1287 beq++ hrmBNone64 ; No block map PTEs in this PTEG...
1288 mtcrf 0x80,r4 ; Set CRs to select PTE slots
1289 mtcrf 0x40,r4 ; Set CRs to select PTE slots
1292 bf 0,hrmSlot0s ; No autogen here
1293 std r0,0x00(r5) ; Invalidate PTE
1295 hrmSlot0s: bf 1,hrmSlot1s ; No autogen here
1296 std r0,0x10(r5) ; Invalidate PTE
1298 hrmSlot1s: bf 2,hrmSlot2s ; No autogen here
1299 std r0,0x20(r5) ; Invalidate PTE
1301 hrmSlot2s: bf 3,hrmSlot3s ; No autogen here
1302 std r0,0x30(r5) ; Invalidate PTE
1304 hrmSlot3s: bf 4,hrmSlot4s ; No autogen here
1305 std r0,0x40(r5) ; Invalidate PTE
1307 hrmSlot4s: bf 5,hrmSlot5s ; No autogen here
1308 std r0,0x50(r5) ; Invalidate PTE
1310 hrmSlot5s: bf 6,hrmSlot6s ; No autogen here
1311 std r0,0x60(r5) ; Invalidate PTE
1313 hrmSlot6s: bf 7,hrmSlot7s ; No autogen here
1314 std r0,0x70(r5) ; Invalidate PTE
1316 hrmSlot7s: rlwinm r0,r4,16,16,23 ; Move in use to autogen
1317 or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
1318 andc r6,r6,r0 ; Turn off all the old autogen bits
1320 hrmBNone64: eieio ; Make sure all updates come first
1321 stw r6,0(r7) ; Unlock and set the PCA
1323 addi r30,r30,1 ; bump to the next PTEG
1324 bne++ cr5,hrmBInv64 ; Go invalidate the next...
1326 bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...
1328 mr r3,r31 ; Copy the pointer to the mapping
1329 bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one
1331 sync ; Make sure memory is consistent
1333 subi r5,r25,255 ; Subtract TLB size from page count (note we are 0 based here)
1334 li r6,255 ; Assume full invalidate for now
1335 srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
1336 andc r6,r6,r5 ; Clear max if we have less to do
1337 and r5,r25,r5 ; Clear count if we have more than max
1338 sldi r24,r24,28 ; Get the full XOR value over to segment position
1339 ld r27,mpVAddr(r31) ; Get the base vaddr
1340 li r7,tlbieLock ; Get the TLBIE lock
1341 or r5,r5,r6 ; Get number of TLBIEs needed
1343 hrmBTLBlcl: lwarx r2,0,r7 ; Get the TLBIE lock
1344 mr. r2,r2 ; Is it locked?
1345 li r2,1 ; Get our lock value
1346 bne-- hrmBTLBlcm ; It is locked, go wait...
1347 stwcx. r2,0,r7 ; Try to get it
1348 bne-- hrmBTLBlcl ; We was beat...
1350 hrmBTLBj: sldi r2,r27,maxAdrSpb ; Move to make room for address space ID
1351 rldicr r2,r2,0,35-maxAdrSpb ; Clear out the extra
1352 addic. r5,r5,-1 ; See if we did them all
1353 xor r2,r2,r24 ; Make the VSID
1354 rldimi r2,r27,0,36 ; Insert the page portion of the VPN
1355 rldicl r2,r2,0,16 ; Clear bits 0:15 cause they say we gotta
1357 tlbie r2 ; Invalidate it everywhere
1358 addi r27,r27,0x1000 ; Up to the next page
1359 bge++ hrmBTLBj ; Make sure we have done it all...
1361 eieio ; Make sure that the tlbie happens first
1362 tlbsync ; wait for everyone to catch up
1364 li r2,0 ; Lock clear value
1366 ptesync ; Wait for quiet again
1368 stw r2,tlbieLock(0) ; Clear the tlbie lock
1370 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1371 bl sxlkShared ; Go get a shared lock on the mapping lists
1372 mr. r3,r3 ; Did we get the lock?
1373 bne- hrmPanic ; Nope...
1375 lwz r4,mpVAddr(r31) ; High order of address
1376 lwz r5,mpVAddr+4(r31) ; Low order of address
1377 mr r3,r28 ; Pass in pmap to search
1378 mr r29,r4 ; Save this in case we need it (only needed if promote fails)
1379 mr r30,r5 ; Save this in case we need it (only needed if promote fails)
1380 bl EXT(mapSearchFull) ; Go see if we can find it
1382 mr. r3,r3 ; Did we? (And remember mapping address for later)
1383 mr r15,r4 ; Save top of next vaddr
1384 mr r16,r5 ; Save bottom of next vaddr
1385 beq- hrmPanic ; Nope, not found...
1387 cmpld r3,r31 ; Same mapping?
1388 bne- hrmPanic ; Not good...
1390 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1391 bl sxlkPromote ; Try to promote shared to exclusive
1392 mr. r3,r3 ; Could we?
1393 mr r3,r31 ; Restore the mapping pointer
1394 beq+ hrmBDone2 ; Yeah...
1396 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1397 bl sxlkConvert ; Convert shared to exclusive
1398 mr. r3,r3 ; Could we?
1399 bne-- hrmPanic ; Nope, we must have timed out...
1401 mr r3,r28 ; Pass in pmap to search
1402 mr r4,r29 ; High order of address
1403 mr r5,r30 ; Low order of address
1404 bl EXT(mapSearchFull) ; Rescan the list
1406 mr. r3,r3 ; Did we lose it when we converted?
1407 mr r15,r4 ; Save top of next vaddr
1408 mr r16,r5 ; Save bottom of next vaddr
1409 beq-- hrmPanic ; Yeah, we did, someone tossed it for us...
1411 hrmBDone2: bl mapDrainBusy ; Go wait until mapping is unused
1413 mr r3,r28 ; Get the pmap to remove from
1414 mr r4,r31 ; Point to the mapping
1415 bl EXT(mapRemove) ; Remove the mapping from the list
1417 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1418 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1419 subi r4,r4,1 ; Drop down the mapped page count
1420 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1421 bl sxlkUnlock ; Unlock the search list
1423 b hrmRetn64 ; We are all done, get out...
1425 hrmBTLBlcm: li r2,lgKillResv ; Get space unreserve line
1426 stwcx. r2,0,r2 ; Unreserve it
1428 hrmBTLBlcn: lwz r2,0(r7) ; Get the TLBIE lock
1429 mr. r2,r2 ; Is it held?
1430 beq++ hrmBTLBlcl ; Nope...
1431 b hrmBTLBlcn ; Yeah...
1434 ; Guest shadow assist -- mapping remove
1436 ; Method of operation:
1437 ; o Locate the VMM extension block and the host pmap
1438 ; o Obtain the host pmap's search lock exclusively
1439 ; o Locate the requested mapping in the shadow hash table,
1441 ; o If connected, disconnect the PTE and gather R&C to physent
1442 ; o Locate and lock the physent
1443 ; o Remove mapping from physent's chain
1445 ; o Unlock pmap's search lock
1447 ; Non-volatile registers on entry:
1448 ; r17: caller's msr image
1449 ; r19: sprg2 (feature flags)
1450 ; r28: guest pmap's physical address
1451 ; r29: high-order 32 bits of guest virtual address
1452 ; r30: low-order 32 bits of guest virtual address
1454 ; Non-volatile register usage:
1455 ; r26: VMM extension block's physical address
1456 ; r27: host pmap's physical address
1457 ; r28: guest pmap's physical address
1458 ; r29: physent's physical address
1459 ; r30: guest virtual address
1460 ; r31: guest mapping's physical address
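;
; Hedged C sketch of the shadow-hash lookup performed below (the structure and
; flag names are stand-ins, not the real layout): the hash group is selected by
; spaceID ^ (gva >> 12), split into a hash-page index and a group offset, and the
; slots in that group are scanned for an in-use entry whose space ID and
; page-aligned virtual address match.
;
;   struct gmap {
;       unsigned int       flags;   /* includes a free bit (mpgFree in this file)   */
;       unsigned short     space;   /* guest space ID                               */
;       unsigned long long vaddr;   /* guest virtual address, low 12 bits are flags */
;   };
;   #define GMAP_FREE 0x1u          /* stand-in for mpgFree */
;
;   static struct gmap *guest_find(struct gmap *group, unsigned int nslots,
;                                  unsigned short space_id, unsigned long long gva)
;   {
;       for (unsigned int i = 0; i < nslots; i++) {
;           struct gmap *m = &group[i];
;           if ((m->flags & GMAP_FREE) == 0 &&                 /* slot is in use           */
;               m->space == space_id &&                        /* guest space matches      */
;               (m->vaddr & ~0xFFFULL) == (gva & ~0xFFFULL))   /* page-aligned vaddr match */
;               return m;
;       }
;       return 0;                                              /* miss                     */
;   }
;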
1464 rlwinm r30,r30,0,0xFFFFF000 ; Clean up low-order bits of 32-bit guest vaddr
1465 bt++ pf64Bitb,hrmG64 ; Test for 64-bit machine
1466 lwz r26,pmapVmmExtPhys+4(r28) ; r26 <- VMM pmap extension block paddr
1467 lwz r27,vmxHostPmapPhys+4(r26) ; r27 <- host pmap's paddr
1468 b hrmGStart ; Join common code
1470 hrmG64: ld r26,pmapVmmExtPhys(r28) ; r26 <- VMM pmap extension block paddr
1471 ld r27,vmxHostPmapPhys(r26) ; r27 <- host pmap's paddr
1472 rldimi r30,r29,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
1474 hrmGStart: la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
1475 bl sxlkExclusive ; Get lock exclusive
1477 lwz r3,vxsGrm(r26) ; Get mapping remove request count
1479 lwz r9,pmapSpace(r28) ; r9 <- guest space ID number
1480 la r31,VMX_HPIDX_OFFSET(r26) ; r31 <- base of hash page physical index
1481 srwi r11,r30,12 ; Form shadow hash:
1482 xor r11,r9,r11 ; spaceID ^ (vaddr >> 12)
1483 rlwinm r12,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
1484 ; Form index offset from hash page number
1485 add r31,r31,r12 ; r31 <- hash page index entry
1486 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
1487 mtctr r0 ; in this group
1488 bt++ pf64Bitb,hrmG64Search ; Separate handling for 64-bit search
1489 lwz r31,4(r31) ; r31 <- hash page paddr
1490 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
1491 ; r31 <- hash group paddr
1493 addi r3,r3,1 ; Increment remove request count
1494 stw r3,vxsGrm(r26) ; Update remove request count
1496 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1497 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1498 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
1499 b hrmG32SrchLp ; Let the search begin!
1503 mr r6,r3 ; r6 <- current mapping slot's flags
1504 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1505 mr r7,r4 ; r7 <- current mapping slot's space ID
1506 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1507 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1508 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
1509 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1510 xor r7,r7,r9 ; Compare space ID
1511 or r0,r11,r7 ; r0 <- !(free && space match)
1512 xor r8,r8,r30 ; Compare virtual address
1513 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1514 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1516 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1517 bdnz hrmG32SrchLp ; Iterate
1519 mr r6,r3 ; r6 <- current mapping slot's flags
1520 clrrwi r5,r5,12 ; Remove flags from virtual address
1521 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1522 xor r4,r4,r9 ; Compare space ID
1523 or r0,r11,r4 ; r0 <- !(free && space match)
1524 xor r5,r5,r30 ; Compare virtual address
1525 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1526 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1527 b hrmGSrchMiss ; No joy in our hash group
1530 ld r31,0(r31) ; r31 <- hash page paddr
1531 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
1532 ; r31 <- hash group paddr
1533 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1534 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1535 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
1536 b hrmG64SrchLp ; Let the search begin!
1540 mr r6,r3 ; r6 <- current mapping slot's flags
1541 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1542 mr r7,r4 ; r7 <- current mapping slot's space ID
1543 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1544 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1545 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
1546 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1547 xor r7,r7,r9 ; Compare space ID
1548 or r0,r11,r7 ; r0 <- !(free && space match)
1549 xor r8,r8,r30 ; Compare virtual address
1550 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1551 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1553 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1554 bdnz hrmG64SrchLp ; Iterate
1556 mr r6,r3 ; r6 <- current mapping slot's flags
1557 clrrdi r5,r5,12 ; Remove flags from virtual address
1558 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1559 xor r4,r4,r9 ; Compare space ID
1560 or r0,r11,r4 ; r0 <- !(free && space match)
1561 xor r5,r5,r30 ; Compare virtual address
1562 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1563 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1565 lwz r3,vxsGrmMiss(r26) ; Get remove miss count
1566 li r25,mapRtNotFnd ; Return not found
1567 addi r3,r3,1 ; Increment miss count
1568 stw r3,vxsGrmMiss(r26) ; Update miss count
1569 b hrmGReturn ; Join guest return
1573 rlwinm. r0,r6,0,mpgDormant ; Is this entry dormant?
1574 bne hrmGDormant ; Yes, nothing to disconnect
1576 lwz r3,vxsGrmActive(r26) ; Get active hit count
1577 addi r3,r3,1 ; Increment active hit count
1578 stw r3,vxsGrmActive(r26) ; Update hit count
1580 bt++ pf64Bitb,hrmGDscon64 ; Handle 64-bit disconnect separately
1581 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
1582 ; r31 <- mapping's physical address
1583 ; r3 -> PTE slot physical address
1584 ; r4 -> High-order 32 bits of PTE
1585 ; r5 -> Low-order 32 bits of PTE
1587 ; r7 -> PCA physical address
1588 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
1589 b hrmGFreePTE ; Join 64-bit path to release the PTE
1591 bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
1592 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
1594 mr. r3,r3 ; Was there a valid PTE?
1595 beq hrmGDormant ; No valid PTE, we're almost done
1596 lis r0,0x8000 ; Prepare free bit for this slot
1597 srw r0,r0,r2 ; Position free bit
1598 or r6,r6,r0 ; Set it in our PCA image
1599 lwz r8,mpPte(r31) ; Get PTE offset
1600 rlwinm r8,r8,0,~mpHValid ; Make the offset invalid
1601 stw r8,mpPte(r31) ; Save invalidated PTE offset
1602 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
1603 stw r6,0(r7) ; Update PCA and unlock the PTEG
1606 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
1607 bl mapFindLockPN ; Find 'n' lock this page's physent
1608 mr. r29,r3 ; Got lock on our physent?
1609 beq-- hrmGBadPLock ; No, time to bail out
1611 crset cr1_eq ; cr1_eq <- previous link is the anchor
1612 bt++ pf64Bitb,hrmGRemove64 ; Use 64-bit version on 64-bit machine
1613 la r11,ppLink+4(r29) ; Point to chain anchor
1614 lwz r9,ppLink+4(r29) ; Get chain anchor
1615 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
1617 beq- hrmGPEMissMiss ; End of chain, this is not good
1618 cmplw r9,r31 ; Is this the mapping to remove?
1619 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
1620 bne hrmGRemNext ; No, chain onward
1621 bt cr1_eq,hrmGRemRetry ; Mapping to remove is chained from anchor
1622 stw r8,0(r11) ; Unchain gpv->phys mapping
1623 b hrmGDelete ; Finish deleting mapping
1625 lwarx r0,0,r11 ; Get previous link
1626 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
1627 stwcx. r0,0,r11 ; Update previous link
1628 bne- hrmGRemRetry ; Lost reservation, retry
1629 b hrmGDelete ; Finish deleting mapping
1632 la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
1633 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1634 mr. r9,r8 ; Does next entry exist?
1635 b hrmGRemLoop ; Carry on
1638 li r7,ppLFAmask ; Get mask to clean up mapping pointer
1639 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1640 la r11,ppLink(r29) ; Point to chain anchor
1641 ld r9,ppLink(r29) ; Get chain anchor
1642 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
1644 beq-- hrmGPEMissMiss ; End of chain, this is not good
1645 cmpld r9,r31 ; Is this the mapping to remove?
1646 ld r8,mpAlias(r9) ; Get forward chain pointer
1647 bne hrmGRem64Nxt ; No mapping to remove, chain on, dude
1648 bt cr1_eq,hrmGRem64Rt ; Mapping to remove is chained from anchor
1649 std r8,0(r11) ; Unchain gpv->phys mapping
1650 b hrmGDelete ; Finish deleting mapping
1652 ldarx r0,0,r11 ; Get previous link
1653 and r0,r0,r7 ; Get flags
1654 or r0,r0,r8 ; Insert new forward pointer
1655 stdcx. r0,0,r11 ; Slam it back in
1656 bne-- hrmGRem64Rt ; Lost reservation, retry
1657 b hrmGDelete ; Finish deleting mapping
1661 la r11,mpAlias(r9) ; Point to (soon to be) previous link
1662 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1663 mr. r9,r8 ; Does next entry exist?
1664 b hrmGRem64Lp ; Carry on
1667 mr r3,r29 ; r3 <- physent addr
1668 bl mapPhysUnlock ; Unlock physent chain
1669 lwz r3,mpFlags(r31) ; Get mapping's flags
1670 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
1671 ori r3,r3,mpgFree ; Mark mapping free
1672 stw r3,mpFlags(r31) ; Update flags
1673 li r25,mapRtGuest ; Set return code to 'found guest mapping'
1676 la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
1677 bl sxlkUnlock ; Release host pmap search lock
1679 mr r3,r25 ; r3 <- return code
1680 bt++ pf64Bitb,hrmGRtn64 ; Handle 64-bit separately
1681 mtmsr r17 ; Restore 'rupts, translation
1682 isync ; Throw a small wrench into the pipeline
1683 b hrmRetnCmn ; Nothing to do now but pop a frame and return
1684 hrmGRtn64: mtmsrd r17 ; Restore 'rupts, translation, 32-bit mode
1685 b hrmRetnCmn ; Join common return
1689 lis r0,hi16(Choke) ; Seen the arrow on the doorpost
1690 ori r0,r0,lo16(Choke) ; Sayin' "THIS LAND IS CONDEMNED"
1691 li r3,failMapping ; All the way from New Orleans
1696 * mapping *hw_purge_phys(physent) - remove a mapping from the system
1698 * Upon entry, R3 contains a pointer to a physent.
1700 * This function removes the first mapping from a physical entry
1701 * alias list. It locks the list, extracts the vaddr and pmap from
1702 * the first entry. It then jumps into the hw_rem_map function.
1703 * NOTE: since we jump into rem_map, we need to set up the stack
1704 * identically. Also, we set the next parm to 0 so we do not
1705 * try to save a next vaddr.
1707 * We return the virtual address of the removed mapping as a return value.
1710 * Note that this is designed to be called from 32-bit mode with a stack.
1712 * We disable translation and all interruptions here. This keeps us
1713 * from having to worry about a deadlock due to having anything locked
1714 * and needing it to process a fault.
1716 * Note that this must be done with both interruptions off and VM off
1719 * Remove mapping via physical page (mapping_purge)
1722 * 2) extract vaddr and pmap
1724 * 4) do "remove mapping via pmap"
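;
;			The C sketch below is illustrative only and is not part of the build;
;			helper names such as strip_flags() and remove_mapping() are hypothetical
;			stand-ins for the assembly paths used here. It shows the flow described
;			above: lock the physent, take the first mapping on its alias chain,
;			translate its space ID to a pmap through pmapTrans, then fall into the
;			common removal path.
;
;				addr64_t hw_purge_phys_sketch(struct phys_entry *pp) {
;					mapPhysLock(pp);
;					struct mapping *mp = strip_flags(pp->ppLink);   // first mapping, flag bits removed
;					if (mp == NULL) {                               // nothing mapped to this page
;						mapPhysUnlock(pp);
;						return mapRtEmpty;
;					}
;					pmap_t pmap = pmapTrans[mp->mpSpace].pmapPAddr; // space hash -> pmap
;					addr64_t va = mp->mpVAddr;
;					mapPhysUnlock(pp);
;					return remove_mapping(pmap, va, NULL);          // join the common remove path
;				}
;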
1730 .globl EXT(hw_purge_phys)
1733 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1734 mflr r0 ; Save the link register
1735 stw r15,FM_ARG0+0x00(r1) ; Save a register
1736 stw r16,FM_ARG0+0x04(r1) ; Save a register
1737 stw r17,FM_ARG0+0x08(r1) ; Save a register
1738 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1739 stw r19,FM_ARG0+0x10(r1) ; Save a register
1740 stw r20,FM_ARG0+0x14(r1) ; Save a register
1741 stw r21,FM_ARG0+0x18(r1) ; Save a register
1742 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1743 stw r23,FM_ARG0+0x20(r1) ; Save a register
1744 stw r24,FM_ARG0+0x24(r1) ; Save a register
1745 stw r25,FM_ARG0+0x28(r1) ; Save a register
1746 li r6,0 ; Set no next address return
1747 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1748 stw r27,FM_ARG0+0x30(r1) ; Save a register
1749 stw r28,FM_ARG0+0x34(r1) ; Save a register
1750 stw r29,FM_ARG0+0x38(r1) ; Save a register
1751 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1752 stw r31,FM_ARG0+0x40(r1) ; Save a register
1753 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1754 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1756 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1758 bl mapPhysLock ; Lock the physent
1760 bt++ pf64Bitb,hppSF ; skip if 64-bit (only they take the hint)
1762 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
1763 li r0,ppFlags ; Set the bottom stuff to clear
1764 b hppJoin ; Join the common...
1766 hppSF: li r0,ppLFAmask
1767 ld r12,ppLink(r3) ; Get the pointer to the first mapping
1768 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
1770 hppJoin: andc. r12,r12,r0 ; Clean and test link
1771 beq-- hppNone ; There are no more mappings on physical page
1773 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1774 lhz r7,mpSpace(r12) ; Get the address space hash
1775 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the bottom of the start of the pmap hash to pmap translate table
1776 slwi r0,r7,2 ; Multiply space by 4
1777 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
1778 slwi r7,r7,3 ; Multiply space by 8
1779 lwz r5,mpVAddr+4(r12) ; and the bottom
1780 add r7,r7,r0 ; Get correct displacement into translate table
1781 lwz r28,0(r28) ; Get the actual translation map
1783 add r28,r28,r7 ; Point to the pmap translation
1785 bl mapPhysUnlock ; Time to unlock the physical entry
1787 bt++ pf64Bitb,hppSF2 ; skip if 64-bit (only they take the hint)
1789 lwz r28,pmapPAddr+4(r28) ; Get the physical address of the pmap
1790 b hrmJoin ; Go remove the mapping...
1792 hppSF2: ld r28,pmapPAddr(r28) ; Get the physical address of the pmap
1793 b hrmJoin ; Go remove the mapping...
1797 hppNone: bl mapPhysUnlock ; Time to unlock the physical entry
1799 bt++ pf64Bitb,hppSF3 ; skip if 64-bit (only they take the hint)...
1801 mtmsr r11 ; Restore enables/translation/etc.
1803 b hppRetnCmn ; Join the common return code...
1805 hppSF3: mtmsrd r11 ; Restore enables/translation/etc.
1809 ; NOTE: we have not used any registers other than the volatiles to this point
1812 hppRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
1814 li r3,mapRtEmpty ; Physent chain is empty
1815 mtlr r12 ; Restore the return
1816 lwz r1,0(r1) ; Pop the stack
1820 * mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
1822 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
1823 * a 64-bit quantity, it is a long long so it is in R4 and R5.
1825 * We return the virtual address of the removed mapping as a return value.
1828 * Note that this is designed to be called from 32-bit mode with a stack.
1830 * We disable translation and all interruptions here. This keeps us
1831 * from having to worry about a deadlock due to having anything locked
1832 * and needing it to process a fault.
1834 * Note that this must be done with both interruptions off and VM off
1836 * Remove a mapping which can be reestablished by VM
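;
;			Illustrative C sketch (not part of the build; mapSearchFull's C signature
;			and the is_*() helpers are hypothetical stand-ins). It shows the loop
;			below: walk the pmap from vaddr, skipping mappings that are busy,
;			permanent, or not normal, and remove the first acceptable one.
;
;				addr64_t hw_purge_map_sketch(pmap_t pmap, addr64_t va, addr64_t *next) {
;					if (sxlkExclusive(&pmap->pmapSXlk) != 0) return mapRtBadLk;
;					for (;;) {
;						struct mapping *mp = mapSearchFull(pmap, va, &va);  // va becomes the next vaddr
;						if (mp != NULL && is_normal(mp) && !is_permanent(mp) && !is_busy(mp))
;							return remove_mapping(pmap, mp, next);          // common removal path
;						if (va == 0) return mapRtNotFnd;                    // nothing left to check
;					}
;				}
;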
1841 .globl EXT(hw_purge_map)
1844 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1845 mflr r0 ; Save the link register
1846 stw r15,FM_ARG0+0x00(r1) ; Save a register
1847 stw r16,FM_ARG0+0x04(r1) ; Save a register
1848 stw r17,FM_ARG0+0x08(r1) ; Save a register
1849 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1850 stw r19,FM_ARG0+0x10(r1) ; Save a register
1851 mfsprg r19,2 ; Get feature flags
1852 stw r20,FM_ARG0+0x14(r1) ; Save a register
1853 stw r21,FM_ARG0+0x18(r1) ; Save a register
1854 mtcrf 0x02,r19 ; move pf64Bit cr6
1855 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1856 stw r23,FM_ARG0+0x20(r1) ; Save a register
1857 stw r24,FM_ARG0+0x24(r1) ; Save a register
1858 stw r25,FM_ARG0+0x28(r1) ; Save a register
1859 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1860 stw r27,FM_ARG0+0x30(r1) ; Save a register
1861 stw r28,FM_ARG0+0x34(r1) ; Save a register
1862 stw r29,FM_ARG0+0x38(r1) ; Save a register
1863 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1864 stw r31,FM_ARG0+0x40(r1) ; Save a register
1865 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1866 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1869 lwz r11,pmapFlags(r3) ; Get pmaps flags
1870 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1871 bne hpmPanic ; Call not valid for guest shadow assist pmap
1874 bt++ pf64Bitb,hpmSF1 ; skip if 64-bit (only they take the hint)
1875 lwz r9,pmapvr+4(r3) ; Get conversion mask
1878 hpmSF1: ld r9,pmapvr(r3) ; Get conversion mask
1881 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1883 xor r28,r3,r9 ; Convert the pmap to physical addressing
1885 mr r17,r11 ; Save the MSR
1887 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1888 bl sxlkExclusive ; Go get an exclusive lock on the mapping lists
1889 mr. r3,r3 ; Did we get the lock?
1890 bne-- hrmBadLock ; Nope...
1892 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
1893 ; here so that we will know the previous elements so we can dequeue them
1897 mr r3,r28 ; Pass in pmap to search
1898 mr r29,r4 ; Top half of vaddr
1899 mr r30,r5 ; Bottom half of vaddr
1900 bl EXT(mapSearchFull) ; Rescan the list
1901 mr. r31,r3 ; Did we? (And remember mapping address for later)
1902 or r0,r4,r5 ; Are we beyond the end?
1903 mr r15,r4 ; Save top of next vaddr
1904 cmplwi cr1,r0,0 ; See if there is another
1905 mr r16,r5 ; Save bottom of next vaddr
1906 bne-- hpmGotOne ; We found one, go check it out...
1908 hpmCNext: bne++ cr1,hpmSearch ; There is another to check...
1909 b hrmNotFound ; No more in pmap to check...
1911 hpmGotOne: lwz r20,mpFlags(r3) ; Get the flags
1912 andi. r0,r20,lo16(mpType|mpPerm) ; cr0_eq <- normal mapping && !permanent
1913 rlwinm r21,r20,8,24,31 ; Extract the busy count
1914 cmplwi cr2,r21,0 ; Is it busy?
1915 crand cr0_eq,cr2_eq,cr0_eq ; not busy and can be removed?
1916 beq++ hrmGotX ; Found, branch to remove the mapping...
1917 b hpmCNext ; Nope...
1919 hpmPanic: lis r0,hi16(Choke) ; System abend
1920 ori r0,r0,lo16(Choke) ; System abend
1921 li r3,failMapping ; Show that we failed some kind of mapping thing
1925 * mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
1927 * Upon entry, R3 contains a pointer to the physent and
1928 * R4 contains a pointer to the pmap.
1930 * This function removes the first mapping for a specific pmap from a physical entry
1931 * alias list. It locks the list, extracts the vaddr and pmap from
1932 * the first appropriate entry. It then jumps into the hw_rem_map function.
1933 * NOTE: since we jump into rem_map, we need to set up the stack
1934 * identically. Also, we set the next parm to 0 so we do not
1935 * try to save a next vaddr.
1937 * We return the virtual address of the removed mapping as a return value.
1940 * Note that this is designed to be called from 32-bit mode with a stack.
1942 * We disable translation and all interruptions here. This keeps us
1943 * from having to worry about a deadlock due to having anything locked
1944 * and needing it to process a fault.
1946 * Note that this must be done with both interruptions off and VM off
1949 * Remove mapping via physical page (mapping_purge)
1952 * 2) extract vaddr and pmap
1954 * 4) do "remove mapping via pmap"
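;
;			Illustrative C sketch (not part of the build; strip_flags() and
;			remove_mapping() are hypothetical stand-ins). It shows the search below:
;			walk the physent's alias chain for the first mapping whose space hash
;			matches the pmap, then join the common removal path.
;
;				addr64_t hw_purge_space_sketch(struct phys_entry *pp, pmap_t pmap) {
;					unsigned int space = pmap->pmapSpace;           // space hash to match
;					mapPhysLock(pp);
;					for (struct mapping *mp = strip_flags(pp->ppLink); mp != NULL;
;							mp = strip_flags(mp->mpAlias)) {
;						if (mp->mpSpace == space) {                 // first mapping for this pmap
;							addr64_t va = mp->mpVAddr;
;							mapPhysUnlock(pp);
;							return remove_mapping(pmap, va, NULL);  // common removal path
;						}
;					}
;					mapPhysUnlock(pp);
;					return mapRtEmpty;                              // none for this space
;				}
;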
1960 .globl EXT(hw_purge_space)
1962 LEXT(hw_purge_space)
1963 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1964 mflr r0 ; Save the link register
1965 stw r15,FM_ARG0+0x00(r1) ; Save a register
1966 stw r16,FM_ARG0+0x04(r1) ; Save a register
1967 stw r17,FM_ARG0+0x08(r1) ; Save a register
1968 mfsprg r2,2 ; Get feature flags
1969 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1970 stw r19,FM_ARG0+0x10(r1) ; Save a register
1971 stw r20,FM_ARG0+0x14(r1) ; Save a register
1972 stw r21,FM_ARG0+0x18(r1) ; Save a register
1973 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1974 mtcrf 0x02,r2 ; move pf64Bit cr6
1975 stw r23,FM_ARG0+0x20(r1) ; Save a register
1976 stw r24,FM_ARG0+0x24(r1) ; Save a register
1977 stw r25,FM_ARG0+0x28(r1) ; Save a register
1978 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1979 stw r27,FM_ARG0+0x30(r1) ; Save a register
1980 li r6,0 ; Set no next address return
1981 stw r28,FM_ARG0+0x34(r1) ; Save a register
1982 stw r29,FM_ARG0+0x38(r1) ; Save a register
1983 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1984 stw r31,FM_ARG0+0x40(r1) ; Save a register
1985 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1986 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1989 lwz r11,pmapFlags(r4) ; Get pmaps flags
1990 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1991 bne hpsPanic ; Call not valid for guest shadow assist pmap
1994 bt++ pf64Bitb,hpsSF1 ; skip if 64-bit (only they take the hint)
1996 lwz r9,pmapvr+4(r4) ; Get conversion mask for pmap
2000 hpsSF1: ld r9,pmapvr(r4) ; Get conversion mask for pmap
2002 hpsSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2004 xor r4,r4,r9 ; Convert the pmap to physical addressing
2006 bl mapPhysLock ; Lock the physent
2008 lwz r8,pmapSpace(r4) ; Get the space hash
2010 bt++ pf64Bitb,hpsSF ; skip if 64-bit (only they take the hint)
2012 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2014 hpsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2015 beq hpsNone ; Did not find one...
2017 lhz r10,mpSpace(r12) ; Get the space
2019 cmplw r10,r8 ; Is this one of ours?
2022 lwz r12,mpAlias+4(r12) ; Chain on to the next
2023 b hpsSrc32 ; Check it out...
2027 hpsSF: li r0,ppLFAmask
2028 ld r12,ppLink(r3) ; Get the pointer to the first mapping
2029 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2031 hpsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2032 beq hpsNone ; Did not find one...
2034 lhz r10,mpSpace(r12) ; Get the space
2036 cmplw r10,r8 ; Is this one of ours?
2039 ld r12,mpAlias(r12) ; Chain on to the next
2040 b hpsSrc64 ; Check it out...
2044 hpsFnd: mr r28,r4 ; Set the pmap physical address
2045 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2046 lwz r5,mpVAddr+4(r12) ; and the bottom
2048 bl mapPhysUnlock ; Time to unlock the physical entry
2049 b hrmJoin ; Go remove the mapping...
2053 hpsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2055 bt++ pf64Bitb,hpsSF3 ; skip if 64-bit (only they take the hint)...
2057 mtmsr r11 ; Restore enables/translation/etc.
2059 b hpsRetnCmn ; Join the common return code...
2061 hpsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2065 ; NOTE: we have not used any registers other than the volatiles to this point
2068 hpsRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2070 li r3,mapRtEmpty ; No mappings for specified pmap on physent chain
2071 mtlr r12 ; Restore the return
2072 lwz r1,0(r1) ; Pop the stack
2075 hpsPanic: lis r0,hi16(Choke) ; System abend
2076 ori r0,r0,lo16(Choke) ; System abend
2077 li r3,failMapping ; Show that we failed some kind of mapping thing
2081 * mapping *hw_scrub_guest(physent, pmap) - remove first guest mapping associated with host
2082 * on this physent chain
2084 * Locates the first guest mapping on the physent chain that is associated with the
2085 * specified host pmap. If this succeeds, the mapping is removed by joining the general
2086 * remove path; otherwise, we return NULL. The caller is expected to invoke this entry
2087 * repeatedly until no additional guest mappings that match our criteria are removed.
2089 * Because this entry point exits through hw_rem_map, our prolog pushes its frame.
2092 * r3 : physent, 32-bit kernel virtual address
2093 * r4 : host pmap, 32-bit kernel virtual address
2095 * Volatile register usage (for linkage through hrmJoin):
2096 * r4 : high-order 32 bits of guest virtual address
2097 * r5 : low-order 32 bits of guest virtual address
2098 * r11: saved MSR image
2100 * Non-volatile register usage:
2101 * r26: VMM extension block's physical address
2102 * r27: host pmap's physical address
2103 * r28: guest pmap's physical address
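;
;			Illustrative C sketch (not part of the build; strip_flags(),
;			mapping_type(), and remove_mapping() are hypothetical stand-ins). It
;			shows the scan below: walk the physent's alias chain for the first
;			guest mapping whose owning pmap points at the same VMM extension block
;			as the specified host pmap, then join the common removal path.
;
;				addr64_t hw_scrub_guest_sketch(struct phys_entry *pp, pmap_t host) {
;					mapPhysLock(pp);
;					for (struct mapping *mp = strip_flags(pp->ppLink); mp != NULL;
;							mp = strip_flags(mp->mpAlias)) {
;						pmap_t guest = pmapTrans[mp->mpSpace].pmapPAddr;     // owning pmap
;						if (mapping_type(mp) == mpGuest &&
;								guest->pmapVmmExtPhys == host->pmapVmmExtPhys) {
;							mapPhysUnlock(pp);
;							return remove_mapping(guest, mp->mpVAddr, NULL); // common removal path
;						}
;					}
;					mapPhysUnlock(pp);
;					return mapRtEmpty;                                       // no matching guest mapping
;				}
;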
2108 .globl EXT(hw_scrub_guest)
2110 LEXT(hw_scrub_guest)
2111 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
2112 mflr r0 ; Save the link register
2113 stw r15,FM_ARG0+0x00(r1) ; Save a register
2114 stw r16,FM_ARG0+0x04(r1) ; Save a register
2115 stw r17,FM_ARG0+0x08(r1) ; Save a register
2116 mfsprg r2,2 ; Get feature flags
2117 stw r18,FM_ARG0+0x0C(r1) ; Save a register
2118 stw r19,FM_ARG0+0x10(r1) ; Save a register
2119 stw r20,FM_ARG0+0x14(r1) ; Save a register
2120 stw r21,FM_ARG0+0x18(r1) ; Save a register
2121 stw r22,FM_ARG0+0x1C(r1) ; Save a register
2122 mtcrf 0x02,r2 ; move pf64Bit cr6
2123 stw r23,FM_ARG0+0x20(r1) ; Save a register
2124 stw r24,FM_ARG0+0x24(r1) ; Save a register
2125 stw r25,FM_ARG0+0x28(r1) ; Save a register
2126 stw r26,FM_ARG0+0x2C(r1) ; Save a register
2127 stw r27,FM_ARG0+0x30(r1) ; Save a register
2128 li r6,0 ; Set no next address return
2129 stw r28,FM_ARG0+0x34(r1) ; Save a register
2130 stw r29,FM_ARG0+0x38(r1) ; Save a register
2131 stw r30,FM_ARG0+0x3C(r1) ; Save a register
2132 stw r31,FM_ARG0+0x40(r1) ; Save a register
2133 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
2134 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2136 lwz r11,pmapVmmExt(r4) ; get VMM pmap extension block vaddr
2138 bt++ pf64Bitb,hsg64Salt ; Test for 64-bit machine
2139 lwz r26,pmapVmmExtPhys+4(r4) ; Get VMM pmap extension block paddr
2140 lwz r9,pmapvr+4(r4) ; Get 32-bit virt<->real conversion salt
2141 b hsgStart ; Get to work
2143 hsg64Salt: ld r26,pmapVmmExtPhys(r4) ; Get VMM pmap extension block paddr
2144 ld r9,pmapvr(r4) ; Get 64-bit virt<->real conversion salt
2146 hsgStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
2147 xor r27,r4,r9 ; Convert host pmap_t virt->real
2148 bl mapPhysLock ; Lock the physent
2150 bt++ pf64Bitb,hsg64Scan ; Test for 64-bit machine
2152 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2153 hsg32Loop: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2154 beq hsg32Miss ; Did not find one...
2155 lwz r8,mpFlags(r12) ; Get mapping's flags
2156 lhz r7,mpSpace(r12) ; Get mapping's space id
2157 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2158 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2159 xori r8,r8,mpGuest ; Is it a guest mapping?
2160 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the bottom of the start of the pmap hash to pmap translate table
2161 slwi r9,r7,2 ; Multiply space by 4
2162 lwz r28,0(r28) ; Get the actual translation map
2163 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2164 slwi r7,r7,3 ; Multiply space by 8
2165 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2166 add r7,r7,r9 ; Get correct displacement into translate table
2167 add r28,r28,r7 ; Point to the pmap translation
2168 lwz r28,pmapPAddr+4(r28) ; Get guest pmap paddr
2169 lwz r7,pmapVmmExtPhys+4(r28) ; Get VMM extension block paddr
2170 xor r7,r7,r26 ; Is guest associated with specified host?
2171 or. r7,r7,r8 ; Guest mapping && associated with host?
2172 lwz r12,mpAlias+4(r12) ; Chain on to the next
2173 bne hsg32Loop ; Try next mapping on alias chain
2175 hsg32Hit: bl mapPhysUnlock ; Unlock physent chain
2176 b hrmJoin ; Join common path for mapping removal
2179 hsg32Miss: bl mapPhysUnlock ; Unlock physent chain
2180 mtmsr r11 ; Restore 'rupts, translation
2181 isync ; Throw a small wrench into the pipeline
2182 li r3,mapRtEmpty ; No mappings found matching specified criteria
2183 b hrmRetnCmn ; Exit through common epilog
2186 hsg64Scan: li r6,ppLFAmask ; Get lock, flag, attribute mask seed
2187 ld r12,ppLink(r3) ; Grab the pointer to the first mapping
2188 rotrdi r6,r6,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2189 hsg64Loop: andc. r12,r12,r6 ; Clean and test mapping address
2190 beq hsg64Miss ; Did not find one...
2191 lwz r8,mpFlags(r12) ; Get mapping's flags
2192 lhz r7,mpSpace(r12) ; Get mapping's space id
2193 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2194 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2195 xori r8,r8,mpGuest ; Is it a guest mapping?
2196 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the bottom of the start of the pmap hash to pmap translate table
2197 slwi r9,r7,2 ; Multiply space by 4
2198 lwz r28,0(r28) ; Get the actual translation map
2199 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2200 slwi r7,r7,3 ; Multiply space by 8
2201 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2202 add r7,r7,r9 ; Get correct displacement into translate table
2203 add r28,r28,r7 ; Point to the pmap translation
2204 ld r28,pmapPAddr(r28) ; Get guest pmap paddr
2205 ld r7,pmapVmmExtPhys(r28) ; Get VMM extension block paddr
2206 xor r7,r7,r26 ; Is guest associated with specified host?
2207 or. r7,r7,r8 ; Guest mapping && associated with host?
2208 ld r12,mpAlias(r12) ; Chain on to the next
2209 bne hsg64Loop ; Try next mapping on alias chain
2211 hsg64Hit: bl mapPhysUnlock ; Unlock physent chain
2212 b hrmJoin ; Join common path for mapping removal
2215 hsg64Miss: bl mapPhysUnlock ; Unlock physent chain
2216 mtmsrd r11 ; Restore 'rupts, translation
2217 li r3,mapRtEmpty ; No mappings found matching specified criteria
2218 b hrmRetnCmn ; Exit through common epilog
2222 * mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
2224 * Upon entry, R3 contains a pointer to a physent.
2225 * space is the space ID from the pmap in question
2227 * We return the virtual address of the found mapping in
2228 * R3. Note that the mapping's busy count is bumped.
2230 * Note that this is designed to be called from 32-bit mode with a stack.
2232 * We disable translation and all interruptions here. This keeps us
2233 * from having to worry about a deadlock due to having anything locked
2234 * and needing it to process a fault.
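;
;			Illustrative C sketch (not part of the build; strip_flags() and
;			mapping_phys_to_virt() are hypothetical stand-ins). It shows the scan
;			below: find the first mapping on the alias chain for the given space,
;			bump its busy count so it cannot vanish, and hand back a virtual pointer.
;
;				struct mapping *hw_find_space_sketch(struct phys_entry *pp, unsigned int space) {
;					mapPhysLock(pp);
;					struct mapping *mp;
;					for (mp = strip_flags(pp->ppLink); mp != NULL; mp = strip_flags(mp->mpAlias))
;						if (mp->mpSpace == space) {      // first mapping for this space
;							mapBumpBusy(mp);             // hold it for the caller
;							break;
;						}
;					mapPhysUnlock(pp);
;					return (mp == NULL) ? NULL : mapping_phys_to_virt(mp);
;				}
;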
2239 .globl EXT(hw_find_space)
2242 stwu r1,-(FM_SIZE)(r1) ; Make some space on the stack
2243 mflr r0 ; Save the link register
2244 mr r8,r4 ; Remember the space
2245 stw r0,(FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2247 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2249 bl mapPhysLock ; Lock the physent
2251 bt++ pf64Bitb,hfsSF ; skip if 64-bit (only they take the hint)
2253 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2255 hfsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2256 beq hfsNone ; Did not find one...
2258 lhz r10,mpSpace(r12) ; Get the space
2260 cmplw r10,r8 ; Is this one of ours?
2263 lwz r12,mpAlias+4(r12) ; Chain on to the next
2264 b hfsSrc32 ; Check it out...
2268 hfsSF: li r0,ppLFAmask
2269 ld r12,ppLink(r3) ; Get the pointer to the first mapping
2270 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2272 hfsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2273 beq hfsNone ; Did not find one...
2275 lhz r10,mpSpace(r12) ; Get the space
2277 cmplw r10,r8 ; Is this one of ours?
2280 ld r12,mpAlias(r12) ; Chain on to the next
2281 b hfsSrc64 ; Check it out...
2285 hfsFnd: mr r8,r3 ; Save the physent
2286 mr r3,r12 ; Point to the mapping
2287 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disappear
2289 mr r3,r8 ; Get back the physical entry
2290 li r7,0xFFF ; Get a page size mask
2291 bl mapPhysUnlock ; Time to unlock the physical entry
2293 andc r3,r12,r7 ; Move the mapping back down to a page
2294 lwz r3,mbvrswap+4(r3) ; Get last half of virtual to real swap
2295 xor r12,r3,r12 ; Convert to virtual
2296 b hfsRet ; Time to return
2300 hfsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2302 hfsRet: bt++ pf64Bitb,hfsSF3 ; skip if 64-bit (only they take the hint)...
2304 mtmsr r11 ; Restore enables/translation/etc.
2306 b hfsRetnCmn ; Join the common return code...
2308 hfsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2312 ; NOTE: we have not used any registers other than the volatiles to this point
2315 hfsRetnCmn: mr r3,r12 ; Get the mapping or a 0 if we failed
2318 mr. r3,r3 ; Anything to return?
2319 beq hfsRetnNull ; Nope
2320 lwz r11,mpFlags(r3) ; Get mapping flags
2321 rlwinm r0,r11,0,mpType ; Isolate the mapping type
2322 cmplwi r0,mpGuest ; Shadow guest mapping?
2323 beq hfsPanic ; Yup, kick the bucket
2327 lwz r12,(FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2329 mtlr r12 ; Restore the return
2330 lwz r1,0(r1) ; Pop the stack
2333 hfsPanic: lis r0,hi16(Choke) ; System abend
2334 ori r0,r0,lo16(Choke) ; System abend
2335 li r3,failMapping ; Show that we failed some kind of mapping thing
2339 ; mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap
2340 ; Returns 0 if not found or the virtual address of the mapping if
2341 ; it is. Also, the mapping has the busy count bumped.
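;
;			Illustrative C sketch (not part of the build; mapSearch's C signature
;			and mapping_phys_to_virt() are hypothetical stand-ins). It shows the
;			lookup below: take the pmap search lock shared, search for the vaddr,
;			ignore a mapping that is being removed, bump the busy count on a hit,
;			and report the next vaddr either way.
;
;				struct mapping *hw_find_map_sketch(pmap_t pmap, addr64_t va, addr64_t *nextva) {
;					if (sxlkShared(&pmap->pmapSXlk) != 0) return (struct mapping *)1; // lock timeout
;					unsigned int flags;
;					struct mapping *mp = mapSearch(pmap, va, nextva, &flags);
;					if (mp != NULL && !(flags & mpRIP))
;						mapBumpBusy(mp);                 // hold it for the caller
;					else
;						mp = NULL;                       // not found or being removed
;					sxlkUnlock(&pmap->pmapSXlk);
;					return (mp == NULL) ? NULL : mapping_phys_to_virt(mp);
;				}
;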
2344 .globl EXT(hw_find_map)
2347 stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2348 mflr r0 ; Save the link register
2349 stw r25,FM_ARG0+0x00(r1) ; Save a register
2350 stw r26,FM_ARG0+0x04(r1) ; Save a register
2351 mr r25,r6 ; Remember address of next va
2352 stw r27,FM_ARG0+0x08(r1) ; Save a register
2353 stw r28,FM_ARG0+0x0C(r1) ; Save a register
2354 stw r29,FM_ARG0+0x10(r1) ; Save a register
2355 stw r30,FM_ARG0+0x14(r1) ; Save a register
2356 stw r31,FM_ARG0+0x18(r1) ; Save a register
2357 stw r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2360 lwz r11,pmapFlags(r3) ; Get pmaps flags
2361 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
2362 bne hfmPanic ; Call not valid for guest shadow assist pmap
2365 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
2366 lwz r7,pmapvr+4(r3) ; Get the second part
2369 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2371 mr r27,r11 ; Remember the old MSR
2372 mr r26,r12 ; Remember the feature bits
2374 xor r28,r3,r7 ; Change the common 32- and 64-bit half
2376 bf-- pf64Bitb,hfmSF1 ; skip if 32-bit...
2378 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
2380 hfmSF1: mr r29,r4 ; Save top half of vaddr
2381 mr r30,r5 ; Save the bottom half
2383 la r3,pmapSXlk(r28) ; Point to the pmap search lock
2384 bl sxlkShared ; Go get a shared lock on the mapping lists
2385 mr. r3,r3 ; Did we get the lock?
2386 bne-- hfmBadLock ; Nope...
2388 mr r3,r28 ; get the pmap address
2389 mr r4,r29 ; Get bits 0:31 to look for
2390 mr r5,r30 ; Get bits 32:64
2392 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
2394 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Find remove in progress bit
2395 mr. r31,r3 ; Save the mapping if we found it
2396 cmplwi cr1,r0,0 ; Are we removing?
2397 mr r29,r4 ; Save next va high half
2398 crorc cr0_eq,cr0_eq,cr1_eq ; Not found or removing
2399 mr r30,r5 ; Save next va low half
2400 li r6,0 ; Assume we did not find it
2401 li r26,0xFFF ; Get a mask to relocate to start of mapping page
2403 bt-- cr0_eq,hfmNotFnd ; We did not find it...
2405 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disappear
2407 andc r4,r31,r26 ; Get back to the mapping page start
2409 ; Note: we can treat 32- and 64-bit the same here. Because we are going from
2410 ; physical to virtual and we only do 32-bit virtual, we only need the low order word of the virtual to real swap constant.
2413 lwz r4,mbvrswap+4(r4) ; Get last half of virtual to real swap
2414 li r6,-1 ; Indicate we found it and it is not being removed
2415 xor r31,r31,r4 ; Flip to virtual
2417 hfmNotFnd: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2418 bl sxlkUnlock ; Unlock the search list
2420 rlwinm r3,r31,0,0,31 ; Move mapping to return register and clear top of register if 64-bit
2421 and r3,r3,r6 ; Clear if not found or removing
2423 hfmReturn: bt++ pf64Bitb,hfmR64 ; Yes...
2425 mtmsr r27 ; Restore enables/translation/etc.
2427 b hfmReturnC ; Join common...
2429 hfmR64: mtmsrd r27 ; Restore enables/translation/etc.
2432 hfmReturnC: stw r29,0(r25) ; Save the top of the next va
2433 stw r30,4(r25) ; Save the bottom of the next va
2434 lwz r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2435 lwz r25,FM_ARG0+0x00(r1) ; Restore a register
2436 lwz r26,FM_ARG0+0x04(r1) ; Restore a register
2437 and r3,r3,r6 ; Clear return if the mapping is being removed
2438 lwz r27,FM_ARG0+0x08(r1) ; Restore a register
2439 mtlr r0 ; Restore the return
2440 lwz r28,FM_ARG0+0x0C(r1) ; Restore a register
2441 lwz r29,FM_ARG0+0x10(r1) ; Restore a register
2442 lwz r30,FM_ARG0+0x14(r1) ; Restore a register
2443 lwz r31,FM_ARG0+0x18(r1) ; Restore a register
2444 lwz r1,0(r1) ; Pop the stack
2449 hfmBadLock: li r3,1 ; Set lock time out error code
2450 b hfmReturn ; Leave....
2452 hfmPanic: lis r0,hi16(Choke) ; System abend
2453 ori r0,r0,lo16(Choke) ; System abend
2454 li r3,failMapping ; Show that we failed some kind of mapping thing
2459 * void hw_clear_maps(void)
2461 * Remove all mappings for all phys entries.
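;
;			Illustrative C sketch (not part of the build; strip_flags() is a
;			hypothetical stand-in). It shows the sweep below: for every physent in
;			every memory region, walk the alias chain and clear the cached PTE
;			pointer (mpHValid) in each mapping.
;
;				void hw_clear_maps_sketch(void) {
;					for (struct mem_region *mr = pmap_mem_regions; mr->mrPhysTab != NULL; mr++) {
;						unsigned int count = mr->mrEnd - mr->mrStart;       // physents in this region
;						struct phys_entry *pp = mr->mrPhysTab;
;						for (unsigned int i = 0; i <= count; i++, pp++)
;							for (struct mapping *mp = strip_flags(pp->ppLink); mp != NULL;
;									mp = strip_flags(mp->mpAlias))
;								mp->mpPte &= ~mpHValid;                     // forget any cached PTE
;					}
;				}
;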
2467 .globl EXT(hw_clear_maps)
2470 mflr r10 ; Save the link register
2471 mfcr r9 ; Save the condition register
2472 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2474 lis r5,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2475 ori r5,r5,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2478 lwz r3,mrPhysTab(r5) ; Get the actual table address
2479 lwz r0,mrStart(r5) ; Get start of table entry
2480 lwz r4,mrEnd(r5) ; Get end of table entry
2481 addi r5,r5,mrSize ; Point to the next regions
2483 cmplwi r3,0 ; No more regions?
2484 beq-- hcmDone ; Leave...
2486 sub r4,r4,r0 ; Calculate physical entry count
2490 bt++ pf64Bitb,hcmNextPhys64 ; 64-bit version
2494 lwz r4,ppLink+4(r3) ; Grab the pointer to the first mapping
2495 addi r3,r3,physEntrySize ; Next phys_entry
2498 rlwinm. r4,r4,0,~ppFlags ; Clean and test mapping address
2499 beq hcmNoMap32 ; Did not find one...
2501 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2502 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2503 stw r0,mpPte(r4) ; Save the invalidated PTE offset
2505 lwz r4,mpAlias+4(r4) ; Chain on to the next
2506 b hcmNextMap32 ; Check it out...
2514 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2515 ld r4,ppLink(r3) ; Get the pointer to the first mapping
2516 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2517 addi r3,r3,physEntrySize ; Next phys_entry
2520 andc. r4,r4,r0 ; Clean and test mapping address
2521 beq hcmNoMap64 ; Did not find one...
2523 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2524 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2525 stw r0,mpPte(r4) ; Save the invalidated PTE offset
2527 ld r4,mpAlias(r4) ; Chain on to the next
2528 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2529 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2530 b hcmNextMap64 ; Check it out...
2538 mtlr r10 ; Restore the return
2539 mtcr r9 ; Restore the condition register
2540 bt++ pf64Bitb,hcmDone64 ; 64-bit version
2542 mtmsr r11 ; Restore translation/mode/etc.
2547 mtmsrd r11 ; Restore translation/mode/etc.
2554 * unsigned int hw_walk_phys(pp, preop, op, postop, parm, opmod)
2555 * walks all mappings for a physical page and performs
2556 * specified operations on each.
2558 * pp is the unlocked physent
2559 * preop is the operation to perform on the physent before the walk. This would be
2560 * used to set a cache attribute or protection
2561 * op is the operation to perform on each mapping during the walk
2562 * postop is the operation to perform on the physent after the walk. This would be
2563 * used to set or reset the RC bits.
2564 * opmod modifies the action taken on any connected PTEs visited during the walk
2567 * We return the RC bits from before postop is run.
2569 * Note that this is designed to be called from 32-bit mode with a stack.
2571 * We disable translation and all interruptions here. This keeps us
2572 * from having to worry about a deadlock due to having anything locked
2573 * and needing it to process a fault.
2575 * We lock the physent, execute preop, and then walk each mapping in turn.
2576 * If there is a PTE, it is invalidated and the RC merged into the physent.
2577 * Then we call the op function.
2578 * Then we revalidate the PTE.
2579 * Once all mappings are finished, we save the physent RC and call the
2580 * postop routine. Then we unlock the physent and return the RC.
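;
;			Illustrative C sketch (not part of the build; the op_t signature,
;			mapInvPte(), mapMergeRC(), and revalidate() are hypothetical stand-ins
;			for the 32/64-bit helpers used below). It shows the walk: preop on the
;			physent, then per mapping an optional PTE invalidate or RC merge around
;			the op call, and finally postop, returning the RC bits sampled first.
;
;				typedef int (*op_t)(void *victim, unsigned int parm);  // nonzero -> keep walking
;
;				unsigned int hw_walk_phys_sketch(struct phys_entry *pp, op_t preop, op_t op,
;						op_t postop, unsigned int parm, unsigned int opmod) {
;					unsigned int rc;
;					mapPhysLock(pp);
;					if (!preop(pp, parm)) goto early;                   // preop may stop the walk
;					for (struct mapping *mp = strip_flags(pp->ppLink); mp != NULL;
;							mp = strip_flags(mp->mpAlias)) {
;						pte_t *pte = NULL;
;						if (opmod < hwpMergePTE)       pte = mapInvPte(mp);  // invalidate PTE, merge RC
;						else if (opmod == hwpMergePTE) mapMergeRC(mp);       // merge RC only
;						int keep_going = op(mp, parm);                  // per-mapping operation
;						if (pte != NULL) revalidate(pte);               // put the PTE back
;						if (!keep_going) goto early;
;					}
;					rc = pp->ppLink & (ppR | ppC);                      // RC before postop runs
;					postop(pp, parm);
;					mapPhysUnlock(pp);
;					return rc;
;				early:
;					rc = pp->ppLink & (ppR | ppC);                      // early bail skips postop
;					mapPhysUnlock(pp);
;					return rc;
;				}
;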
2586 .globl EXT(hw_walk_phys)
2589 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2590 mflr r0 ; Save the link register
2591 stw r24,FM_ARG0+0x00(r1) ; Save a register
2592 stw r25,FM_ARG0+0x04(r1) ; Save a register
2593 stw r26,FM_ARG0+0x08(r1) ; Save a register
2594 stw r27,FM_ARG0+0x0C(r1) ; Save a register
2595 mr r24,r8 ; Save the op modifier
2596 mr r25,r7 ; Save the parm
2597 stw r28,FM_ARG0+0x10(r1) ; Save a register
2598 stw r29,FM_ARG0+0x14(r1) ; Save a register
2599 stw r30,FM_ARG0+0x18(r1) ; Save a register
2600 stw r31,FM_ARG0+0x1C(r1) ; Save a register
2601 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2603 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2605 mfsprg r26,0 ; (INSTRUMENTATION)
2606 lwz r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2607 addi r27,r27,1 ; (INSTRUMENTATION)
2608 stw r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2609 la r26,hwWalkFull(r26) ; (INSTRUMENTATION)
2610 slwi r12,r24,2 ; (INSTRUMENTATION)
2611 lwzx r27,r26,r12 ; (INSTRUMENTATION)
2612 addi r27,r27,1 ; (INSTRUMENTATION)
2613 stwx r27,r26,r12 ; (INSTRUMENTATION)
2615 mr r26,r11 ; Save the old MSR
2616 lis r27,hi16(hwpOpBase) ; Get high order of op base
2617 slwi r4,r4,7 ; Convert preop to displacement
2618 ori r27,r27,lo16(hwpOpBase) ; Get low order of op base
2619 slwi r5,r5,7 ; Convert op to displacement
2620 add r12,r4,r27 ; Point to the preop routine
2621 slwi r28,r6,7 ; Convert postop to displacement
2622 mtctr r12 ; Set preop routine
2623 add r28,r28,r27 ; Get the address of the postop routine
2624 add r27,r5,r27 ; Get the address of the op routine
2626 bl mapPhysLock ; Lock the physent
2628 mr r29,r3 ; Save the physent address
2630 bt++ pf64Bitb,hwp64 ; skip if 64-bit (only they take the hint)
2632 bctrl ; Call preop routine
2633 bne- hwpEarly32 ; preop says to bail now...
2635 cmplwi r24,hwpMergePTE ; Classify operation modifier
2636 mtctr r27 ; Set up the op function address
2637 lwz r31,ppLink+4(r3) ; Grab the pointer to the first mapping
2638 blt hwpSrc32 ; Do TLB invalidate/purge/merge/reload for each mapping
2639 beq hwpMSrc32 ; Do TLB merge for each mapping
2641 hwpQSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2642 beq hwpNone32 ; Did not find one...
2644 bctrl ; Call the op function
2646 bne- hwpEarly32 ; op says to bail now...
2647 lwz r31,mpAlias+4(r31) ; Chain on to the next
2648 b hwpQSrc32 ; Check it out...
2651 hwpMSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2652 beq hwpNone32 ; Did not find one...
2654 bl mapMergeRC32 ; Merge reference and change into mapping and physent
2655 bctrl ; Call the op function
2657 bne- hwpEarly32 ; op says to bail now...
2658 lwz r31,mpAlias+4(r31) ; Chain on to the next
2659 b hwpMSrc32 ; Check it out...
2662 hwpSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2663 beq hwpNone32 ; Did not find one...
2666 ; Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4,
2667 ; PTE low in R5. The PCA address is in R7. The PTEG comes back locked.
2668 ; If there is no PTE, PTE low is obtained from the mapping
2670 bl mapInvPte32 ; Invalidate and lock PTE, also merge into physent
2672 bctrl ; Call the op function
2674 crmove cr1_eq,cr0_eq ; Save the return code
2676 mr. r3,r3 ; Was there a previously valid PTE?
2677 beq- hwpNxt32 ; Nope...
2679 stw r5,4(r3) ; Store second half of PTE
2680 eieio ; Make sure we do not reorder
2681 stw r4,0(r3) ; Revalidate the PTE
2683 eieio ; Make sure all updates come first
2684 stw r6,0(r7) ; Unlock the PCA
2686 hwpNxt32: bne- cr1,hwpEarly32 ; op says to bail now...
2687 lwz r31,mpAlias+4(r31) ; Chain on to the next
2688 b hwpSrc32 ; Check it out...
2692 hwpNone32: mtctr r28 ; Get the post routine address
2694 lwz r30,ppLink+4(r29) ; Save the old RC
2695 mr r3,r29 ; Get the physent address
2696 bctrl ; Call post routine
2698 bl mapPhysUnlock ; Unlock the physent
2700 mtmsr r26 ; Restore translation/mode/etc.
2703 b hwpReturn ; Go restore registers and return...
2707 hwpEarly32: lwz r30,ppLink+4(r29) ; Save the old RC
2708 mr r3,r29 ; Get the physent address
2709 bl mapPhysUnlock ; Unlock the physent
2711 mtmsr r26 ; Restore translation/mode/etc.
2714 b hwpReturn ; Go restore registers and return...
2718 hwp64: bctrl ; Call preop routine
2719 bne-- hwpEarly64 ; preop says to bail now...
2721 cmplwi r24,hwpMergePTE ; Classify operation modifier
2722 mtctr r27 ; Set up the op function address
2725 ld r31,ppLink(r3) ; Get the pointer to the first mapping
2726 rotrdi r24,r24,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
2727 blt hwpSrc64 ; Do TLB invalidate/purge/merge/reload for each mapping
2728 beq hwpMSrc64 ; Do TLB merge for each mapping
2730 hwpQSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2731 beq hwpNone64 ; Did not find one...
2733 bctrl ; Call the op function
2735 bne-- hwpEarly64 ; op says to bail now...
2736 ld r31,mpAlias(r31) ; Chain on to the next
2737 b hwpQSrc64 ; Check it out...
2740 hwpMSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2741 beq hwpNone64 ; Did not find one...
2743 bl mapMergeRC64 ; Merge reference and change into mapping and physent
2744 bctrl ; Call the op function
2746 bne-- hwpEarly64 ; op says to bail now...
2747 ld r31,mpAlias(r31) ; Chain on to the next
2748 b hwpMSrc64 ; Check it out...
2751 hwpSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2752 beq hwpNone64 ; Did not find one...
2754 ; Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4,
2755 ; PTE low in R5. PTEG comes back locked if there is one
2757 bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
2759 bctrl ; Call the op function
2761 crmove cr1_eq,cr0_eq ; Save the return code
2763 mr. r3,r3 ; Was there a previously valid PTE?
2764 beq-- hwpNxt64 ; Nope...
2766 std r5,8(r3) ; Save bottom of PTE
2767 eieio ; Make sure we do not reorder
2768 std r4,0(r3) ; Revalidate the PTE
2770 eieio ; Make sure all updates come first
2771 stw r6,0(r7) ; Unlock the PCA
2773 hwpNxt64: bne-- cr1,hwpEarly64 ; op says to bail now...
2774 ld r31,mpAlias(r31) ; Chain on to the next
2775 b hwpSrc64 ; Check it out...
2779 hwpNone64: mtctr r28 ; Get the post routine address
2781 lwz r30,ppLink+4(r29) ; Save the old RC
2782 mr r3,r29 ; Get the physent address
2783 bctrl ; Call post routine
2785 bl mapPhysUnlock ; Unlock the physent
2787 mtmsrd r26 ; Restore translation/mode/etc.
2789 b hwpReturn ; Go restore registers and return...
2793 hwpEarly64: lwz r30,ppLink+4(r29) ; Save the old RC
2794 mr r3,r29 ; Get the physent address
2795 bl mapPhysUnlock ; Unlock the physent
2797 mtmsrd r26 ; Restore translation/mode/etc.
2800 hwpReturn: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2801 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
2802 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
2803 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
2804 mr r3,r30 ; Pass back the RC
2805 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
2806 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
2807 mtlr r0 ; Restore the return
2808 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
2809 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
2810 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
2811 lwz r1,0(r1) ; Pop the stack
2816 ; The preop/op/postop function table.
2817 ; Each function must start on a 128-byte boundary and be no more than
2818 ; 32 instructions (128 bytes). If more than 32, we must fix address calculations
2819 ; at the start of hwpOpBase
2821 ; The routine must set CR0_EQ in order to continue scan.
2822 ; If CR0_EQ is not set, an early return from the function is made.
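;
;			Illustrative C sketch of how hw_walk_phys picks an entry from this table
;			(not part of the build): each operation index selects a 128-byte slot,
;			which is why the functions below are pinned at hwpOpBase+(n*128).
;
;				typedef int (*hwp_op_t)(void);
;
;				static hwp_op_t hwp_op(unsigned int index) {
;					extern char hwpOpBase[];                              // start of the table below
;					return (hwp_op_t)(void *)(hwpOpBase + (index << 7));  // index * 128 bytes
;				}
;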
2829 ; Function 0 - No operation
2831 hwpNoop: cmplw r0,r0 ; Make sure CR0_EQ is set
2832 blr ; Just return...
2836 ; This is the continuation of function 4 - Set attributes in mapping
2838 ; We changed the attributes of a mapped page. Make sure there are no cache paradoxes.
2839 ; NOTE: Do we have to deal with i-cache here?
2841 hwpSAM: li r11,4096 ; Get page size
2843 hwpSAMinvd: sub. r11,r11,r9 ; Back off a line
2844 dcbf r11,r5 ; Flush the line in the data cache
2845 bgt++ hwpSAMinvd ; Go do the rest of it...
2847 sync ; Make sure it is done
2849 li r11,4096 ; Get page size
2851 hwpSAMinvi: sub. r11,r11,r9 ; Back off a line
2852 icbi r11,r5 ; Flush the line in the icache
2853 bgt++ hwpSAMinvi ; Go do the rest of it...
2855 sync ; Make sure it is done
2857 cmpw r0,r0 ; Make sure we return CR0_EQ
2861 ; Function 1 - Set protection in physent (obsolete)
2863 .set .,hwpOpBase+(1*128) ; Generate error if previous function too long
2865 hwpSPrtPhy: cmplw r0,r0 ; Make sure we return CR0_EQ
2869 ; Function 2 - Set protection in mapping
2871 ; NOTE: Changes to no-execute permission are ignored
2873 .set .,hwpOpBase+(2*128) ; Generate error if previous function too long
2875 hwpSPrtMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2876 lwz r8,mpVAddr+4(r31) ; Get the protection part of mapping
2877 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2878 li r0,lo16(mpPP) ; Get protection bits
2879 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2880 rlwinm r2,r25,0,mpPP ; Isolate new protection bits
2881 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2882 andc r5,r5,r0 ; Clear the old prot bits
2883 or r5,r5,r2 ; Move in the new prot bits
2884 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2885 cmpw r0,r0 ; Make sure we return CR0_EQ
2886 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2889 ; Function 3 - Set attributes in physent
2891 .set .,hwpOpBase+(3*128) ; Generate error if previous function too long
2893 hwpSAtrPhy: li r5,ppLink ; Get offset for flag part of physent
2895 hwpSAtrPhX: lwarx r4,r5,r29 ; Get the old flags
2896 rlwimi r4,r25,0,ppIb,ppGb ; Stick in the new attributes
2897 stwcx. r4,r5,r29 ; Try to stuff it
2898 bne-- hwpSAtrPhX ; Try again...
2899 ; Note: CR0_EQ is set because of stwcx.
2902 ; Function 4 - Set attributes in mapping
2904 .set .,hwpOpBase+(4*128) ; Generate error if previous function too long
2906 hwpSAtrMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2907 lwz r8,mpVAddr+4(r31) ; Get the attribute part of mapping
2908 li r2,mpM ; Force on coherent
2909 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2910 li r0,lo16(mpWIMG) ; Get wimg mask
2911 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2912 rlwimi r2,r25,32-(mpIb-32-ppIb),mpIb-32,mpIb-32
2913 ; Copy in the cache inhibited bit
2914 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2915 andc r5,r5,r0 ; Clear the old wimg
2916 rlwimi r2,r25,32-(mpGb-32-ppGb),mpGb-32,mpGb-32
2917 ; Copy in the guarded bit
2918 mfsprg r9,2 ; Feature flags
2919 or r5,r5,r2 ; Move in the new wimg
2920 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2921 lwz r2,mpPAddr(r31) ; Get the physical address
2922 li r0,0xFFF ; Start a mask
2923 andi. r9,r9,pf32Byte+pf128Byte ; Get cache line size
2924 rlwinm r5,r0,0,1,0 ; Copy to top half
2925 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2926 rlwinm r2,r2,12,1,0 ; Copy to top and rotate to make physical address with junk left
2927 and r5,r5,r2 ; Clean stuff in top 32 bits
2928 andc r2,r2,r0 ; Clean bottom too
2929 rlwimi r5,r2,0,0,31 ; Insert low 32 to make full physical address
2930 b hwpSAM ; Join common
2932 ; NOTE: we moved the remainder of the code out of here because it
2933 ; did not fit in the 128 bytes allotted. It got stuck into the free space
2934 ; at the end of the no-op function.
2939 ; Function 5 - Clear reference in physent
2941 .set .,hwpOpBase+(5*128) ; Generate error if previous function too long
2943 hwpCRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2945 hwpCRefPhX: lwarx r4,r5,r29 ; Get the old flags
2946 rlwinm r4,r4,0,ppRb+1-32,ppRb-1-32 ; Clear R
2947 stwcx. r4,r5,r29 ; Try to stuff it
2948 bne-- hwpCRefPhX ; Try again...
2949 ; Note: CR0_EQ is set because of stwcx.
2953 ; Function 6 - Clear reference in mapping
2955 .set .,hwpOpBase+(6*128) ; Generate error if previous function too long
2957 hwpCRefMap: li r0,lo16(mpR) ; Get reference bit
2958 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2959 andc r5,r5,r0 ; Clear in PTE copy
2960 andc r8,r8,r0 ; and in the mapping
2961 cmpw r0,r0 ; Make sure we return CR0_EQ
2962 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2966 ; Function 7 - Clear change in physent
2968 .set .,hwpOpBase+(7*128) ; Generate error if previous function too long
2970 hwpCCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2972 hwpCCngPhX: lwarx r4,r5,r29 ; Get the old flags
2973 rlwinm r4,r4,0,ppCb+1-32,ppCb-1-32 ; Clear C
2974 stwcx. r4,r5,r29 ; Try to stuff it
2975 bne-- hwpCCngPhX ; Try again...
2976 ; Note: CR0_EQ is set because of stwcx.
2980 ; Function 8 - Clear change in mapping
2982 .set .,hwpOpBase+(8*128) ; Generate error if previous function too long
2984 hwpCCngMap: li r0,lo16(mpC) ; Get change bit
2985 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2986 andc r5,r5,r0 ; Clear in PTE copy
2987 andc r8,r8,r0 ; and in the mapping
2988 cmpw r0,r0 ; Make sure we return CR0_EQ
2989 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2993 ; Function 9 - Set reference in physent
2995 .set .,hwpOpBase+(9*128) ; Generate error if previous function too long
2997 hwpSRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2999 hwpSRefPhX: lwarx r4,r5,r29 ; Get the old flags
3000 ori r4,r4,lo16(ppR) ; Set the reference
3001 stwcx. r4,r5,r29 ; Try to stuff it
3002 bne-- hwpSRefPhX ; Try again...
3003 ; Note: CR0_EQ is set because of stwcx.
3007 ; Function 10 - Set reference in mapping
3009 .set .,hwpOpBase+(10*128) ; Generate error if previous function too long
3011 hwpSRefMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3012 ori r8,r8,lo16(mpR) ; Set reference in mapping
3013 cmpw r0,r0 ; Make sure we return CR0_EQ
3014 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3017 ; Function 11 - Set change in physent
3019 .set .,hwpOpBase+(11*128) ; Generate error if previous function too long
3021 hwpSCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3023 hwpSCngPhX: lwarx r4,r5,r29 ; Get the old flags
3024 ori r4,r4,lo16(ppC) ; Set the change bit
3025 stwcx. r4,r5,r29 ; Try to stuff it
3026 bne-- hwpSCngPhX ; Try again...
3027 ; Note: CR0_EQ is set because of stwcx.
3030 ; Function 12 - Set change in mapping
3032 .set .,hwpOpBase+(12*128) ; Generate error if previous function too long
3034 hwpSCngMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3035 ori r8,r8,lo16(mpC) ; Set change in mapping
3036 cmpw r0,r0 ; Make sure we return CR0_EQ
3037 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3040 ; Function 13 - Test reference in physent
3042 .set .,hwpOpBase+(13*128) ; Generate error if previous function too long
3044 hwpTRefPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3045 rlwinm. r0,r0,0,ppRb-32,ppRb-32 ; Isolate reference bit and see if 0
3046 blr ; Return (CR0_EQ set to continue if reference is off)...
3049 ; Function 14 - Test reference in mapping
3051 .set .,hwpOpBase+(14*128) ; Generate error if previous function too long
3053 hwpTRefMap: rlwinm. r0,r5,0,mpRb-32,mpRb-32 ; Isolate reference bit and see if 0
3054 blr ; Return (CR0_EQ set to continue if reference is off)...
3057 ; Function 15 - Test change in physent
3059 .set .,hwpOpBase+(15*128) ; Generate error if previous function too long
3061 hwpTCngPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3062 rlwinm. r0,r0,0,ppCb-32,ppCb-32 ; Isolate change bit and see if 0
3063 blr ; Return (CR0_EQ set to continue if change is off)...
3066 ; Function 16 - Test change in mapping
3068 .set .,hwpOpBase+(16*128) ; Generate error if previous function too long
3070 hwpTCngMap: rlwinm. r0,r5,0,mpCb-32,mpCb-32 ; Isolate change bit and see if 0
3071 blr ; Return (CR0_EQ set to continue if change is off)...
3074 ; Function 17 - Test reference and change in physent
3076 .set .,hwpOpBase+(17*128) ; Generate error if previous function too long
3079 lwz r0,ppLink+4(r29) ; Get the flags from physent
3080 rlwinm r0,r0,0,ppRb-32,ppCb-32 ; Isolate reference and change bits
3081 cmplwi r0,lo16(ppR|ppC) ; cr0_eq <- ((R == 1) && (C == 1))
3082 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3083 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3086 ; Function 18 - Test reference and change in mapping
3088 .set .,hwpOpBase+(18*128) ; Generate error if previous function too long
3090 rlwinm r0,r5,0,mpRb-32,mpCb-32 ; Isolate reference and change bits from mapping
3091 cmplwi r0,lo16(mpR|mpC) ; cr0_eq <- ((R == 1) && (C == 1))
3092 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3093 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3096 ; Function 19 - Clear reference and change in physent
3098 .set .,hwpOpBase+(19*128) ; Generate error if previous function too long
3100 li r5,ppLink+4 ; Get offset for flag part of physent
3103 lwarx r4,r5,r29 ; Get the old flags
3104 andc r4,r4,r25 ; Clear R and C as specified by mask
3105 stwcx. r4,r5,r29 ; Try to stuff it
3106 bne-- hwpCRefCngPhX ; Try again...
3107 ; Note: CR0_EQ is set because of stwcx.
3111 ; Function 20 - Clear reference and change in mapping
3113 .set .,hwpOpBase+(20*128) ; Generate error if previous function too long
3115 srwi r0,r25,(ppRb - mpRb) ; Align reference/change clear mask (phys->map)
3116 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3117 andc r5,r5,r0 ; Clear in PTE copy
3118 andc r8,r8,r0 ; and in the mapping
3119 cmpw r0,r0 ; Make sure we return CR0_EQ
3120 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3124 .set .,hwpOpBase+(21*128) ; Generate error if previous function too long
3127 ; unsigned int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping.
3130 ; mapRtOK - if all is ok
3131 ; mapRtBadLk - if mapping lock fails
3132 ; mapRtPerm - if mapping is permanent
3133 ; mapRtNotFnd - if mapping is not found
3134 ; mapRtBlock - if mapping is a block
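;
;			Illustrative C sketch (not part of the build; mapSearch's C signature,
;			mapInvPte(), set_pp_bits(), and revalidate() are hypothetical stand-ins).
;			It shows the flow below: search for the mapping, refuse blocks, permanent
;			mappings, and mappings being removed, otherwise pull the PTE, set the new
;			protection in the mapping, and put the PTE back.
;
;				unsigned int hw_protect_sketch(pmap_t pmap, addr64_t va, unsigned int prot,
;						addr64_t *nextva) {
;					if (sxlkShared(&pmap->pmapSXlk) != 0) return mapRtBadLk;
;					unsigned int flags, rc;
;					struct mapping *mp = mapSearch(pmap, va, nextva, &flags);
;					if (mp == NULL || (flags & mpRIP))       rc = mapRtNotFnd;  // gone or going away
;					else if ((flags & mpType) == mpBlock)    rc = mapRtBlock;   // blocks are not changed here
;					else if (flags & mpPerm)                 rc = mapRtPerm;    // neither are permanent mappings
;					else {
;						pte_t *pte = mapInvPte(mp);          // take the PTE out of play
;						set_pp_bits(mp, prot);               // new protection into the mapping
;						if (pte != NULL) revalidate(pte);    // and back into the hash table
;						rc = mapRtOK;
;					}
;					sxlkUnlock(&pmap->pmapSXlk);
;					return rc;
;				}
;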
3137 .globl EXT(hw_protect)
3140 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3141 mflr r0 ; Save the link register
3142 stw r24,FM_ARG0+0x00(r1) ; Save a register
3143 stw r25,FM_ARG0+0x04(r1) ; Save a register
3144 mr r25,r7 ; Remember address of next va
3145 stw r26,FM_ARG0+0x08(r1) ; Save a register
3146 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3147 stw r28,FM_ARG0+0x10(r1) ; Save a register
3148 mr r24,r6 ; Save the new protection flags
3149 stw r29,FM_ARG0+0x14(r1) ; Save a register
3150 stw r30,FM_ARG0+0x18(r1) ; Save a register
3151 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3152 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
3155 lwz r11,pmapFlags(r3) ; Get pmaps flags
3156 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3157 bne hpPanic ; Call not valid for guest shadow assist pmap
3160 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3161 lwz r7,pmapvr+4(r3) ; Get the second part
3164 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
3166 mr r27,r11 ; Remember the old MSR
3167 mr r26,r12 ; Remember the feature bits
3169 xor r28,r3,r7 ; Change the common 32- and 64-bit half
3171 bf-- pf64Bitb,hpSF1 ; skip if 32-bit...
3173 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3175 hpSF1: mr r29,r4 ; Save top half of vaddr
3176 mr r30,r5 ; Save the bottom half
3178 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3179 bl sxlkShared ; Go get a shared lock on the mapping lists
3180 mr. r3,r3 ; Did we get the lock?
3181 bne-- hpBadLock ; Nope...
3183 mr r3,r28 ; get the pmap address
3184 mr r4,r29 ; Get bits 0:31 to look for
3185 mr r5,r30 ; Get bits 32:64
3187 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
3189 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3190 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3191 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3192 cror cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
3193 mr. r31,r3 ; Save the mapping if we found it
3194 mr r29,r4 ; Save next va high half
3195 mr r30,r5 ; Save next va low half
3197 beq-- hpNotFound ; Not found...
3199 bf-- cr1_eq,hpNotAllowed ; Something special is happening...
3201 bt++ pf64Bitb,hpDo64 ; Split for 64 bit
3203 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3205 rlwimi r5,r24,0,mpPPb-32,mpPPe-32 ; Stick in the new pp (note that we ignore no-execute for 32-bit)
3206 mr. r3,r3 ; Was there a previously valid PTE?
3208 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3210 beq-- hpNoOld32 ; Nope...
3212 stw r5,4(r3) ; Store second half of PTE
3213 eieio ; Make sure we do not reorder
3214 stw r4,0(r3) ; Revalidate the PTE
3216 eieio ; Make sure all updates come first
3217 stw r6,0(r7) ; Unlock PCA
3219 hpNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3220 bl sxlkUnlock ; Unlock the search list
3222 li r3,mapRtOK ; Set normal return
3223 b hpR32 ; Join common...
3228 hpDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3230 rldimi r5,r24,0,mpNb ; Stick in the new no-execute and pp bits
3231 mr. r3,r3 ; Was there a previously valid PTE?
3233 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3235 beq-- hpNoOld64 ; Nope...
3237 std r5,8(r3) ; Store second half of PTE
3238 eieio ; Make sure we do not reorder
3239 std r4,0(r3) ; Revalidate the PTE
3241 eieio ; Make sure all updates come first
3242 stw r6,0(r7) ; Unlock PCA
3244 hpNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3245 bl sxlkUnlock ; Unlock the search list
3247 li r3,mapRtOK ; Set normal return
3248 b hpR64 ; Join common...
3252 hpReturn: bt++ pf64Bitb,hpR64 ; Yes...
3254 hpR32: mtmsr r27 ; Restore enables/translation/etc.
3256 b hpReturnC ; Join common...
3258 hpR64: mtmsrd r27 ; Restore enables/translation/etc.
3261 hpReturnC: stw r29,0(r25) ; Save the top of the next va
3262 stw r30,4(r25) ; Save the bottom of the next va
3263 lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return address
3264 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3265 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3266 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3267 mtlr r0 ; Restore the return
3268 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3269 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3270 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3271 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3272 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3273 lwz r1,0(r1) ; Pop the stack
3278 hpBadLock: li r3,mapRtBadLk ; Set lock time out error code
3279 b hpReturn ; Leave....
3281 hpNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3282 bl sxlkUnlock ; Unlock the search list
3284 li r3,mapRtNotFnd ; Set that we did not find the requested page
3285 b hpReturn ; Leave....
3288 rlwinm. r0,r7,0,mpRIPb,mpRIPb ; Is it actually being removed?
3289 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3290 bne-- hpNotFound ; Yeah...
3291 bl sxlkUnlock ; Unlock the search list
3293 li r3,mapRtBlock ; Assume it was a block
3294 rlwinm r0,r7,0,mpType ; Isolate mapping type
3295 cmplwi r0,mpBlock ; Is this a block mapping?
3296 beq++ hpReturn ; Yes, leave...
3298 li r3,mapRtPerm ; Set that we hit a permanent page
3299 b hpReturn ; Leave....
3301 hpPanic: lis r0,hi16(Choke) ; System abend
3302 ori r0,r0,lo16(Choke) ; System abend
3303 li r3,failMapping ; Show that we failed some kind of mapping thing
3308 ; int hw_test_rc(pmap, va, reset) - tests RC on a specific va
3310 ; Returns following code ORed with RC from mapping
3311 ; mapRtOK - if all is ok
3312 ; mapRtBadLk - if mapping lock fails
3313 ; mapRtNotFnd - if mapping is not found
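;
;			Illustrative sketch (commentary only): the status code comes back in the low-order
;			bits with the mapping's R and C bits ORed in above it, so a caller might decode
;			the result roughly as follows (the 0x0F status mask is an assumption):
;
;				unsigned int rc = hw_test_rc(pmap, va, 1);      /* 1 = reset RC after reading */
;				if ((rc & 0x0F) == mapRtOK) {
;					int referenced = (rc & mpR) != 0;
;					int changed    = (rc & mpC) != 0;
;				}
;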
3316 .globl EXT(hw_test_rc)
3319 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3320 mflr r0 ; Save the link register
3321 stw r24,FM_ARG0+0x00(r1) ; Save a register
3322 stw r25,FM_ARG0+0x04(r1) ; Save a register
3323 stw r26,FM_ARG0+0x08(r1) ; Save a register
3324 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3325 stw r28,FM_ARG0+0x10(r1) ; Save a register
3326 mr r24,r6 ; Save the reset request
3327 stw r29,FM_ARG0+0x14(r1) ; Save a register
3328 stw r30,FM_ARG0+0x18(r1) ; Save a register
3329 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3330 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
3333 lwz r11,pmapFlags(r3) ; Get pmaps flags
3334 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3335 bne htrPanic ; Call not valid for guest shadow assist pmap
3338 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3339 lwz r7,pmapvr+4(r3) ; Get the second part
3342 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
3344 mr r27,r11 ; Remember the old MSR
3345 mr r26,r12 ; Remember the feature bits
3347 xor r28,r3,r7 ; Change the common 32- and 64-bit half
3349 bf-- pf64Bitb,htrSF1 ; skip if 32-bit...
3351 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3353 htrSF1: mr r29,r4 ; Save top half of vaddr
3354 mr r30,r5 ; Save the bottom half
3356 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3357 bl sxlkShared ; Go get a shared lock on the mapping lists
3358 mr. r3,r3 ; Did we get the lock?
3360 bne-- htrBadLock ; Nope...
3362 mr r3,r28 ; get the pmap address
3363 mr r4,r29 ; Get bits 0:31 to look for
3364 mr r5,r30 ; Get bits 32:64
3366 bl EXT(mapSearch) ; Go see if we can find it (R7 comes back with mpFlags)
3368 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3369 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3370 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3371 crand cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
3372 mr. r31,r3 ; Save the mapping if we found it
3373 crandc cr1_eq,cr1_eq,cr0_eq ; cr1_eq <- found & normal & not permanent & not being removed
3375 bf-- cr1_eq,htrNotFound ; Not found, something special, or being removed...
3377 bt++ pf64Bitb,htrDo64 ; Split for 64 bit
3379 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3381 cmplwi cr1,r24,0 ; Do we want to clear RC?
3382 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3383 mr. r3,r3 ; Was there a previously valid PTE?
3384 li r0,lo16(mpR|mpC) ; Get bits to clear
3386 and r25,r5,r0 ; Save the RC bits
3387 beq++ cr1,htrNoClr32 ; Nope...
3389 andc r12,r12,r0 ; Clear mapping copy of RC
3390 andc r5,r5,r0 ; Clear PTE copy of RC
3391 sth r12,mpVAddr+6(r31) ; Set the new RC
3393 htrNoClr32: beq-- htrNoOld32 ; No previously valid PTE...
3395 sth r5,6(r3) ; Store updated RC
3396 eieio ; Make sure we do not reorder
3397 stw r4,0(r3) ; Revalidate the PTE
3399 eieio ; Make sure all updates come first
3400 stw r6,0(r7) ; Unlock PCA
3402 htrNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3403 bl sxlkUnlock ; Unlock the search list
3404 li r3,mapRtOK ; Set normal return
3405 b htrR32 ; Join common...
3410 htrDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3412 cmplwi cr1,r24,0 ; Do we want to clear RC?
3413 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3414 mr. r3,r3 ; Was there a previously valid PTE?
3415 li r0,lo16(mpR|mpC) ; Get bits to clear
3417 and r25,r5,r0 ; Save the RC bits
3418 beq++ cr1,htrNoClr64 ; Nope...
3420 andc r12,r12,r0 ; Clear mapping copy of RC
3421 andc r5,r5,r0 ; Clear PTE copy of RC
3422 sth r12,mpVAddr+6(r31) ; Set the new RC
3424 htrNoClr64: beq-- htrNoOld64 ; Nope, no previous PTE...
3426 sth r5,14(r3) ; Store updated RC
3427 eieio ; Make sure we do not reorder
3428 std r4,0(r3) ; Revalidate the PTE
3430 eieio ; Make sure all updates come first
3431 stw r6,0(r7) ; Unlock PCA
3433 htrNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3434 bl sxlkUnlock ; Unlock the search list
3435 li r3,mapRtOK ; Set normal return
3436 b htrR64 ; Join common...
3440 htrReturn: bt++ pf64Bitb,htrR64 ; Yes...
3442 htrR32: mtmsr r27 ; Restore enables/translation/etc.
3444 b htrReturnC ; Join common...
3446 htrR64: mtmsrd r27 ; Restore enables/translation/etc.
3449 htrReturnC: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return address
3450 or r3,r3,r25 ; Send the RC bits back
3451 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
3452 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
3453 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
3454 mtlr r0 ; Restore the return
3455 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
3456 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
3457 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
3458 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
3459 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
3460 lwz r1,0(r1) ; Pop the stack
3465 htrBadLock: li r3,mapRtBadLk ; Set lock time out error code
3466 b htrReturn ; Leave....
3469 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3470 bl sxlkUnlock ; Unlock the search list
3472 li r3,mapRtNotFnd ; Set that we did not find the requested page
3473 b htrReturn ; Leave....
3475 htrPanic: lis r0,hi16(Choke) ; System abend
3476 ori r0,r0,lo16(Choke) ; System abend
3477 li r3,failMapping ; Show that we failed some kind of mapping thing
3483 ; mapFindLockPN - find and lock physent for a given page number
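;
;			Illustrative sketch (commentary only) of the bank search below, in rough C using
;			the mrStart/mrEnd/mrPhysTab fields referenced by the code; the loop shape is a
;			sketch, not the exact register flow.
;
;				for (bank = &pmap_mem_regions[0]; bank->mrPhysTab != NULL; bank++)
;					if (pn >= bank->mrStart && pn <= bank->mrEnd) {
;						physent = &bank->mrPhysTab[pn - bank->mrStart];
;						mapPhysLock(physent);           /* joins the common lock code */
;						return physent;
;					}
;				return NULL;                            /* page is not in any bank    */
;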
3488 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3489 mr r2,r3 ; Save our target
3490 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3492 mapFLPNitr: lwz r3,mrPhysTab(r9) ; Get the actual table address
3493 lwz r5,mrStart(r9) ; Get start of table entry
3494 lwz r0,mrEnd(r9) ; Get end of table entry
3495 addi r9,r9,mrSize ; Point to the next slot
3496 cmplwi cr7,r3,0 ; Are we at the end of the table?
3497 cmplw r2,r5 ; See if we are in this table
3498 cmplw cr1,r2,r0 ; Check end also
3499 sub r4,r2,r5 ; Calculate index to physical entry
3500 beq-- cr7,mapFLPNmiss ; Leave if we did not find an entry...
3501 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
3502 slwi r4,r4,3 ; Get offset to physical entry
3504 blt-- mapFLPNitr ; Did not find it...
3506 add r3,r3,r4 ; Point right to the slot
3507 b mapPhysLock ; Join common lock code
3510 li r3,0 ; Show that we did not find it
3515 ; mapPhysFindLock - find physent list and lock it
3516 ; R31 points to mapping
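;
;			Illustrative sketch (commentary only) in rough C; physent_bank_index() is a
;			hypothetical stand-in for the mpFlags byte extraction done below.
;
;				bank    = &pmap_mem_regions[physent_bank_index(mp)];
;				physent = &bank->mrPhysTab[mp->mpPAddr - bank->mrStart];
;				mapPhysLock(physent);
;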
3521 lbz r4,mpFlags+1(r31) ; Get the index into the physent bank table
3522 lis r3,ha16(EXT(pmap_mem_regions)) ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part)
3523 rlwinm r4,r4,2,24,29 ; Mask index bits and convert to byte offset
3524 addi r4,r4,lo16(EXT(pmap_mem_regions)) ; Get low part of address of entry
3525 add r3,r3,r4 ; Point to table entry
3526 lwz r5,mpPAddr(r31) ; Get physical page number
3527 lwz r7,mrStart(r3) ; Get the start of range
3528 lwz r3,mrPhysTab(r3) ; Get the start of the entries for this bank
3529 sub r6,r5,r7 ; Get index to physent
3530 rlwinm r6,r6,3,0,28 ; Get offset to physent
3531 add r3,r3,r6 ; Point right to the physent
3532 b mapPhysLock ; Join in the lock...
3535 ; mapPhysLock - lock a physent list
3536 ; R3 contains list header
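;
;			Illustrative sketch (commentary only) of the locking below, in rough C.  Bit 0 of
;			ppLink is the lock; atomic_cas32() is a hypothetical stand-in for the
;			lwarx/stwcx. pair.
;
;				for (;;) {
;					unsigned int old = physent->ppLink;             /* spin without taking   */
;					if (old & 0x80000000) continue;                 /* a reservation first   */
;					if (atomic_cas32(&physent->ppLink, old, old | 0x80000000))
;						break;                                      /* got the lock          */
;				}
;				isync();                                            /* no speculation past it */
;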
3541 li r2,lgKillResv ; Get a spot to kill reservation
3542 stwcx. r2,0,r2 ; Kill it...
3545 lwz r2,ppLink(r3) ; Get physent chain header
3546 rlwinm. r2,r2,0,0,0 ; Is lock clear?
3547 bne-- mapPhysLockT ; Nope, still locked...
3550 lwarx r2,0,r3 ; Get the lock
3551 rlwinm. r0,r2,0,0,0 ; Is it locked?
3552 oris r0,r2,0x8000 ; Set the lock bit
3553 bne-- mapPhysLockS ; It is locked, spin on it...
3554 stwcx. r0,0,r3 ; Try to stuff it back...
3555 bne-- mapPhysLock ; Collision, try again...
3556 isync ; Clear any speculations
3561 ; mapPhysUnlock - unlock a physent list
3562 ; R3 contains list header
3567 lwz r0,ppLink(r3) ; Get physent chain header
3568 rlwinm r0,r0,0,1,31 ; Clear the lock bit
3569 eieio ; Make sure unlock comes last
3570 stw r0,ppLink(r3) ; Unlock the list
3574 ; mapPhysMerge - merge the RC bits into the master copy
3575 ; R3 points to the physent
3576 ; R4 contains the RC bits
3578 ; Note: we just return if RC is 0
3583 rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits
3584 la r5,ppLink+4(r3) ; Point to the RC field
3585 beqlr-- ; Leave if RC is 0...
3588 lwarx r6,0,r5 ; Get the RC part
3589 or r6,r6,r4 ; Merge in the RC
3590 stwcx. r6,0,r5 ; Try to stuff it back...
3591 bne-- mapPhysMergeT ; Collision, try again...
3595 ; Sets the physent link pointer and preserves all flags
3596 ; The list is locked
3597 ; R3 points to physent
3598 ; R4 has link to set
3604 la r5,ppLink+4(r3) ; Point to the link word
3607 lwarx r2,0,r5 ; Get the link and flags
3608 rlwimi r4,r2,0,ppFlags ; Insert the flags
3609 stwcx. r4,0,r5 ; Stick them back
3610 bne-- mapPhyCSetR ; Someone else did something, try again...
3616 li r0,ppLFAmask ; Get mask to clean up mapping pointer
3617 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
3620 ldarx r2,0,r3 ; Get the link and flags
3621 and r5,r2,r0 ; Isolate the flags
3622 or r6,r4,r5 ; Add them to the link
3623 stdcx. r6,0,r3 ; Stick them back
3624 bne-- mapPhyCSet64x ; Someone else did something, try again...
3628 ; mapBumpBusy - increment the busy count on a mapping
3629 ; R3 points to mapping
3635 lwarx r4,0,r3 ; Get mpBusy
3636 addis r4,r4,0x0100 ; Bump the busy count
3637 stwcx. r4,0,r3 ; Save it back
3638 bne-- mapBumpBusy ; This did not work, try again...
3642 ; mapDropBusy - decrement the busy count on a mapping
3643 ; R3 points to mapping
3646 .globl EXT(mapping_drop_busy)
3649 LEXT(mapping_drop_busy)
3651 lwarx r4,0,r3 ; Get mpBusy
3652 addis r4,r4,0xFF00 ; Drop the busy count
3653 stwcx. r4,0,r3 ; Save it back
3654 bne-- mapDropBusy ; This did not work, try again...
3658 ; mapDrainBusy - drain the busy count on a mapping
3659 ; R3 points to mapping
3660 ; Note: we already have a busy for ourselves. Only one
3661 ; busy per processor is allowed, so we just spin here
3662 ; waiting for the count to drop to 1.
3663 ; Also, the mapping can not be on any lists when we do this
3664 ; so all we are doing is waiting until it can be released.
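;
;			Illustrative sketch (commentary only): the busy count lives in the high-order
;			byte of mpFlags, so mapBumpBusy/mapDropBusy above and the drain loop below are
;			roughly the following C (atomic_add32 is a hypothetical stand-in for the
;			lwarx/stwcx. loops):
;
;				atomic_add32(&mp->mpFlags,  0x01000000);        /* mapBumpBusy: busy++        */
;				atomic_add32(&mp->mpFlags, -0x01000000);        /* mapDropBusy: busy--        */
;				while (((mp->mpFlags >> 24) & 0xFF) != 1)       /* mapDrainBusy: wait until   */
;					continue;                                   /* only our own busy is left  */
;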
3670 lwz r4,mpFlags(r3) ; Get mpBusy
3671 rlwinm r4,r4,8,24,31 ; Clean it up
3672 cmplwi r4,1 ; Is it just our busy?
3673 beqlr++ ; Yeah, it is clear...
3674 b mapDrainBusy ; Try again...
3679 ; handleDSeg - handle a data segment fault
3680 ; handleISeg - handle an instruction segment fault
3682 ; All that we do here is to map these to DSI or ISI and ensure
3683 ; that the hash bit is not set. This forces the fault code
3684 ; to also handle the missing segment.
3686 ; At entry R2 contains per_proc, R13 contains savarea pointer,
3687 ; and R11 is the exception code.
3691 .globl EXT(handleDSeg)
3695 li r11,T_DATA_ACCESS ; Change fault to DSI
3696 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3697 b EXT(handlePF) ; Join common...
3700 .globl EXT(handleISeg)
3704 li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI
3705 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3706 b EXT(handlePF) ; Join common...
3710 * handlePF - handle a page fault interruption
3712 * At entry R2 contains per_proc, R13 contains savarea pointer,
3713 * and R11 is the exception code.
3715 * This first part does a quick check to see if we can handle the fault.
3716 * We cannot handle any kind of protection exceptions here, so we pass
3717 * them up to the next level.
3719 * NOTE: In order for a page-fault redrive to work, the translation miss
3720 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
3721 * before we come here.
3725 .globl EXT(handlePF)
3729 mfsprg r12,2 ; Get feature flags
3730 cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction
3731 lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode
3732 mtcrf 0x02,r12 ; move pf64Bit to cr6
3733 lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here
3734 lwz r18,SAVflags(r13) ; Get the flags
3736 beq-- gotIfetch ; We have an IFETCH here...
3738 lwz r27,savedsisr(r13) ; Get the DSISR
3739 lwz r29,savedar(r13) ; Get the first half of the DAR
3740 lwz r30,savedar+4(r13) ; And second half
3742 b ckIfProt ; Go check if this is a protection fault...
3744 gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value
3745 lwz r29,savesrr0(r13) ; Get the first half of the instruction address
3746 lwz r30,savesrr0+4(r13) ; And second half
3747 stw r27,savedsisr(r13) ; Save the "constructed" DSISR
3749 ckIfProt: and. r4,r27,r0 ; Is this a non-handlable exception?
3750 li r20,64 ; Set a limit of 64 nests for sanity check
3751 bne-- hpfExit ; Yes... (probably not though)
3754 ; Note: if the RI is on, we are accessing user space from the kernel, therefore we
3755 ; should be loading the user pmap here.
3758 andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space?
3759 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
3760 mr r19,r2 ; Remember the per_proc
3761 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
3762 mr r23,r30 ; Save the low part of faulting address
3763 beq-- hpfInKern ; Skip if we are in the kernel
3764 la r8,ppUserPmap(r19) ; Point to the current user pmap
3766 hpfInKern: mr r22,r29 ; Save the high part of faulting address
3768 bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit...
3771 ; On 32-bit machines we emulate a segment exception by loading unused SRs with a
3772 ; predefined value that corresponds to no address space. When we see that value
3773 ; we turn off the PTE miss bit in the DSISR to drive the code later on that will
3774 ; cause the proper SR to be loaded.
3777 lwz r28,4(r8) ; Pick up the pmap
3778 rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive?
3779 mr r25,r28 ; Save the original pmap (in case we nest)
3780 lwz r0,pmapFlags(r28) ; Get pmap's flags
3781 bne hpfGVtest ; Segs are not ours if so...
3782 mfsrin r4,r30 ; Get the SR that was used for translation
3783 cmplwi r4,invalSpace ; Is this a simulated segment fault?
3784 bne++ hpfGVtest ; No...
3786 rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR
3787 b hpfGVtest ; Join on up...
3791 nop ; Push hpfNest to a 32-byte boundary
3792 nop ; Push hpfNest to a 32-byte boundary
3793 nop ; Push hpfNest to a 32-byte boundary
3795 hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit)
3796 mr r25,r28 ; Save the original pmap (in case we nest)
3797 lwz r0,pmapFlags(r28) ; Get pmap's flags
3799 hpfGVtest: rlwinm. r0,r0,0,pmapVMgsaa ; Using guest shadow mapping assist?
3800 bne hpfGVxlate ; Yup, do accelerated shadow stuff
3803 ; This is where we loop descending nested pmaps
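;
;			Illustrative sketch (commentary only) of the descent below, in rough C.  The
;			helper names (pmap_for_space, is_nested, etc.) are hypothetical stand-ins for
;			the pmapTrans lookup and the mpType/mpRIP tests in the code.
;
;				for (nest = 64; nest > 0; nest--) {
;					sxlkShared(&pmap->pmapSXlk);                  /* shared search lock      */
;					mp = mapSearch(pmap, va);
;					if (mp == NULL || is_being_removed(mp)) goto not_found;
;					if (!is_nested(mp) && !is_linkage(mp)) break; /* real mapping found      */
;					va += mp->mpNestReloc;                        /* relocate into the nest  */
;					next = pmap_for_space(mp->mpSpace);           /* pmapTrans[space]        */
;					sxlkUnlock(&pmap->pmapSXlk);
;					pmap = next;
;				}
;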
3806 hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3807 addi r20,r20,-1 ; Count nest try
3808 bl sxlkShared ; Go get a shared lock on the mapping lists
3809 mr. r3,r3 ; Did we get the lock?
3810 bne-- hpfBadLock ; Nope...
3812 mr r3,r28 ; Get the pmap pointer
3813 mr r4,r22 ; Get top of faulting vaddr
3814 mr r5,r23 ; Get bottom of faulting vaddr
3815 bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags)
3817 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one?
3818 mr. r31,r3 ; Save the mapping if we found it
3819 cmplwi cr1,r0,0 ; Check for removal
3820 crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing
3822 bt-- cr0_eq,hpfNotFound ; Not found or removing...
3824 rlwinm r0,r7,0,mpType ; Isolate mapping type
3825 cmplwi r0,mpNest ; Are we again nested?
3826 cmplwi cr1,r0,mpLinkage ; Are we a linkage type?
3827 cror cr0_eq,cr1_eq,cr0_eq ; cr0_eq <- nested or linkage type?
3828 mr r26,r7 ; Get the flags for this mapping (passed back from search call)
3830 lhz r21,mpSpace(r31) ; Get the space
3832 bne++ hpfFoundIt ; No, we found our guy...
3835 #if pmapTransSize != 12
3836 #error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize
3838 cmplwi r0,mpLinkage ; Linkage mapping?
3839 cmplwi cr1,r20,0 ; Too many nestings?
3840 beq-- hpfSpclNest ; Do we need to do special handling?
3842 hpfCSrch: lhz r21,mpSpace(r31) ; Get the space
3843 lwz r8,mpNestReloc(r31) ; Get the vaddr relocation
3844 lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half
3845 la r3,pmapSXlk(r28) ; Point to the old pmap search lock
3846 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
3847 lis r10,hi16(EXT(pmapTrans)) ; Get the translate table
3848 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
3849 blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop...
3850 or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit
3851 slwi r11,r21,3 ; Multiply space by 8
3852 ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part
3853 addc r23,r23,r9 ; Relocate bottom half of vaddr
3854 lwz r10,0(r10) ; Get the actual translation map
3855 slwi r12,r21,2 ; Multiply space by 4
3856 add r10,r10,r11 ; Add in the higher part of the index
3857 rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit)
3858 adde r22,r22,r8 ; Relocate the top half of the vaddr
3859 add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry
3860 bl sxlkUnlock ; Unlock the search list
3862 bt++ pf64Bitb,hpfGetPmap64 ; Separate handling for 64-bit machines
3863 lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap
3864 cmplwi r28,0 ; Is the pmap paddr valid?
3865 bne+ hpfNest ; Nest into new pmap...
3866 b hpfBadPmap ; Handle bad pmap
3869 ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap
3870 cmpldi r28,0 ; Is the pmap paddr valid?
3871 bne++ hpfNest ; Nest into new pmap...
3872 b hpfBadPmap ; Handle bad pmap
3876 ; Error condition. We only allow 64 nestings. This keeps us from having to
3877 ; check for recursive nests when we install them.
3883 lwz r20,savedsisr(r13) ; Get the DSISR
3884 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3885 bl sxlkUnlock ; Unlock the search list (R3 good from above)
3886 ori r20,r20,1 ; Indicate that there was a nesting problem
3887 stw r20,savedsisr(r13) ; Stash it
3888 lwz r11,saveexception(r13) ; Restore the exception code
3889 b EXT(PFSExit) ; Yes... (probably not though)
3892 ; Error condition - lock failed - this is fatal
3898 lis r0,hi16(Choke) ; System abend
3899 ori r0,r0,lo16(Choke) ; System abend
3900 li r3,failMapping ; Show mapping failure
3904 ; Error condition - space id selected an invalid pmap - fatal
3910 lis r0,hi16(Choke) ; System abend
3911 ori r0,r0,lo16(Choke) ; System abend
3912 li r3,failPmap ; Show invalid pmap
3916 ; Did not find any kind of mapping
3922 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3923 bl sxlkUnlock ; Unlock it
3924 lwz r11,saveexception(r13) ; Restore the exception code
3926 hpfExit: ; We need this because we can not do a relative branch
3927 b EXT(PFSExit) ; Yes... (probably not though)
3931 ; Here is where we handle special mappings. So far, the only use is to load a
3932 ; processor specific segment register for copy in/out handling.
3934 ; The only (so far implemented) special map is used for copyin/copyout.
3935 ; We keep a mapping of a "linkage" mapping in the per_proc.
3936 ; The linkage mapping is basically a nested pmap that is switched in
3937 ; as part of context switch. It relocates the appropriate user address
3938 ; space slice into the right place in the kernel.
3944 la r31,ppUMWmp(r19) ; Just point to the mapping
3945 oris r27,r27,hi16(dsiLinkage) ; Show that we had a linkage mapping here
3946 b hpfCSrch ; Go continue search...
3950 ; We have now found a mapping for the address we faulted on.
3954 ; Here is where we go about calculating what the VSID should be. We concatenate
3955 ; the space ID (14 bits wide) 3 times. We then slide the vaddr over
3956 ; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID).
3957 ; Then we XOR the expanded space ID and the shifted vaddr. This gives us
3960 ; This is used both for segment handling and PTE handling
3965 #error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!!
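;
;			Illustrative sketch (commentary only) of the computation described above, in
;			rough C; the exact bit positions are handled by the rlwinm/rlwimi sequence
;			below, this just shows the shape of it:
;
;				uint64_t sid  = space_id & 0x3FFF;                  /* 14-bit space id        */
;				uint64_t hash = sid | (sid << 14) | (sid << 28);    /* three copies of sid    */
;				uint64_t vsid = hash ^ ((va >> 28) << 14);          /* xor with shifted seg # */
;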
3968 ; Important non-volatile registers at this point ('home' means the final pmap/mapping found
3969 ; when a multi-level mapping has been successfully searched):
3970 ; r21: home space id number
3971 ; r22: relocated high-order 32 bits of vaddr
3972 ; r23: relocated low-order 32 bits of vaddr
3973 ; r25: pmap physical address
3975 ; r28: home pmap physical address
3976 ; r29: high-order 32 bits of faulting vaddr
3977 ; r30: low-order 32 bits of faulting vaddr
3978 ; r31: mapping's physical address
3982 hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment
3983 hpfGVfound: rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment?
3984 rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID
3985 rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order
3986 rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over
3987 rlwinm r0,r27,0,dsiLinkageb,dsiLinkageb ; Isolate linkage mapping flag
3988 rlwimi r21,r21,14,4,17 ; Make a second copy of space above first
3989 cmplwi cr5,r0,0 ; Did we just do a special nesting?
3990 rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35
3991 crorc cr0_eq,cr0_eq,cr5_eq ; Force ourselves through the seg load code if special nest
3992 rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register
3993 xor r14,r14,r20 ; Calculate the top half of VSID
3994 xor r15,r15,r21 ; Calculate the bottom half of the VSID
3995 rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing)
3996 rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry
3997 rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top
3998 rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position
3999 or r12,r12,r15 ; Add key into the bottom of VSID
4001 ; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12
4003 bne++ hpfPteMiss ; Nope, normal PTE miss...
4006 ; Here is the only place that we make an entry in the pmap segment cache.
4008 ; Note that we do not make an entry in the segment cache for special
4009 ; nested mappings. This makes the copy in/out segment get refreshed
4010 ; when switching threads.
4012 ; The first thing that we do is to look up the ESID we are going to load
4013 ; into a segment in the pmap cache. If it is already there, this is
4014 ; a segment that appeared since the last time we switched address spaces.
4015 ; If all is correct, then it was another processor that made the cache
4016 ; entry. If not, well, it is an error that we should die on, but I have
4017 ; not figured a good way to trap it yet.
4019 ; If we get a hit, we just bail, otherwise, lock the pmap cache, select
4020 ; an entry based on the generation number, update the cache entry, and
4021 ; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit
4022 ; entries that correspond to the last 4 bits (32:35 for 64-bit and
4023 ; 0:3 for 32-bit) of the ESID.
4025 ; Then we unlock and bail.
4027 ; First lock it. Then select a free slot or steal one based on the generation
4028 ; number. Then store it, update the allocation flags, and unlock.
4030 ; The cache entry contains an image of the ESID/VSID pair we would load for
4031 ; 64-bit architecture. For 32-bit, it is a simple transform to an SR image.
4033 ; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not
4034 ; the current one, which may have changed because we nested.
4036 ; Also remember that we do not store the valid bit in the ESID. If we
4037 ; do, this will break some other stuff.
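;
;			Illustrative sketch (commentary only) of the update described above, in rough C.
;			pmapCacheLookup and the pmapSegCache/sub-tag fields are from the code below; the
;			remaining helper names are hypothetical.
;
;				if (pmapCacheLookup(pmap, esid))                    /* already cached        */
;					return;                                         /* (another CPU did it)  */
;				lock_pmap_cache(pmap);
;				slot = pick_free_or_steal_by_generation(pmap);
;				pmap->pmapSegCache[slot].sgcESID = esid;            /* valid bit not stored  */
;				pmap->pmapSegCache[slot].sgcVSID = vsid_with_key;
;				set_subtag(pmap, esid & 0xF, slot);                 /* 4-bit entry per low   */
;				unlock_and_bump_generation(pmap);                   /* ESID nybble           */
;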
4040 bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault....
4042 mr r3,r25 ; Point to the pmap
4043 mr r4,r29 ; ESID high half
4044 mr r5,r30 ; ESID low half
4045 bl pmapCacheLookup ; Go see if this is in the cache already
4047 mr. r3,r3 ; Did we find it?
4048 mr r4,r11 ; Copy this to a different register
4050 bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry...
4052 lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table
4053 lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table
4055 cntlzw r7,r4 ; Find a free slot
4057 subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one
4058 rlwinm r30,r30,0,0,3 ; Clean up the ESID
4059 srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not
4060 addi r5,r4,1 ; Bump the generation number
4061 and r7,r7,r6 ; Clear bit number if none empty
4062 andc r8,r4,r6 ; Clear generation count if we found an empty
4063 rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word
4064 or r7,r7,r8 ; Select a slot number
4066 andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using
4067 oris r8,r8,0x8000 ; Get the high bit on
4068 la r9,pmapSegCache(r25) ; Point to the segment cache
4069 slwi r6,r7,4 ; Get index into the segment cache
4070 slwi r2,r7,2 ; Get index into the segment cache sub-tag index
4071 srw r8,r8,r7 ; Get the mask
4072 cmplwi r2,32 ; See if we are in the first or second half of sub-tag
4074 rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out
4075 oris r0,r0,0xF000 ; Get the sub-tag mask
4076 add r9,r9,r6 ; Point to the cache slot
4077 srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift work for either half)
4078 srw r5,r30,r2 ; Slide sub-tag to right slot (shift work for either half)
4080 stw r29,sgcESID(r9) ; Save the top of the ESID
4081 andc r10,r10,r0 ; Clear sub-tag slot in case we are in top
4082 andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom
4083 stw r30,sgcESID+4(r9) ; Save the bottom of the ESID
4084 or r10,r10,r5 ; Stick in subtag in case top half
4085 or r11,r11,r5 ; Stick in subtag in case bottom half
4086 stw r14,sgcVSID(r9) ; Save the top of the VSID
4087 andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated
4088 stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key
4089 bge hpfSCSTbottom ; Go save the bottom part of sub-tag
4091 stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag
4092 b hpfNoCacheEnt ; Go finish up...
4095 stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag
4099 eieio ; Make sure cache is updated before lock
4100 stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number
4104 lwz r4,ppMapFlags(r19) ; Get the protection key modifier
4105 bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment...
4108 ; Make and enter 32-bit segment register
4111 lwz r16,validSegs(r19) ; Get the valid SR flags
4112 xor r12,r12,r4 ; Alter the storage key before loading segment register
4113 rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting
4114 rlwinm r6,r12,19,1,3 ; Insert the keys and N bit
4115 lis r0,0x8000 ; Set bit 0
4116 rlwimi r6,r12,20,12,31 ; Insert 4:23 the VSID
4117 srw r0,r0,r2 ; Get bit corresponding to SR
4118 rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents
4119 or r16,r16,r0 ; Show that SR is valid
4121 mtsrin r6,r30 ; Set the actual SR
4123 stw r16,validSegs(r19) ; Set the valid SR flags
4125 b hpfPteMiss ; SR loaded, go do a PTE...
4128 ; Make and enter 64-bit segment look-aside buffer entry.
4129 ; Note that the cache entry is the right format except for valid bit.
4130 ; We also need to convert from long long to 64-bit register values.
4137 ld r16,validSegs(r19) ; Get the valid SLB entry flags
4138 sldi r8,r29,32 ; Move high order address over
4139 sldi r10,r14,32 ; Move high part of VSID over
4141 not r3,r16 ; Make valids be 0s
4142 li r0,1 ; Prepare to set bit 0
4144 cntlzd r17,r3 ; Find a free SLB
4145 xor r12,r12,r4 ; Alter the storage key before loading segment table entry
4146 or r9,r8,r30 ; Form full 64-bit address
4147 cmplwi r17,63 ; Did we find a free SLB entry?
4148 sldi r0,r0,63 ; Get bit 0 set
4149 or r10,r10,r12 ; Move in low part and keys
4150 addi r17,r17,1 ; Skip SLB 0 always
4151 blt++ hpfFreeSeg ; Yes, go load it...
4154 ; No free SLB entries, select one that is in use and invalidate it
4156 lwz r4,ppSegSteal(r19) ; Get the next slot to steal
4157 addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
4158 addi r4,r4,1 ; Set next slot to steal
4159 slbmfee r7,r17 ; Get the entry that is in the selected spot
4160 subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap
4161 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
4162 srawi r2,r2,31 ; Get -1 if steal index still in range
4163 slbie r7 ; Invalidate the in-use SLB entry
4164 and r4,r4,r2 ; Reset steal index when it should wrap
4167 stw r4,ppSegSteal(r19) ; Set the next slot to steal
4169 ; We are now ready to stick the SLB entry in the SLB and mark it in use
4173 subi r4,r17,1 ; Adjust shift to account for skipping slb 0
4174 mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear
4175 srd r0,r0,r4 ; Set bit mask for allocation
4176 oris r9,r9,0x0800 ; Turn on the valid bit
4177 or r16,r16,r0 ; Turn on the allocation flag
4178 rldimi r9,r17,0,58 ; Copy in the SLB entry selector
4180 beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest...
4181 slbie r7 ; Blow away a potential duplicate
4183 hpfNoBlow: slbmte r10,r9 ; Make that SLB entry
4185 std r16,validSegs(r19) ; Mark as valid
4186 b hpfPteMiss ; STE loaded, go do a PTE...
4189 ; The segment has been set up and loaded if need be. Now we are ready to build the
4190 ; PTE and get it into the hash table.
4192 ; Note that there is actually a race here. If we start fault processing on
4193 ; a different pmap, i.e., we have descended into a nested pmap, it is possible
4194 ; that the nest could have been removed from the original pmap. We would
4195 ; succeed with this translation anyway. I do not think we need to worry
4196 ; about this (famous last words) because nobody should be unnesting anything
4197 ; if there are still people actively using them. It should be up to the
4198 ; higher level VM system to put the kibosh on this.
4200 ; There is also another race here: if we fault on the same mapping on more than
4201 ; one processor at the same time, we could end up with multiple PTEs for the same
4202 ; mapping. This is not a good thing.... We really only need one of the
4203 ; fault handlers to finish, so what we do is to set a "fault in progress" flag in
4204 ; the mapping. If we see that set, we just abandon the handler and hope that by
4205 ; the time we restore context and restart the interrupted code, the fault has
4206 ; been resolved by the other guy. If not, we will take another fault.
4210 ; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not.
4211 ; It is required to stay there until after we call mapSelSlot!!!!
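;
;			Illustrative sketch (commentary only) of the fault-in-progress handshake that the
;			lwarx/stwcx. sequence below implements, in rough C (atomic_cas32 and
;			pte_already_valid are hypothetical stand-ins):
;
;				do {
;					flags = mp->mpFlags;                          /* reloaded by lwarx       */
;					if ((flags & mpFIP) || pte_already_valid(mp))
;						goto abandon;                             /* other CPU has/had it    */
;				} while (!atomic_cas32(&mp->mpFlags, flags, flags | mpFIP));
;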
4216 hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field
4217 lwz r12,mpPte(r31) ; Get the quick pointer to PTE
4218 li r3,mpHValid ; Get the PTE valid bit
4219 andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side?
4220 ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag
4221 crnot cr1_eq,cr0_eq ; Remember if FIP was on
4222 and. r12,r12,r3 ; Isolate the valid bit
4223 crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail...
4224 beq-- hpfAbandon ; Yes, other processor is or already has handled this...
4225 rlwinm r0,r2,0,mpType ; Isolate mapping type
4226 cmplwi r0,mpBlock ; Is this a block mapping?
4227 crnot cr7_eq,cr0_eq ; Remember if we have a block mapping
4228 stwcx. r2,0,r31 ; Store the flags
4229 bne-- hpfPteMiss ; Collision, try again...
4231 bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff...
4234 ; At this point we are about to do the 32-bit PTE generation.
4236 ; The following is the R14:R15 pair that contains the "shifted" VSID:
4240 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4241 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4242 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4244 ; The 24 bits of the 32-bit architecture VSID are in the following:
4248 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4249 ; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4250 ; +--------+--------+--------+--------+--------+--------+--------+--------+
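;
;			Illustrative sketch (commentary only) of the classic 32-bit hashed-PTE lookup
;			performed below; variable names are mine, the layout (64-byte PTEGs, PCA slots
;			growing down from the hash table base) is as coded:
;
;				hash = (vsid & 0x7FFFF) ^ page_index;            /* page_index = EA bits 4:19 */
;				pteg = hash_base + ((hash & hash_mask) << 6);    /* 64 bytes per PTEG         */
;				pca  = hash_base - 4 * ((hash & hash_mask) + 1); /* 4-byte PCA per PTEG       */
;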
4255 lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion)
4256 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4258 mfsdr1 r27 ; Get the hash table base address
4260 rlwinm r0,r23,0,4,19 ; Isolate just the page index
4261 rlwinm r18,r23,10,26,31 ; Extract the API
4262 xor r19,r15,r0 ; Calculate hash << 12
4263 mr r2,r25 ; Save the flag part of the mapping
4264 rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image
4265 rlwinm r16,r27,16,7,15 ; Extract the hash table size
4266 rlwinm r25,r25,0,0,19 ; Clear out the flags
4267 slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing not supported)
4268 sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map)
4269 ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask
4270 rlwinm r27,r27,0,0,15 ; Extract the hash table base
4271 rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table
4272 add r24,r24,r25 ; Adjust to true physical address
4273 rlwimi r18,r15,27,5,24 ; Move bits 32:31 of the "shifted" VSID into the PTE image
4274 rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot
4275 and r19,r19,r16 ; Wrap hash table offset into the hash table
4276 ori r24,r24,lo16(mpR) ; Turn on the reference bit right now
4277 rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA
4278 add r19,r19,r27 ; Point to the PTEG
4279 subfic r20,r20,-4 ; Get negative offset to PCA
4280 oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4281 add r20,r20,r27 ; Point to the PCA slot
4284 ; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower.
4285 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4287 ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4288 ; that some other processor beat us and stuck in a PTE or that
4289 ; all we had was a simple segment exception and the PTE was there the whole time.
4290 ; If we find a pointer, we are done.
4293 mr r7,r20 ; Copy the PCA pointer
4294 bl mapLockPteg ; Lock the PTEG
4296 lwz r12,mpPte(r31) ; Get the offset to the PTE
4297 mr r17,r6 ; Remember the PCA image
4298 mr r16,r6 ; Prime the post-select PCA image
4299 andi. r0,r12,mpHValid ; Is there a PTE here already?
4300 li r21,8 ; Get the number of slots
4302 bne- cr7,hpfNoPte32 ; Skip this for a block mapping...
4304 bne- hpfBailOut ; Someone already did this for us...
4307 ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a
4308 ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4309 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4310 ; R4 returns the slot index.
4312 ; REMEMBER: CR7 indicates that we are building a block mapping.
4315 hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots
4316 mr r6,r17 ; Get back the original PCA
4317 rlwimi r6,r16,0,8,15 ; Insert the updated steal slot
4318 blt- hpfBailOut ; Holy Cow, all slots are locked...
4320 bl mapSelSlot ; Go select a slot (note that the PCA image is already set up)
4322 cmplwi cr5,r3,1 ; Did we steal a slot?
4323 rlwimi r19,r4,3,26,28 ; Insert PTE index into PTEG address yielding PTE address
4324 mr r16,r6 ; Remember the PCA image after selection
4325 blt+ cr5,hpfInser32 ; Nope, no steal...
4327 lwz r6,0(r19) ; Get the old PTE
4328 lwz r7,4(r19) ; Get the real part of the stealee
4329 rlwinm r6,r6,0,1,31 ; Clear the valid bit
4330 bgt cr5,hpfNipBM ; Do not try to lock a non-existent physent for a block mapping...
4331 srwi r3,r7,12 ; Change phys address to a ppnum
4332 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4333 cmplwi cr1,r3,0 ; Check if this is in RAM
4334 bne- hpfNoPte32 ; Could not get it, try for another...
4336 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4338 hpfNipBM: stw r6,0(r19) ; Set the invalid PTE
4340 sync ; Make sure the invalid is stored
4341 li r9,tlbieLock ; Get the TLBIE lock
4342 rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part
4344 hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock
4345 mfsprg r4,0 ; Get the per_proc
4346 rlwinm r8,r6,25,18,31 ; Extract the space ID
4347 rlwinm r11,r6,25,18,31 ; Extract the space ID
4348 lwz r7,hwSteals(r4) ; Get the steal count
4349 srwi r2,r6,7 ; Align segment number with hash
4350 rlwimi r11,r11,14,4,17 ; Get copy above ourselves
4351 mr. r0,r0 ; Is it locked?
4352 srwi r0,r19,6 ; Align PTEG offset for back hash
4353 xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits)
4354 xor r11,r11,r0 ; Hash backwards to partial vaddr
4355 rlwinm r12,r2,14,0,3 ; Shift segment up
4356 mfsprg r2,2 ; Get feature flags
4357 li r0,1 ; Get our lock word
4358 rlwimi r12,r6,22,4,9 ; Move up the API
4359 bne- hpfTLBIE32 ; It is locked, go wait...
4360 rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr
4362 stwcx. r0,0,r9 ; Try to get it
4363 bne- hpfTLBIE32 ; We was beat...
4364 addi r7,r7,1 ; Bump the steal count
4366 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
4367 li r0,0 ; Lock clear value
4369 tlbie r12 ; Invalidate it everywhere
4372 beq- hpfNoTS32 ; Can not have MP on this machine...
4374 eieio ; Make sure that the tlbie happens first
4375 tlbsync ; Wait for everyone to catch up
4376 sync ; Make sure of it all
4378 hpfNoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock
4380 stw r7,hwSteals(r4) ; Save the steal count
4381 bgt cr5,hpfInser32 ; We just stole a block mapping...
4383 lwz r4,4(r19) ; Get the RC of the just invalidated PTE
4385 la r11,ppLink+4(r3) ; Point to the master RC copy
4386 lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping
4387 rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC
4389 hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC
4390 or r0,r0,r2 ; Merge in the new RC
4391 stwcx. r0,0,r11 ; Try to stick it back
4392 bne- hpfMrgRC32 ; Try again if we collided...
4395 hpfFPnch: rlwinm. r7,r7,0,~ppFlags ; Clean and test mapping address
4396 beq- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4398 lhz r10,mpSpace(r7) ; Get the space
4399 lwz r9,mpVAddr+4(r7) ; And the vaddr
4400 cmplw cr1,r10,r8 ; Is this one of ours?
4401 xor r9,r12,r9 ; Compare virtual address
4402 cmplwi r9,0x1000 ; See if we really match
4403 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4404 beq+ hpfFPnch2 ; Yes, found ours...
4406 lwz r7,mpAlias+4(r7) ; Chain on to the next
4407 b hpfFPnch ; Check it out...
4409 hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG
4410 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG)
4411 bl mapPhysUnlock ; Unlock the physent now
4413 hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4415 stw r24,4(r19) ; Stuff in the real part of the PTE
4416 eieio ; Make sure this gets there first
4418 stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid
4419 mr r17,r16 ; Get the PCA image to save
4420 b hpfFinish ; Go join the common exit code...
4424 ; At this point we are about to do the 64-bit PTE generation.
4426 ; The following is the R14:R15 pair that contains the "shifted" VSID:
4430 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4431 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4432 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4439 ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping
4440 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4442 mfsdr1 r27 ; Get the hash table base address
4444 sldi r11,r22,32 ; Slide top of adjusted EA over
4445 sldi r14,r14,32 ; Slide top of VSID over
4446 rlwinm r5,r27,0,27,31 ; Isolate the size
4447 eqv r16,r16,r16 ; Get all foxes here
4448 rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN
4449 mr r2,r10 ; Save the flag part of the mapping
4450 or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value
4451 rldicr r27,r27,0,45 ; Clean up the hash table base
4452 or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value
4453 rlwinm r0,r11,0,4,19 ; Clear out everything but the page
4454 subfic r5,r5,46 ; Get number of leading zeros
4455 xor r19,r0,r15 ; Calculate hash
4456 ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE
4457 srd r16,r16,r5 ; Shift over to get length of table
4458 srdi r19,r19,5 ; Convert page offset to hash table offset
4459 rldicr r16,r16,0,56 ; Clean up lower bits in hash table size
4460 rldicr r10,r10,0,51 ; Clear out flags
4461 sldi r24,r24,12 ; Change ppnum to physical address
4462 sub r11,r11,r10 ; Get the offset from the base mapping
4463 and r19,r19,r16 ; Wrap into hash table
4464 add r24,r24,r11 ; Get actual physical address of this page
4465 srdi r20,r19,5 ; Convert PTEG offset to PCA offset
4466 rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc.
4467 subfic r20,r20,-4 ; Get negative offset to PCA
4468 ori r24,r24,lo16(mpR) ; Force on the reference bit
4469 add r20,r20,r27 ; Point to the PCA slot
4470 add r19,r19,r27 ; Point to the PTEG
4473 ; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower.
4474 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4476 ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4477 ; that some other processor beat us and stuck in a PTE or that
4478 ; all we had was a simple segment exception and the PTE was there the whole time.
4479 ; If we find a pointer, we are done.
4482 mr r7,r20 ; Copy the PCA pointer
4483 bl mapLockPteg ; Lock the PTEG
4485 lwz r12,mpPte(r31) ; Get the offset to the PTE
4486 mr r17,r6 ; Remember the PCA image
4487 mr r18,r6 ; Prime post-selection PCA image
4488 andi. r0,r12,mpHValid ; See if we have a PTE now
4489 li r21,8 ; Get the number of slots
4491 bne-- cr7,hpfNoPte64 ; Skip this for a block mapping...
4493 bne-- hpfBailOut ; Someone already did this for us...
4496 ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a
4497 ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4498 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4499 ; R4 returns the slot index.
4501 ; REMEMBER: CR7 indicates that we are building a block mapping.
4504 hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots
4505 mr r6,r17 ; Restore original state of PCA
4506 rlwimi r6,r18,0,8,15 ; Insert the updated steal slot
4507 blt- hpfBailOut ; Holy Cow, all slots are locked...
4509 bl mapSelSlot ; Go select a slot
4511 cmplwi cr5,r3,1 ; Did we steal a slot?
4512 mr r18,r6 ; Remember the PCA image after selection
4513 insrdi r19,r4,3,57 ; Insert slot index into PTEG address bits 57:59, forming the PTE address
4514 lwz r10,hwSteals(r2) ; Get the steal count
4515 blt++ cr5,hpfInser64 ; Nope, no steal...
4517 ld r6,0(r19) ; Get the old PTE
4518 ld r7,8(r19) ; Get the real part of the stealee
4519 rldicr r6,r6,0,62 ; Clear the valid bit
4520 bgt cr5,hpfNipBMx ; Do not try to lock a non-existent physent for a block mapping...
4521 srdi r3,r7,12 ; Change phys address to a ppnum
4522 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4523 cmplwi cr1,r3,0 ; Check if this is in RAM
4524 bne-- hpfNoPte64 ; Could not get it, try for another...
4526 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4528 hpfNipBMx: std r6,0(r19) ; Set the invalid PTE
4529 li r9,tlbieLock ; Get the TLBIE lock
4531 srdi r11,r6,5 ; Shift VSID over for back hash
4532 mfsprg r4,0 ; Get the per_proc
4533 xor r11,r11,r19 ; Hash backwards to get low bits of VPN
4534 sync ; Make sure the invalid is stored
4536 sldi r12,r6,16 ; Move AVPN to EA position
4537 sldi r11,r11,5 ; Move this to the page position
4539 hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock
4540 mr. r0,r0 ; Is it locked?
4541 li r0,1 ; Get our lock word
4542 bne-- hpfTLBIE65 ; It is locked, go wait...
4544 stwcx. r0,0,r9 ; Try to get it
4545 rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN
4546 rldicl r8,r6,52,50 ; Isolate the address space ID
4547 bne-- hpfTLBIE64 ; We was beat...
4548 addi r10,r10,1 ; Bump the steal count
4550 rldicl r11,r12,0,16 ; Clear cause the book says so
4551 li r0,0 ; Lock clear value
4553 tlbie r11 ; Invalidate it everywhere
4555 mr r7,r8 ; Get a copy of the space ID
4556 eieio ; Make sure that the tlbie happens first
4557 rldimi r7,r7,14,36 ; Copy address space to make hash value
4558 tlbsync ; Wait for everyone to catch up
4559 rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top
4560 srdi r2,r6,26 ; Shift original segment down to bottom
4562 ptesync ; Make sure of it all
4563 xor r7,r7,r2 ; Compute original segment
4564 stw r0,tlbieLock(0) ; Clear the tlbie lock
4566 stw r10,hwSteals(r4) ; Save the steal count
4567 bgt cr5,hpfInser64 ; We just stole a block mapping...
4569 rldimi r12,r7,28,0 ; Insert decoded segment
4570 rldicl r4,r12,0,13 ; Trim to max supported address
4572 ld r12,8(r19) ; Get the RC of the just invalidated PTE
4574 la r11,ppLink+4(r3) ; Point to the master RC copy
4575 ld r7,ppLink(r3) ; Grab the pointer to the first mapping
4576 rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC
4578 hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC
4579 li r12,ppLFAmask ; Get mask to clean up alias pointer
4580 or r0,r0,r2 ; Merge in the new RC
4581 rotrdi r12,r12,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
4582 stwcx. r0,0,r11 ; Try to stick it back
4583 bne-- hpfMrgRC64 ; Try again if we collided...
4585 hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address
4586 beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4588 lhz r10,mpSpace(r7) ; Get the space
4589 ld r9,mpVAddr(r7) ; And the vaddr
4590 cmplw cr1,r10,r8 ; Is this one of ours?
4591 xor r9,r4,r9 ; Compare virtual address
4592 cmpldi r9,0x1000 ; See if we really match
4593 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4594 beq++ hpfFPnch2x ; Yes, found ours...
4596 ld r7,mpAlias(r7) ; Chain on to the next
4597 b hpfFPnchx ; Check it out...
4601 hpfTLBIE65: li r7,lgKillResv ; Point to the reservation kill area
4602 stwcx. r7,0,r7 ; Kill reservation
4604 hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock
4605 mr. r0,r0 ; Is it locked?
4606 beq++ hpfTLBIE64 ; Yup, wait for it...
4607 b hpfTLBIE63 ; Nope, try again..
4611 hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG
4612 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though)
4613 bl mapPhysUnlock ; Unlock the physent now
4616 hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE
4617 eieio ; Make sure this gets there first
4618 std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid
4619 mr r17,r18 ; Get the PCA image to set
4620 b hpfFinish ; Go join the common exit code...
4623 lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead
4624 ori r0,r0,lo16(Choke) ; System abend
4628 ; This is the common code we execute when we are finished setting up the PTE.
4633 hpfFinish: sub r4,r19,r27 ; Get offset of PTE
4634 ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset
4635 bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map
4636 stw r4,mpPte(r31) ; Remember our PTE
4638 hpfBailOut: eieio ; Make sure all updates come first
4639 stw r17,0(r20) ; Unlock and set the final PCA
4642 ; This is where we go if we have started processing the fault, but find that someone
4643 ; else has taken care of it.
4646 hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags
4647 rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag
4648 sth r2,mpFlags+2(r31) ; Set it
4650 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4651 bl sxlkUnlock ; Unlock the search list
4653 li r11,T_IN_VAIN ; Say that it was handled
4654 b EXT(PFSExit) ; Leave...
4657 ; This is where we go when we find that someone else
4658 ; is in the process of handling the fault.
4661 hpfAbandon: li r3,lgKillResv ; Kill off any reservation
4662 stwcx. r3,0,r3 ; Do it
4664 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4665 bl sxlkUnlock ; Unlock the search list
4667 li r11,T_IN_VAIN ; Say that it was handled
4668 b EXT(PFSExit) ; Leave...
4671 ; Guest shadow assist -- page fault handler
4673 ; Here we handle a fault in a guest pmap that has the guest shadow mapping
4674 ; assist active. We locate the VMM pmap extension block, which contains an
4675 ; index over the discontiguous multi-page shadow hash table. The index
4676 ; corresponding to our vaddr is selected, and the selected group within
4677 ; that page is searched for a valid and active entry that contains
4678 ; our vaddr and space id. The search is pipelined, so that we may fetch
4679 ; the next slot while examining the current slot for a hit. The final
4680 ; search iteration is unrolled so that we don't fetch beyond the end of
4681 ; our group, which could have dire consequences depending upon where the
4682 ; physical hash page is located.
4684 ; The VMM pmap extension block occupies a page. Beginning at offset 0, we
4685 ; have the pmap_vmm_ext proper. Aligned at the first 128-byte boundary
4686 ; after the pmap_vmm_ext is the hash table physical address index, a
4687 ; linear list of 64-bit physical addresses of the pages that comprise the shadow hash table.
4690 ; In the event that we successfully locate a guest mapping, we re-join
4691 ; the page fault path at hpfGVfound with the mapping's address in r31;
4692 ; otherwise, we re-join at hpfNotFound. In either case, we re-join holding
4693 ; a share of the pmap search lock for the host pmap with the host pmap's
4694 ; address in r28, the guest pmap's space id in r21, and the guest pmap's flags in r12.
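;
;			Illustrative sketch (commentary only) of the probe coded below, in rough C; the
;			helper names stand in for the GV_HPAGE_*/GV_HGRP_* index arithmetic:
;
;				hash = space_id ^ (gva >> 12);
;				page = hash_page_index[hash_page_select(hash)];     /* pick hash table page  */
;				mp   = page + hash_group_select(hash);              /* pick group in page    */
;				for (slot = 0; slot < GV_SLOTS; slot++, mp = next_slot(mp))
;					if (!(mp->mpFlags & (mpgFree | mpgDormant)) &&
;					    mp->mpSpace == space_id &&
;					    (mp->mpVAddr & ~0xFFFULL) == gva_page)
;						return mp;                                  /* hit: join hpfGVfound  */
;				return NULL;                                        /* miss: hpfNotFound     */
;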
4700 bt pf64Bitb,hpfGV64 ; Take 64-bit path for 64-bit machine
4702 lwz r11,pmapVmmExtPhys+4(r28) ; r11 <- VMM pmap extension block paddr
4703 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4704 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4705 lwz r28,vmxHostPmapPhys+4(r11) ; r28 <- host pmap's paddr
4706 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4707 rlwinm r10,r30,0,0xFFFFF000 ; r10 <- page-aligned guest vaddr
4708 lwz r6,vxsGpf(r11) ; Get guest fault count
4710 srwi r3,r10,12 ; Form shadow hash:
4711 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4712 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4713 ; Form index offset from hash page number
4714 add r31,r31,r4 ; r31 <- hash page index entry
4715 lwz r31,4(r31) ; r31 <- hash page paddr
4716 rlwimi r31,r3,GV_HGRP_SHIFT,GV_HGRP_MASK
4717 ; r31 <- hash group paddr
4719 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4720 bl sxlkShared ; Go get a shared lock on the mapping lists
4721 mr. r3,r3 ; Did we get the lock?
4722 bne- hpfBadLock ; Nope...
4724 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4725 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4726 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
4727 addi r6,r6,1 ; Increment guest fault count
4728 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4729 mtctr r0 ; in this group
4730 stw r6,vxsGpf(r11) ; Update guest fault count
4735 mr r6,r3 ; r6 <- current mapping slot's flags
4736 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4737 mr r7,r4 ; r7 <- current mapping slot's space ID
4738 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4739 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4740 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
4741 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
4742 xor r7,r7,r21 ; Compare space ID
4743 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4744 xor r8,r8,r10 ; Compare virtual address
4745 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4746 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4748 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4749 bdnz hpfGVlp32 ; Iterate
4751 clrrwi r5,r5,12 ; Remove flags from virtual address
4752 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4753 xor r4,r4,r21 ; Compare space ID
4754 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4755 xor r5,r5,r10 ; Compare virtual address
4756 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4757 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4763 ld r11,pmapVmmExtPhys(r28) ; r11 <- VMM pmap extension block paddr
4764 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4765 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4766 ld r28,vmxHostPmapPhys(r11) ; r28 <- host pmap's paddr
4767 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4768 rlwinm r10,r30,0,0xFFFFF000 ; Form 64-bit guest vaddr
4769 rldimi r10,r29,32,0 ; cleaning up low-order 12 bits
4770 lwz r6,vxsGpf(r11) ; Get guest fault count
4772 srwi r3,r10,12 ; Form shadow hash:
4773 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4774 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4775 ; Form index offset from hash page number
4776 add r31,r31,r4 ; r31 <- hash page index entry
4777 ld r31,0(r31) ; r31 <- hash page paddr
4778 insrdi r31,r3,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
4779 ; r31 <- hash group paddr
4781 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4782 bl sxlkShared ; Go get a shared lock on the mapping lists
4783 mr. r3,r3 ; Did we get the lock?
4784 bne-- hpfBadLock ; Nope...
4786 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4787 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4788 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
4789 addi r6,r6,1 ; Increment guest fault count
4790 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4791 mtctr r0 ; in this group
4792 stw r6,vxsGpf(r11) ; Update guest fault count
4797 mr r6,r3 ; r6 <- current mapping slot's flags
4798 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4799 mr r7,r4 ; r7 <- current mapping slot's space ID
4800 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4801 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4802 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
4803 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4804 xor r7,r7,r21 ; Compare space ID
4805 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4806 xor r8,r8,r10 ; Compare virtual address
4807 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4808 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4810 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4811 bdnz hpfGVlp64 ; Iterate
4813 clrrdi r5,r5,12 ; Remove flags from virtual address
4814 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4815 xor r4,r4,r21 ; Compare space ID
4816 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4817 xor r5,r5,r10 ; Compare virtual address
4818 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4819 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4822 lwz r6,vxsGpfMiss(r11) ; Get guest fault miss count
4823 addi r6,r6,1 ; Increment miss count
4824 stw r6,vxsGpfMiss(r11) ; Update guest fault miss count
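;
; The shadow-hash lookup above boils down to the following commented-out C sketch.
; GV_HPAGE_* and GV_HGRP_* are treated as opaque shift/mask constants (their real
; values come from the assembly headers), and the index math approximates the
; rlwinm/rlwimi sequence rather than transcribing it literally.
;
;     #include <stdint.h>
;
;     // Returns the physical address of the first slot of the hash group (sketch).
;     static uint64_t gv_hash_group(uint64_t guest_va, uint32_t space_id,
;                                   const uint64_t *hash_page_paddr,  // page index array
;                                   unsigned hpage_shift, uint32_t hpage_mask,
;                                   unsigned hgrp_shift, uint32_t hgrp_mask,
;                                   uint32_t grp_size)
;     {
;         uint32_t hash = (uint32_t)(guest_va >> 12) ^ space_id; // spaceID ^ (vaddr >> 12)
;         uint32_t page = (hash >> hpage_shift) & hpage_mask;    // which hash table page
;         uint32_t grp  = (hash >> hgrp_shift) & hgrp_mask;      // which group in that page
;         return hash_page_paddr[page] + (uint64_t)grp * grp_size; // paddr of 1st slot in group
;     }
;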
4828 * hw_set_user_space(pmap)
4829 * hw_set_user_space_dis(pmap)
4831 * Indicate whether memory space needs to be switched.
4832 * We really need to turn off interrupts here, because we need to be non-preemptable
4834 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
4835 * register usage here. The VMM switch code in vmachmon.s that calls this
4836 * knows what registers are in use. Check it if the register usage here changes.
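 *
 * A loose C sketch of the switch performed below, with invented stand-in names
 * (per_proc_sk, user_pmap_virt, kill_data_streams) modeled on the ppUserPmap /
 * ppUserPmapVirt stores and the dssall; it is illustrative only.
 *
 *     #include <stdint.h>
 *
 *     struct pmap_sk     { uint64_t pmapvr; };                 // virt<->real conversion salt
 *     struct per_proc_sk { struct pmap_sk *user_pmap_virt; uint64_t user_pmap_real; };
 *
 *     static void kill_data_streams(void) { } // stand-in for the dssall instruction
 *
 *     static void set_user_space_sketch(struct per_proc_sk *pp, struct pmap_sk *pm,
 *                                       int has_altivec)
 *     {
 *         int changed = (pp->user_pmap_virt != pm);                  // same space as before?
 *         pp->user_pmap_real = (uint64_t)(uintptr_t)pm ^ pm->pmapvr; // virt -> real
 *         pp->user_pmap_virt = pm;
 *         if (changed && has_altivec)
 *             kill_data_streams();                                   // only if the space changed
 *     }
 *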
4842 .globl EXT(hw_set_user_space)
4844 LEXT(hw_set_user_space)
4846 lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable
4847 mfmsr r10 ; Get the current MSR
4848 ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP
4849 ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE
4850 andc r10,r10,r8 ; Turn off VEC, FP for good
4851 andc r9,r10,r9 ; Turn off EE also
4852 mtmsr r9 ; Disable them
4853 isync ; Make sure FP and vec are off
4854 mfsprg r6,1 ; Get the current activation
4855 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4856 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4857 mfsprg r4,2 ; Get the feature flags
4858 lwz r7,pmapvr(r3) ; Get the v to r translation
4859 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4860 mtcrf 0x80,r4 ; Get the Altivec flag
4861 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4862 cmplw cr1,r3,r2 ; Same address space as before?
4863 stw r7,ppUserPmap(r6) ; Show our real pmap address
4864 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4865 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4866 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4867 mtmsr r10 ; Restore interruptions
4868 beqlr-- cr1 ; Leave if the same address space or not Altivec
4870 dssall ; Need to kill all data streams if adrsp changed
4875 .globl EXT(hw_set_user_space_dis)
4877 LEXT(hw_set_user_space_dis)
4879 lwz r7,pmapvr(r3) ; Get the v to r translation
4880 mfsprg r4,2 ; Get the feature flags
4881 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4882 mfsprg r6,1 ; Get the current activation
4883 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4884 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4885 mtcrf 0x80,r4 ; Get the Altivec flag
4886 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4887 cmplw cr1,r3,r2 ; Same address space as before?
4888 stw r7,ppUserPmap(r6) ; Show our real pmap address
4889 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4890 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4891 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4892 beqlr-- cr1 ; Leave if the same
4894 dssall ; Need to kill all data streams if adrsp changed
4898 /* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry
4900 * Lock must already be held on mapping block list
4901 * returns 0 if all slots filled.
4902 * returns n if a slot is found and it is not the last
4903 * returns -n if a slot is found and it is the last
4904 * when n and -n are returned, the corresponding bit is cleared
4905 * the mapping is zeroed out before return
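 *
 * Roughly, the allocator below does the following (commented-out C sketch; the
 * dcbz-based zeroing of the returned mapping is omitted):
 *
 *     #include <stdint.h>
 *
 *     // mbfree is a 64-slot bitmap held in two 32-bit words; a set bit means "free".
 *     static int mapalc1_sketch(uint32_t mbfree[2])
 *     {
 *         for (int word = 0; word < 2; word++) {
 *             if (mbfree[word] == 0)
 *                 continue;                               // nothing free in this word
 *             int lead = __builtin_clz(mbfree[word]);     // like cntlzw
 *             mbfree[word] &= ~(0x80000000u >> lead);     // claim the slot
 *             int n = word * 32 + lead;
 *             return (mbfree[0] | mbfree[1]) ? n : -n;    // -n flags "that was the last one"
 *         }
 *         return 0;                                        // all slots filled
 *     }
 *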
4913 lwz r4,mbfree(r3) ; Get the 1st mask
4914 lis r0,0x8000 ; Get the mask to clear the first free bit
4915 lwz r5,mbfree+4(r3) ; Get the 2nd mask
4916 mr r12,r3 ; Save the block ptr
4917 cntlzw r3,r4 ; Get first 1-bit in 1st word
4918 srw. r9,r0,r3 ; Get bit corresponding to first free one
4919 cntlzw r10,r5 ; Get first free field in second word
4920 andc r4,r4,r9 ; Turn 1-bit off in 1st word
4921 bne mapalc1f ; Found one in 1st word
4923 srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word
4924 li r3,0 ; assume failure return
4925 andc r5,r5,r9 ; Turn it off
4926 beqlr-- ; There are no 1 bits left...
4927 addi r3,r10,32 ; set the correct number
4930 or. r0,r4,r5 ; any more bits set?
4931 stw r4,mbfree(r12) ; update bitmasks
4932 stw r5,mbfree+4(r12)
4934 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4936 dcbz r6,r12 ; clear the 64-byte mapping
4939 bnelr++ ; return if another bit remains set
4941 neg r3,r3 ; indicate we just returned the last bit
4945 /* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zeros a free 2-bit mapping entry
4947 * Lock must already be held on mapping block list
4948 * returns 0 if all slots filled.
4949 * returns n if a slot is found and it is not the last
4950 * returns -n if a slot is found and it is the last
4951 * when n and -n are returned, the corresponding bits are cleared
4952 * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)).
4953 * the mapping is zeroed out before return
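 *
 * The "cntlzw(n & (n<<1))" trick mentioned above finds where a run of two adjacent
 * 1 bits begins: a bit survives the AND only if the next less-significant bit is
 * also set. A commented-out C sketch:
 *
 *     #include <stdint.h>
 *
 *     // Returns the MSB-first index of the first 2-bit run of 1s, or -1 if none.
 *     static int first_double_bit_run(uint32_t n)
 *     {
 *         uint32_t runs = n & (n << 1);       // a set bit marks the start of a run
 *         if (runs == 0)
 *             return -1;                      // no two adjacent free slots here
 *         return __builtin_clz(runs);         // like cntlzw: 0 = most significant bit
 *     }
 *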
4959 lwz r4,mbfree(r3) ; Get the first mask
4960 lis r0,0x8000 ; Get the mask to clear the first free bit
4961 lwz r5,mbfree+4(r3) ; Get the second mask
4962 mr r12,r3 ; Save the block ptr
4963 slwi r6,r4,1 ; shift first word over
4964 and r6,r4,r6 ; lite start of double bit runs in 1st word
4965 slwi r7,r5,1 ; shift 2nd word over
4966 cntlzw r3,r6 ; Get first free 2-bit run in 1st word
4967 and r7,r5,r7 ; lite start of double bit runs in 2nd word
4968 srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word
4969 cntlzw r10,r7 ; Get first free field in second word
4970 srwi r11,r9,1 ; shift over for 2nd bit in 1st word
4971 andc r4,r4,r9 ; Turn off 1st bit in 1st word
4972 andc r4,r4,r11 ; turn off 2nd bit in 1st word
4973 bne mapalc2a ; Found two consecutive free bits in 1st word
4975 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
4976 li r3,0 ; assume failure
4977 srwi r11,r9,1 ; get mask for 2nd bit
4978 andc r5,r5,r9 ; Turn off 1st bit in 2nd word
4979 andc r5,r5,r11 ; turn off 2nd bit in 2nd word
4980 beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either
4981 addi r3,r10,32 ; set the correct number
4984 or. r0,r4,r5 ; any more bits set?
4985 stw r4,mbfree(r12) ; update bitmasks
4986 stw r5,mbfree+4(r12)
4987 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4991 dcbz r6,r12 ; zero out the 128-byte mapping
4992 dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines
4993 dcbz r8,r12 ; because the mapping may not be 128-byte aligned
4996 bnelr++ ; return if another bit remains set
4998 neg r3,r3 ; indicate we just returned the last bit
5002 rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31
5003 and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free?
5004 beqlr ; no, we failed
5005 rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word
5006 rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word
5007 li r3,31 ; get index of this field
5012 ; This routine initializes the hash table and PCA.
5013 ; It is done here because we may need to be 64-bit to do it.
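;
; What the initialization amounts to, as a commented-out C sketch of the 32-bit path
; (64-byte PTEGs, one 32-bit PCA word per PTEG stored just below the hash table base;
; 0xFF010000 is the "all slots free, steal from the end" pattern loaded below):
;
;     #include <stdint.h>
;     #include <string.h>
;
;     static void hash_init_sketch(uint8_t *hash_table_base, uint32_t hash_table_size)
;     {
;         memset(hash_table_base, 0, hash_table_size);      // the dcbz loop below
;         uint32_t *pca  = (uint32_t *)(void *)hash_table_base;
;         uint32_t ptegs = hash_table_size / 64;            // number of PTEGs in the table
;         for (uint32_t i = 1; i <= ptegs; i++)
;             pca[-(int32_t)i] = 0xFF010000u;               // all slots free, steal at end
;     }
;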
5017 .globl EXT(hw_hash_init)
5021 mfsprg r10,2 ; Get feature flags
5022 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5023 mtcrf 0x02,r10 ; move pf64Bit to cr6
5024 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5025 lis r4,0xFF01 ; Set all slots free and start steal at end
5026 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5027 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5029 lwz r12,0(r12) ; Get hash table size
5031 bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint)
5033 lwz r11,4(r11) ; Get hash table base
5035 hhiNext32: cmplw r3,r12 ; Have we reached the end?
5036 bge- hhiCPCA32 ; Yes...
5037 dcbz r3,r11 ; Clear the line
5038 addi r3,r3,32 ; Next one...
5039 b hhiNext32 ; Go on...
5041 hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4
5042 li r3,-4 ; Displacement to first PCA entry
5043 neg r12,r12 ; Get negative end of PCA
5045 hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry
5046 subi r3,r3,4 ; Next slot
5047 cmpw r3,r12 ; Have we finished?
5048 bge+ hhiNPCA32 ; Not yet...
5051 hhiSF: mfmsr r9 ; Save the MSR
5053 mr r0,r9 ; Get a copy of the MSR
5054 ld r11,0(r11) ; Get hash table base
5055 rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0)
5056 mtmsrd r0 ; Turn on SF
5060 hhiNext64: cmpld r3,r12 ; Have we reached the end?
5061 bge-- hhiCPCA64 ; Yes...
5062 dcbz128 r3,r11 ; Clear the line
5063 addi r3,r3,128 ; Next one...
5064 b hhiNext64 ; Go on...
5066 hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4
5067 li r3,-4 ; Displacement to first PCA entry
5068 neg r12,r12 ; Get negative end of PCA
5070 hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry
5071 subi r3,r3,4 ; Next slot
5072 cmpd r3,r12 ; Have we finished?
5073 bge++ hhiNPCA64 ; Not yet...
5075 mtmsrd r9 ; Turn off SF if it was off
5081 ; This routine sets up the hardware to start translation.
5082 ; Note that we do NOT start translation.
5086 .globl EXT(hw_setup_trans)
5088 LEXT(hw_setup_trans)
5090 mfsprg r11,0 ; Get the per_proc block
5091 mfsprg r12,2 ; Get feature flags
5094 mtcrf 0x02,r12 ; Move pf64Bit to cr6
5095 stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid
5096 stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux
5097 sth r2,ppInvSeg(r11) ; Force a reload of the SRs
5098 sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel
5100 bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint)
5102 li r9,0 ; Clear out a register
5105 mtdbatu 0,r9 ; Invalidate maps
5106 mtdbatl 0,r9 ; Invalidate maps
5107 mtdbatu 1,r9 ; Invalidate maps
5108 mtdbatl 1,r9 ; Invalidate maps
5109 mtdbatu 2,r9 ; Invalidate maps
5110 mtdbatl 2,r9 ; Invalidate maps
5111 mtdbatu 3,r9 ; Invalidate maps
5112 mtdbatl 3,r9 ; Invalidate maps
5114 mtibatu 0,r9 ; Invalidate maps
5115 mtibatl 0,r9 ; Invalidate maps
5116 mtibatu 1,r9 ; Invalidate maps
5117 mtibatl 1,r9 ; Invalidate maps
5118 mtibatu 2,r9 ; Invalidate maps
5119 mtibatl 2,r9 ; Invalidate maps
5120 mtibatu 3,r9 ; Invalidate maps
5121 mtibatl 3,r9 ; Invalidate maps
5123 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5124 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5125 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5126 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5127 lwz r11,4(r11) ; Get hash table base
5128 lwz r12,0(r12) ; Get hash table size
5129 subi r12,r12,1 ; Back off by 1
5130 rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image
5132 mtsdr1 r11 ; Ok, we now have the hash table set up
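;
; For reference, the 32-bit SDR1 image built just above amounts to this commented-out
; C sketch (HTABORG | HTABMASK, assuming the table size is a power of two and the base
; is suitably aligned so its low bits are zero):
;
;     #include <stdint.h>
;
;     static uint32_t make_sdr1_32(uint32_t hash_table_base, uint32_t hash_table_size)
;     {
;         uint32_t htabmask = ((hash_table_size - 1) >> 16) & 0x1FF;  // 9-bit mask field
;         return hash_table_base | htabmask;    // base's low bits are zero, so OR is safe
;     }
;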
5135 li r12,invalSpace ; Get the invalid segment value
5136 li r10,0 ; Start low
5138 hstsetsr: mtsrin r12,r10 ; Set the SR
5139 addis r10,r10,0x1000 ; Bump the segment
5140 mr. r10,r10 ; Are we finished?
5141 bne+ hstsetsr ; Nope...
5149 hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5150 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5151 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5152 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5153 ld r11,0(r11) ; Get hash table base
5154 lwz r12,0(r12) ; Get hash table size
5155 cntlzw r10,r12 ; Get the number of bits
5156 subfic r10,r10,13 ; Get the extra bits we need
5157 or r11,r11,r10 ; Add the size field to SDR1
5159 mtsdr1 r11 ; Ok, we now have the hash table set up
5162 li r0,0 ; Set an SLB slot index of 0
5163 slbia ; Trash all SLB entries (except for entry 0 that is)
5164 slbmfee r7,r0 ; Get the entry that is in SLB index 0
5165 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
5166 slbie r7 ; Invalidate it
5172 ; This routine turns on translation for the first time on a processor
5176 .globl EXT(hw_start_trans)
5178 LEXT(hw_start_trans)
5181 mfmsr r10 ; Get the msr
5182 ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation
5184 mtmsr r10 ; Everything falls apart here
5192 ; This routine validates a segment register.
5193 ; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va)
5196 ; r4 = segment[0:31]
5197 ; r5 = segment[32:63]
5201 ; Note that we transform the addr64_t (long long) parameters into single 64-bit values.
5202 ; Note that there is no reason to apply the key modifier here because this is only
5203 ; used for kernel accesses.
5207 .globl EXT(hw_map_seg)
5211 lwz r0,pmapSpace(r3) ; Get the space, we will need it soon
5212 lwz r9,pmapFlags(r3) ; Get the flags for the keys now
5213 mfsprg r10,2 ; Get feature flags
5216 ; Note: the following code would probably be easier to follow if I split it,
5217 ; but I just wanted to see if I could write this to work on both 32- and 64-bit
5218 ; machines combined.
5222 ; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines)
5223 ; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines)
5225 rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0:31] - no-op for 32-bit
5226 rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID
5227 mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6
5228 srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest
5229 rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:35]
5230 rlwimi r0,r0,14,4,17 ; Dup address space ID above itself
5231 rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines)
5232 rlwinm r2,r0,28,0,31 ; Rotate low nybble to top of low half
5233 rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32
5234 rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines)
5236 rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space
5237 ; concatenated together. There is garbage
5238 ; at the top for 64-bit but we will clean that out later
5240 rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit
5244 ; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or
5245 ; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines
5249 ; What we have now is:
5252 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5253 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5254 ; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value
5255 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5256 ; 0 0 1 2 3 - for 32-bit machines
5260 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5261 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5262 ; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA
5263 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5264 ; 0 0 1 2 3 - for 32-bit machines
5268 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5269 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5270 ; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment
5271 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5272 ; 0 0 1 2 3 - for 32-bit machines
5276 xor r8,r8,r2 ; Calculate VSID
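;
; Loosely, the VSID generation above is the following commented-out C sketch. The
; field widths are placeholders rather than the exact rlwimi bit positions, but the
; shape (replicated space ID copies XORed with the EA's segment bits) matches the
; code; the 14-bit stride mirrors the "Dup address space ID above itself" rotate.
;
;     #include <stdint.h>
;
;     static uint64_t make_vsid_sketch(uint64_t ea, uint32_t space_id)
;     {
;         uint64_t sid_hash = 0;
;         for (unsigned shift = 0; shift < 52; shift += 14)  // overlapped space ID copies
;             sid_hash ^= (uint64_t)space_id << shift;
;         uint64_t seg_bits = ea >> 28;                      // one step per 256MB segment
;         return (sid_hash ^ seg_bits) & ((1ULL << 52) - 1); // keep a VSID-sized value
;     }
;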
5278 bf-- pf64Bitb,hms32bit ; Skip out if 32-bit...
5279 mfsprg r12,0 ; Get the per_proc
5280 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5281 mfmsr r6 ; Get current MSR
5282 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5283 mtmsrd r0,1 ; Set only the EE bit to 0
5284 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5285 mfmsr r11 ; Get the MSR right now, after disabling EE
5286 andc r2,r11,r2 ; Turn off translation now
5287 rldimi r2,r0,63,0 ; Get the 64-bit mode (SF) bit turned on
5288 or r11,r11,r6 ; Turn on the EE bit if it was on
5289 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5290 isync ; Hang out a bit
5292 ld r6,validSegs(r12) ; Get the valid SLB entry flags
5293 sldi r9,r9,9 ; Position the key and noex bit
5295 rldimi r5,r8,12,0 ; Form the VSID/key
5297 not r3,r6 ; Make valids be 0s
5299 cntlzd r7,r3 ; Find a free SLB
5300 cmplwi r7,63 ; Did we find a free SLB entry?
5302 slbie r4 ; Since this ESID may still be in an SLBE, kill it
5304 oris r4,r4,0x0800 ; Turn on the valid bit in ESID
5305 addi r7,r7,1 ; Make sure we skip slb 0
5306 blt++ hmsFreeSeg ; Yes, go load it...
5309 ; No free SLB entries, select one that is in use and invalidate it
5311 lwz r2,ppSegSteal(r12) ; Get the next slot to steal
5312 addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
5313 addi r2,r2,1 ; Set next slot to steal
5314 slbmfee r3,r7 ; Get the entry that is in the selected spot
5315 subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap
5316 rldicr r3,r3,0,35 ; Clear the valid bit and the rest
5317 srawi r8,r8,31 ; Get -1 if steal index still in range
5318 slbie r3 ; Invalidate the in-use SLB entry
5319 and r2,r2,r8 ; Reset steal index when it should wrap
5322 stw r2,ppSegSteal(r12) ; Set the next slot to steal
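;
; The branch-free wrap of the steal index above (subi/srawi/and) is equivalent to this
; commented-out C sketch; slots and reserved stand in for 64 and (pmapSegCacheUse + 1):
;
;     static int bump_steal_index(int next, int slots, int reserved)
;     {
;         int stealable = slots - reserved;           // SLB entries we are allowed to steal
;         next = next + 1;
;         int in_range = (next < stealable) ? -1 : 0; // the assembly derives this via srawi
;         return next & in_range;                     // wraps back to 0 when out of range
;     }
;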
5324 ; We are now ready to stick the SLB entry in the SLB and mark it in use
5327 hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0
5328 rldimi r4,r7,0,58 ; Copy in the SLB entry selector
5329 srd r0,r0,r2 ; Set bit mask for allocation
5330 rldicl r5,r5,0,15 ; Clean out the unsupported bits
5331 or r6,r6,r0 ; Turn on the allocation flag
5333 slbmte r5,r4 ; Make that SLB entry
5335 std r6,validSegs(r12) ; Mark as valid
5336 mtmsrd r11 ; Restore the MSR
5343 mfsprg r12,1 ; Get the current activation
5344 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5345 rlwinm r8,r8,0,8,31 ; Clean up the VSID
5346 rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting
5347 lis r0,0x8000 ; Set bit 0
5348 rlwimi r8,r9,28,1,3 ; Insert the keys and N bit
5349 srw r0,r0,r2 ; Get bit corresponding to SR
5350 addi r7,r12,validSegs ; Point to the valid segment flags directly
5352 mtsrin r8,r4 ; Set the actual SR
5353 isync ; Need to make sure this is done
5355 hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags
5356 or r6,r6,r0 ; Show that SR is valid
5357 stwcx. r6,0,r7 ; Set the valid SR flags
5358 bne-- hmsrupt ; Had an interrupt, need to get flags again...
5364 ; This routine invalidates a segment register.
5368 .globl EXT(hw_blow_seg)
5372 mfsprg r10,2 ; Get feature flags
5373 mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5375 rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean
5377 bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit...
5379 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5380 mfmsr r6 ; Get current MSR
5381 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5382 mtmsrd r0,1 ; Set only the EE bit to 0
5383 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5384 mfmsr r11 ; Get the MSR right now, after disabling EE
5385 andc r2,r11,r2 ; Turn off translation now
5386 rldimi r2,r0,63,0 ; Get the 64-bit mode (SF) bit turned on
5387 or r11,r11,r6 ; Turn on the EE bit if it was on
5388 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5389 isync ; Hang out a bit
5391 rldimi r9,r3,32,0 ; Insert the top part of the ESID
5393 slbie r9 ; Invalidate the associated SLB entry
5395 mtmsrd r11 ; Restore the MSR
5402 mfsprg r12,1 ; Get the current activation
5403 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5404 addi r7,r12,validSegs ; Point to the valid segment flags directly
5405 lwarx r4,0,r7 ; Get and reserve the valid segment flags
5406 rlwinm r6,r9,4,28,31 ; Convert segment to number
5407 lis r2,0x8000 ; Set up a mask
5408 srw r2,r2,r6 ; Make a mask
5409 and. r0,r4,r2 ; See if this is even valid
5410 li r5,invalSpace ; Set the invalid address space VSID
5411 beqlr ; Leave if already invalid...
5413 mtsrin r5,r9 ; Slam the segment register
5414 isync ; Need to make sure this is done
5416 hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment
5417 stwcx. r4,0,r7 ; Set the valid SR flags
5418 beqlr++ ; Stored ok, no interrupt, time to leave...
5420 lwarx r4,0,r7 ; Get and reserve the valid segment flags again
5421 b hbsrupt ; Try again...
5424 ; This routine invalidates the entire pmap segment cache
5426 ; Translation is on, interrupts may or may not be enabled.
5430 .globl EXT(invalidateSegs)
5432 LEXT(invalidateSegs)
5434 la r10,pmapCCtl(r3) ; Point to the segment cache control
5435 eqv r2,r2,r2 ; Get all foxes
5437 isInv: lwarx r4,0,r10 ; Get the segment cache control value
5438 rlwimi r4,r2,0,0,15 ; Slam in all invalid bits
5439 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5440 bne-- isInv0 ; Yes, try again...
5442 stwcx. r4,0,r10 ; Try to invalidate it
5443 bne-- isInv ; Someone else just stuffed it...
5447 isInv0: li r4,lgKillResv ; Get reservation kill zone
5448 stwcx. r4,0,r4 ; Kill reservation
5450 isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control
5451 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5452 bne-- isInv ; Nope...
5453 b isInv1 ; Still locked do it again...
5456 ; This routine switches segment registers between kernel and user.
5457 ; We have some assumptions and rules:
5458 ; We are in the exception vectors
5459 ; pf64Bitb is set up
5460 ; R3 contains the MSR we are going to
5461 ; We can not use R4, R13, R20, R21, R29
5462 ; R13 is the savearea
5463 ; R29 has the per_proc
5465 ; We return R3 as 0 if we did not switch between kernel and user
5466 ; We also maintain and apply the user state key modifier used by VMM support;
5467 ; If we go to the kernel it is set to 0, otherwise it follows the user key bit in spcFlags
5472 .globl EXT(switchSegs)
5476 lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator)
5477 lwz r9,spcFlags(r29) ; Pick up the special user state flags
5478 rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit
5479 rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit
5480 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
5481 or r2,r2,r3 ; This will be 1 if we will be using user segments
5482 li r3,0 ; Get a selection mask
5483 cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg
5484 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
5485 sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user
5486 la r19,ppUserPmap(r29) ; Point to the current user pmap
5488 ; The following line is an exercise of a generally unreadable but recompile-friendly programming practice
5489 rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key
5491 andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise
5492 and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise
5493 and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise
5494 or r8,r8,r19 ; Get the pointer to the pmap we are using
5496 beqlr ; We are staying in the same mode, do not touch segs...
5498 lwz r28,0(r8) ; Get top half of pmap address
5499 lwz r10,4(r8) ; Get bottom half
5501 stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg
5502 rlwinm r28,r28,0,1,0 ; Copy top to top
5503 stw r30,ppMapFlags(r29) ; Set the key modifier
5504 rlwimi r28,r10,0,0,31 ; Insert bottom
5506 la r10,pmapCCtl(r28) ; Point to the segment cache control
5507 la r9,pmapSegCache(r28) ; Point to the segment cache
5509 ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control
5510 rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5511 ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit
5512 bne-- ssgLock0 ; Yup, this is in use...
5514 stwcx. r16,0,r10 ; Try to set the lock
5515 bne-- ssgLock ; Did we get contention?
5517 not r11,r15 ; Invert the invalids to valids
5518 li r17,0 ; Set a mask for the SRs we are loading
5519 isync ; Make sure we are all caught up
5521 bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it...
5524 slbia ; Trash all SLB entries (except for entry 0 that is)
5525 li r17,1 ; Get SLB index to load (skip slb 0)
5526 oris r0,r0,0x8000 ; Get set for a mask
5527 b ssg64Enter ; Start on a cache line...
5531 ssgLock0: li r15,lgKillResv ; Killing field
5532 stwcx. r15,0,r15 ; Kill reservation
5534 ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls
5535 rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5536 beq++ ssgLock ; Nope, lock is free again, go try to take it...
5537 b ssgLock1 ; Yup, still in use, keep waiting...
5539 ; This is the 32-bit address space switch code.
5540 ; We take a reservation on the segment cache and walk through.
5541 ; For each entry, we load the specified entries and remember which
5542 ; we did with a mask. Then, we figure out which segments should be
5543 ; invalid and then see which actually are. Then we load those with the
5544 ; defined invalid VSID.
5545 ; Afterwards, we unlock the segment cache.
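;
; The overall flow is captured by this commented-out C sketch. The segment cache entry
; layout is simplified, mtsrin() is a stand-in for the instruction of the same name,
; nentries is assumed to be at most 32, and the masks are MSB-first to mirror the
; cntlzw-driven loops below:
;
;     #include <stdint.h>
;
;     struct seg_cache_sk { uint32_t esid; uint32_t sr_image; };          // simplified entry
;     static void mtsrin(uint32_t sr_image, uint32_t ea) { (void)sr_image; (void)ea; }
;
;     static void switch_segs32_sketch(const struct seg_cache_sk *cache, uint32_t cache_valid,
;                                      int nentries, uint16_t *valid_srs, uint32_t invalid_vsid)
;     {
;         uint16_t now = 0;
;         for (int i = 0; i < nentries; i++) {
;             if (!(cache_valid & (0x80000000u >> i)))
;                 continue;                                // cache slot not in use
;             int sr = (cache[i].esid >> 28) & 0xF;        // segment number from the ESID
;             mtsrin(cache[i].sr_image, cache[i].esid);    // load that segment register
;             now |= (uint16_t)(0x8000 >> sr);             // remember which SRs we loaded
;         }
;         uint16_t stale = (uint16_t)(*valid_srs & ~now);  // valid before, not loaded now
;         for (int sr = 0; sr < 16; sr++)
;             if (stale & (0x8000 >> sr))
;                 mtsrin(invalid_vsid, (uint32_t)sr << 28); // stamp with the invalid VSID
;         *valid_srs = now;
;     }
;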
5550 ssg32Enter: cntlzw r12,r11 ; Find the next slot in use
5551 cmplwi r12,pmapSegCacheUse ; See if we are done
5552 slwi r14,r12,4 ; Index to the cache slot
5553 lis r0,0x8000 ; Get set for a mask
5554 add r14,r14,r9 ; Point to the entry
5556 bge- ssg32Done ; All done...
5558 lwz r5,sgcESID+4(r14) ; Get the ESID part
5559 srw r2,r0,r12 ; Form a mask for the one we are loading
5560 lwz r7,sgcVSID+4(r14) ; And get the VSID bottom
5562 andc r11,r11,r2 ; Clear the bit
5563 lwz r6,sgcVSID(r14) ; And get the VSID top
5565 rlwinm r2,r5,4,28,31 ; Change the segment number to a number
5567 xor r7,r7,r30 ; Modify the key before we actually set it
5568 srw r0,r0,r2 ; Get a mask for the SR we are loading
5569 rlwinm r8,r7,19,1,3 ; Insert the keys and N bit
5570 or r17,r17,r0 ; Remember the segment
5571 rlwimi r8,r7,20,12,31 ; Insert 4:23 the VSID
5572 rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents
5574 mtsrin r8,r5 ; Load the segment
5575 b ssg32Enter ; Go enter the next...
5579 ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags
5580 stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5582 lis r0,0x8000 ; Get set for a mask
5583 li r2,invalSpace ; Set the invalid address space VSID
5587 andc r16,r16,r17 ; Get list of SRs that were valid before but not now
5590 ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate
5591 cmplwi r18,16 ; Have we finished?
5592 srw r22,r0,r18 ; Get the mask bit
5593 rlwinm r23,r18,28,0,3 ; Get the segment register we need
5594 andc r16,r16,r22 ; Get rid of the guy we just did
5595 bge ssg32Really ; Yes, we are really done now...
5597 mtsrin r2,r23 ; Invalidate the SR
5598 b ssg32Inval ; Do the next...
5603 stw r17,validSegs(r29) ; Set the valid SR flags
5604 li r3,1 ; Set kernel/user transition
5608 ; This is the 64-bit address space switch code.
5609 ; First we blow away all of the SLB entries.
5611 ; Then we walk the segment cache, loading the SLB. Afterwards, we release the cache lock
5613 ; Note that because we have to treat SLBE 0 specially, we do not ever use it...
5614 ; It's a performance thing...
5619 ssg64Enter: cntlzw r12,r11 ; Find the next slot in use
5620 cmplwi r12,pmapSegCacheUse ; See if we are done
5621 slwi r14,r12,4 ; Index to the cache slot
5622 srw r16,r0,r12 ; Form a mask for the one we are loading
5623 add r14,r14,r9 ; Point to the entry
5624 andc r11,r11,r16 ; Clear the bit
5625 bge-- ssg64Done ; All done...
5627 ld r5,sgcESID(r14) ; Get the ESID part
5628 ld r6,sgcVSID(r14) ; And get the VSID part
5629 oris r5,r5,0x0800 ; Turn on the valid bit
5630 or r5,r5,r17 ; Insert the SLB slot
5631 xor r6,r6,r30 ; Modify the key before we actually set it
5632 addi r17,r17,1 ; Bump to the next slot
5633 slbmte r6,r5 ; Make that SLB entry
5634 b ssg64Enter ; Go enter the next...
5638 ssg64Done: stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5640 eqv r16,r16,r16 ; Load up with all foxes
5641 subfic r17,r17,64 ; Get the number of 1 bits we need
5643 sld r16,r16,r17 ; Get a mask for the used SLB entries
5644 li r3,1 ; Set kernel/user transition
5645 std r16,validSegs(r29) ; Set the valid SR flags
5649 ; mapSetUp - this function sets initial state for all mapping functions.
5650 ; We turn off all translations (physical), disable interruptions, and
5651 ; enter 64-bit mode if applicable.
5653 ; We also return the original MSR in r11, the feature flags in R12,
5654 ; and CR6 set up so we can do easy branches for 64-bit
5655 ; hw_clear_maps assumes r10, r9 will not be trashed.
5659 .globl EXT(mapSetUp)
5663 lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask
5664 mfsprg r12,2 ; Get feature flags
5665 ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well
5666 mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5667 mfmsr r11 ; Save the MSR
5668 mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5669 andc r11,r11,r0 ; Clear VEC and FP for good
5670 ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR
5671 li r2,1 ; Prepare for 64 bit
5672 andc r0,r11,r0 ; Clear the rest
5673 bt pfNoMSRirb,msuNoMSR ; No MSR...
5674 bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint)
5676 mtmsr r0 ; Translation and all off
5677 isync ; Toss prefetch
5682 msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
5683 mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR
5689 msuNoMSR: mr r2,r3 ; Save R3 across call
5690 mr r3,r0 ; Get the new MSR value
5691 li r0,loadMSR ; Get the MSR setter SC
5693 mr r3,r2 ; Restore R3
5694 blr ; Go back all set up...
5698 ; Guest shadow assist -- remove all guest mappings
5700 ; Remove all mappings for a guest pmap from the shadow hash table.
5703 ; r3 : address of pmap, 32-bit kernel virtual address
5705 ; Non-volatile register usage:
5706 ; r24 : host pmap's physical address
5707 ; r25 : VMM extension block's physical address
5708 ; r26 : physent address
5709 ; r27 : guest pmap's space ID number
5710 ; r28 : current hash table page index
5711 ; r29 : guest pmap's physical address
5712 ; r30 : saved msr image
5713 ; r31 : current mapping
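;
; At a high level the walk below is the following commented-out C sketch; mapping_sk,
; the flag names, and the helpers are stand-ins for the shadow-hash structures,
; mapInvPtexx, and the physent unchaining code:
;
;     #include <stdint.h>
;
;     struct mapping_sk { uint32_t flags; uint16_t space; };
;     #define MPG_FREE_SK    0x01   // placeholder flag bits
;     #define MPG_DORMANT_SK 0x02
;
;     static void disconnect_pte_sk(struct mapping_sk *m)       { (void)m; } // mapInvPtexx
;     static void unchain_from_physent_sk(struct mapping_sk *m) { (void)m; } // alias chain
;
;     static void rem_all_guest_sketch(struct mapping_sk *pages[], int npages,
;                                      int slots_per_page, uint16_t space_id)
;     {
;         for (int pg = 0; pg < npages; pg++)
;             for (int i = 0; i < slots_per_page; i++) {
;                 struct mapping_sk *m = &pages[pg][i];
;                 if ((m->flags & MPG_FREE_SK) || m->space != space_id)
;                     continue;                            // not one of this guest's
;                 if (!(m->flags & MPG_DORMANT_SK))
;                     disconnect_pte_sk(m);                // active: invalidate its PTE
;                 unchain_from_physent_sk(m);              // drop the gva->phys link
;                 m->flags = MPG_FREE_SK;                  // mark the slot free
;             }
;     }
;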
5716 .globl EXT(hw_rem_all_gv)
5720 #define graStackSize ((31-24+1)*4)+4
5721 stwu r1,-(FM_ALIGN(graStackSize)+FM_SIZE)(r1)
5722 ; Mint a new stack frame
5723 mflr r0 ; Get caller's return address
5724 mfsprg r11,2 ; Get feature flags
5725 mtcrf 0x02,r11 ; Insert feature flags into cr6
5726 stw r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5727 ; Save caller's return address
5728 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5729 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5730 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5731 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5732 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5733 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5734 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5735 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5737 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5739 bt++ pf64Bitb,gra64Salt ; Test for 64-bit machine
5740 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5741 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5742 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5743 b graStart ; Get to it
5744 gra64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5745 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5746 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5747 graStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5748 xor r29,r3,r9 ; Convert pmap_t virt->real
5749 mr r30,r11 ; Save caller's msr image
5751 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5752 bl sxlkExclusive ; Get lock exclusive
5754 lwz r3,vxsGra(r25) ; Get remove all count
5755 addi r3,r3,1 ; Increment remove all count
5756 stw r3,vxsGra(r25) ; Update remove all count
5758 li r28,0 ; r28 <- first hash page table index to search
5759 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5761 la r31,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
5762 rlwinm r11,r28,GV_PGIDX_SZ_LG2,GV_HPAGE_MASK
5763 ; Convert page index into page physical index offset
5764 add r31,r31,r11 ; Calculate page physical index entry address
5765 bt++ pf64Bitb,gra64Page ; Separate handling for 64-bit
5766 lwz r31,4(r31) ; r31 <- first slot in hash table page to examine
5767 b graLoop ; Examine all slots in this page
5768 gra64Page: ld r31,0(r31) ; r31 <- first slot in hash table page to examine
5769 b graLoop ; Examine all slots in this page
5772 graLoop: lwz r3,mpFlags(r31) ; Get mapping's flags
5773 lhz r4,mpSpace(r31) ; Get mapping's space ID number
5774 rlwinm r6,r3,0,mpgFree ; Isolate guest free mapping flag
5775 xor r4,r4,r27 ; Compare space ID number
5776 or. r0,r6,r4 ; cr0_eq <- !free && space id match
5777 bne graMiss ; Not one of ours, skip it
5779 lwz r11,vxsGraHits(r25) ; Get remove hit count
5780 addi r11,r11,1 ; Increment remove hit count
5781 stw r11,vxsGraHits(r25) ; Update remove hit count
5783 rlwinm. r0,r3,0,mpgDormant ; Is this entry dormant?
5784 bne graRemPhys ; Yes, nothing to disconnect
5786 lwz r11,vxsGraActive(r25) ; Get remove active count
5787 addi r11,r11,1 ; Increment remove active count
5788 stw r11,vxsGraActive(r25) ; Update remove active count
5790 bt++ pf64Bitb,graDscon64 ; Handle 64-bit disconnect separately
5791 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
5792 ; r31 <- mapping's physical address
5793 ; r3 -> PTE slot physical address
5794 ; r4 -> High-order 32 bits of PTE
5795 ; r5 -> Low-order 32 bits of PTE
5797 ; r7 -> PCA physical address
5798 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
5799 b graFreePTE ; Join 64-bit path to release the PTE
5800 graDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
5801 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
5802 graFreePTE: mr. r3,r3 ; Was there a valid PTE?
5803 beq- graRemPhys ; No valid PTE, we're almost done
5804 lis r0,0x8000 ; Prepare free bit for this slot
5805 srw r0,r0,r2 ; Position free bit
5806 or r6,r6,r0 ; Set it in our PCA image
5807 lwz r8,mpPte(r31) ; Get PTE pointer
5808 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
5809 stw r8,mpPte(r31) ; Save invalidated PTE pointer
5810 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
5811 stw r6,0(r7) ; Update PCA and unlock the PTEG
5814 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
5815 bl mapFindLockPN ; Find 'n' lock this page's physent
5816 mr. r26,r3 ; Got lock on our physent?
5817 beq-- graBadPLock ; No, time to bail out
5819 crset cr1_eq ; cr1_eq <- previous link is the anchor
5820 bt++ pf64Bitb,graRemove64 ; Use 64-bit version on 64-bit machine
5821 la r11,ppLink+4(r26) ; Point to chain anchor
5822 lwz r9,ppLink+4(r26) ; Get chain anchor
5823 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
5825 graRemLoop: beq- graRemoveMiss ; End of chain, this is not good
5826 cmplw r9,r31 ; Is this the mapping to remove?
5827 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
5828 bne graRemNext ; No, chain onward
5829 bt cr1_eq,graRemRetry ; Mapping to remove is chained from anchor
5830 stw r8,0(r11) ; Unchain gpv->phys mapping
5831 b graRemoved ; Exit loop
5833 lwarx r0,0,r11 ; Get previous link
5834 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
5835 stwcx. r0,0,r11 ; Update previous link
5836 bne- graRemRetry ; Lost reservation, retry
5837 b graRemoved ; Good work, let's get outta here
5839 graRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
5840 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5841 mr. r9,r8 ; Does next entry exist?
5842 b graRemLoop ; Carry on
5845 li r7,ppLFAmask ; Get mask to clean up mapping pointer
5846 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
5847 la r11,ppLink(r26) ; Point to chain anchor
5848 ld r9,ppLink(r26) ; Get chain anchor
5849 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
5850 graRem64Lp: beq-- graRemoveMiss ; End of chain, this is not good
5851 cmpld r9,r31 ; Is this the mapping to remove?
5852 ld r8,mpAlias(r9) ; Get forward chain pointer
5853 bne graRem64Nxt ; Not mapping to remove, chain on, dude
5854 bt cr1_eq,graRem64Rt ; Mapping to remove is chained from anchor
5855 std r8,0(r11) ; Unchain gpv->phys mapping
5856 b graRemoved ; Exit loop
5857 graRem64Rt: ldarx r0,0,r11 ; Get previous link
5858 and r0,r0,r7 ; Get flags
5859 or r0,r0,r8 ; Insert new forward pointer
5860 stdcx. r0,0,r11 ; Slam it back in
5861 bne-- graRem64Rt ; Lost reservation, retry
5862 b graRemoved ; Good work, let's go home
5865 la r11,mpAlias(r9) ; Point to (soon to be) previous link
5866 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5867 mr. r9,r8 ; Does next entry exist?
5868 b graRem64Lp ; Carry on
5871 mr r3,r26 ; r3 <- physent's address
5872 bl mapPhysUnlock ; Unlock the physent (and its chain of mappings)
5874 lwz r3,mpFlags(r31) ; Get mapping's flags
5875 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
5876 ori r3,r3,mpgFree ; Mark mapping free
5877 stw r3,mpFlags(r31) ; Update flags
5879 graMiss: addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping
5880 rlwinm. r0,r31,0,GV_PAGE_MASK ; End of hash table page?
5881 bne graLoop ; No, examine next slot
5882 addi r28,r28,1 ; Increment hash table page index
5883 cmplwi r28,GV_HPAGES ; End of hash table?
5884 bne graPgLoop ; Examine next hash table page
5886 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5887 bl sxlkUnlock ; Release host pmap's search lock
5889 bt++ pf64Bitb,graRtn64 ; Handle 64-bit separately
5890 mtmsr r30 ; Restore 'rupts, translation
5891 isync ; Throw a small wrench into the pipeline
5892 b graPopFrame ; Nothing to do now but pop a frame and return
5893 graRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
5895 lwz r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5896 ; Get caller's return address
5897 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
5898 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
5899 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
5900 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
5901 mtlr r0 ; Prepare return address
5902 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
5903 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
5904 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
5905 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
5906 lwz r1,0(r1) ; Pop stack frame
5907 blr ; Return to caller
5911 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
5912 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
5913 li r3,failMapping ; The BOMB, Dmitri.
5914 sc ; The hydrogen bomb.
5918 ; Guest shadow assist -- remove local guest mappings
5920 ; Remove local mappings for a guest pmap from the shadow hash table.
5923 ; r3 : address of guest pmap, 32-bit kernel virtual address
5925 ; Non-volatile register usage:
5926 ; r20 : current active map word's physical address
5927 ; r21 : current hash table page address
5928 ; r22 : updated active map word in process
5929 ; r23 : active map word in process
5930 ; r24 : host pmap's physical address
5931 ; r25 : VMM extension block's physical address
5932 ; r26 : physent address
5933 ; r27 : guest pmap's space ID number
5934 ; r28 : current active map index
5935 ; r29 : guest pmap's physical address
5936 ; r30 : saved msr image
5937 ; r31 : current mapping
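;
; The active-map scan at the heart of the loop below boils down to this commented-out
; C sketch (one 32-bit word per band of mapping slots, scanned MSB first):
;
;     #include <stdint.h>
;
;     static void scan_active_word_sketch(uint32_t active, void (*visit)(int slot))
;     {
;         while (active != 0) {
;             int slot = __builtin_clz(active);       // cntlzw: next lit bit, MSB first
;             active &= ~(0x80000000u >> slot);       // reset that bit
;             visit(slot);                            // handle the corresponding mapping
;         }
;     }
;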
5940 .globl EXT(hw_rem_local_gv)
5942 LEXT(hw_rem_local_gv)
5944 #define grlStackSize ((31-20+1)*4)+4
5945 stwu r1,-(FM_ALIGN(grlStackSize)+FM_SIZE)(r1)
5946 ; Mint a new stack frame
5947 mflr r0 ; Get caller's return address
5948 mfsprg r11,2 ; Get feature flags
5949 mtcrf 0x02,r11 ; Insert feature flags into cr6
5950 stw r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5951 ; Save caller's return address
5952 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5953 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5954 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5955 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5956 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5957 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5958 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5959 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5960 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
5961 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
5962 stw r21,FM_ARG0+0x28(r1) ; Save non-volatile r21
5963 stw r20,FM_ARG0+0x2C(r1) ; Save non-volatile r20
5965 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5967 bt++ pf64Bitb,grl64Salt ; Test for 64-bit machine
5968 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5969 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5970 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5971 b grlStart ; Get to it
5972 grl64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5973 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5974 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5976 grlStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5977 xor r29,r3,r9 ; Convert pmap_t virt->real
5978 mr r30,r11 ; Save caller's msr image
5980 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5981 bl sxlkExclusive ; Get lock exclusive
5983 li r28,0 ; r28 <- index of first active map word to search
5984 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5985 b grlMap1st ; Examine first map word
5988 grlNextMap: stw r22,0(r21) ; Save updated map word
5989 addi r28,r28,1 ; Increment map word index
5990 cmplwi r28,GV_MAP_WORDS ; See if we're done
5991 beq grlDone ; Yup, let's get outta here
5993 grlMap1st: la r20,VMX_ACTMAP_OFFSET(r25) ; Get base of active map word array
5994 rlwinm r11,r28,GV_MAPWD_SZ_LG2,GV_MAP_MASK
5995 ; Convert map index into map index offset
5996 add r20,r20,r11 ; Calculate map array element address
5997 lwz r22,0(r20) ; Get active map word at index
5998 mr. r23,r22 ; Any active mappings indicated?
5999 beq grlNextMap ; Nope, check next word
6001 la r21,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
6002 rlwinm r11,r28,GV_MAP_SHIFT,GV_HPAGE_MASK
6003 ; Extract page index from map word index and convert
6004 ; into page physical index offset
6005 add r21,r21,r11 ; Calculate page physical index entry address
6006 bt++ pf64Bitb,grl64Page ; Separate handling for 64-bit
6007 lwz r21,4(r21) ; Get selected hash table page's address
6008 b grlLoop ; Examine all slots in this page
6009 grl64Page: ld r21,0(r21) ; Get selected hash table page's address
6010 b grlLoop ; Examine all slots in this page
6013 grlLoop: cntlzw r11,r23 ; Get next active bit lit in map word
6014 cmplwi r11,32 ; Any active mappings left in this word?
6015 lis r12,0x8000 ; Prepare mask to reset bit
6016 srw r12,r12,r11 ; Position mask bit
6017 andc r23,r23,r12 ; Reset lit bit
6018 beq grlNextMap ; No bits lit, examine next map word
6020 slwi r31,r11,GV_SLOT_SZ_LG2 ; Get slot offset in slot band from lit bit number
6021 rlwinm r31,r28,GV_BAND_SHIFT,GV_BAND_MASK
6022 ; Extract slot band number from index and insert
6023 add r31,r31,r21 ; Add hash page address yielding mapping slot address
6025 lwz r3,mpFlags(r31) ; Get mapping's flags
6026 lhz r4,mpSpace(r31) ; Get mapping's space ID number
6027 rlwinm r5,r3,0,mpgGlobal ; Extract global bit
6028 xor r4,r4,r27 ; Compare space ID number
6029 or. r4,r4,r5 ; (space id miss || global)
6030 bne grlLoop ; Not one of ours, skip it
6031 andc r22,r22,r12 ; Reset active bit corresponding to this mapping
6032 ori r3,r3,mpgDormant ; Mark entry dormant
6033 stw r3,mpFlags(r31) ; Update mapping's flags
6035 bt++ pf64Bitb,grlDscon64 ; Handle 64-bit disconnect separately
6036 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6037 ; r31 <- mapping's physical address
6038 ; r3 -> PTE slot physical address
6039 ; r4 -> High-order 32 bits of PTE
6040 ; r5 -> Low-order 32 bits of PTE
6042 ; r7 -> PCA physical address
6043 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6044 b grlFreePTE ; Join 64-bit path to release the PTE
6045 grlDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6046 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6047 grlFreePTE: mr. r3,r3 ; Was there a valid PTE?
6048 beq- grlLoop ; No valid PTE, we're done with this mapping
6049 lis r0,0x8000 ; Prepare free bit for this slot
6050 srw r0,r0,r2 ; Position free bit
6051 or r6,r6,r0 ; Set it in our PCA image
6052 lwz r8,mpPte(r31) ; Get PTE pointer
6053 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6054 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6055 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
6056 stw r6,0(r7) ; Update PCA and unlock the PTEG
6057 b grlLoop ; On to next active mapping in this map word
6059 grlDone: la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
6060 bl sxlkUnlock ; Release host pmap's search lock
6062 bt++ pf64Bitb,grlRtn64 ; Handle 64-bit separately
6063 mtmsr r30 ; Restore 'rupts, translation
6064 isync ; Throw a small wrench into the pipeline
6065 b grlPopFrame ; Nothing to do now but pop a frame and return
6066 grlRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
6068 lwz r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6069 ; Get caller's return address
6070 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6071 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6072 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6073 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6074 mtlr r0 ; Prepare return address
6075 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6076 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6077 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6078 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6079 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6080 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6081 lwz r21,FM_ARG0+0x28(r1) ; Restore non-volatile r21
6082 lwz r20,FM_ARG0+0x2C(r1) ; Restore non-volatile r20
6083 lwz r1,0(r1) ; Pop stack frame
6084 blr ; Return to caller
6088 ; Guest shadow assist -- resume a guest mapping
6090 ; Locates the specified dormant mapping, and if it exists validates it and makes it active again.
6094 ; r3 : address of host pmap, 32-bit kernel virtual address
6095 ; r4 : address of guest pmap, 32-bit kernel virtual address
6096 ; r5 : host virtual address, high-order 32 bits
6097 ; r6 : host virtual address, low-order 32 bits
6098 ; r7 : guest virtual address, high-order 32 bits
6099 ; r8 : guest virtual address, low-order 32 bits
6100 ; r9 : guest mapping protection code
6102 ; Non-volatile register usage:
6103 ; r23 : VMM extension block's physical address
6104 ; r24 : physent physical address
6105 ; r25 : caller's msr image from mapSetUp
6106 ; r26 : guest mapping protection code
6107 ; r27 : host pmap physical address
6108 ; r28 : guest pmap physical address
6109 ; r29 : host virtual address
6110 ; r30 : guest virtual address
6111 ; r31 : gva->phys mapping's physical address
6114 .globl EXT(hw_res_map_gv)
6118 #define grsStackSize ((31-23+1)*4)+4
6120 stwu r1,-(FM_ALIGN(grsStackSize)+FM_SIZE)(r1)
6121 ; Mint a new stack frame
6122 mflr r0 ; Get caller's return address
6123 mfsprg r11,2 ; Get feature flags
6124 mtcrf 0x02,r11 ; Insert feature flags into cr6
6125 stw r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6126 ; Save caller's return address
6127 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6128 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6129 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6130 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6131 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6132 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6133 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6134 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6135 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6137 rlwinm r29,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of host vaddr
6138 rlwinm r30,r8,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6139 mr r26,r9 ; Copy guest mapping protection code
6141 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6142 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6143 bt++ pf64Bitb,grs64Salt ; Handle 64-bit machine separately
6144 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6145 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6146 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6147 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6148 srwi r11,r30,12 ; Form shadow hash:
6149 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6150 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6151 ; Form index offset from hash page number
6152 add r31,r31,r10 ; r31 <- hash page index entry
6153 lwz r31,4(r31) ; r31 <- hash page paddr
6154 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6155 ; r31 <- hash group paddr
6156 b grsStart ; Get to it
6158 grs64Salt: rldimi r29,r5,32,0 ; Insert high-order 32 bits of 64-bit host vaddr
6159 rldimi r30,r7,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6160 ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6161 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6162 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6163 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6164 srwi r11,r30,12 ; Form shadow hash:
6165 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6166 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6167 ; Form index offset from hash page number
6168 add r31,r31,r10 ; r31 <- hash page index entry
6169 ld r31,0(r31) ; r31 <- hash page paddr
6170 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6171 ; r31 <- hash group paddr
6173 grsStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6174 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6175 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6176 mr r25,r11 ; Save caller's msr image
6178 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6179 bl sxlkExclusive ; Get lock exclusive
6181 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6182 mtctr r0 ; in this group
6183 bt++ pf64Bitb,grs64Search ; Test for 64-bit machine
6185 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6186 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6187 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6188 b grs32SrchLp ; Let the search begin!
6192 mr r6,r3 ; r6 <- current mapping slot's flags
6193 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6194 mr r7,r4 ; r7 <- current mapping slot's space ID
6195 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6196 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6197 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6198 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6199 xor r7,r7,r9 ; Compare space ID
6200 or r0,r11,r7 ; r0 <- !(!free && space match)
6201 xor r8,r8,r30 ; Compare virtual address
6202 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6203 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6205 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6206 bdnz grs32SrchLp ; Iterate
6208 mr r6,r3 ; r6 <- current mapping slot's flags
6209 clrrwi r5,r5,12 ; Remove flags from virtual address
6210 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6211 xor r4,r4,r9 ; Compare space ID
6212 or r0,r11,r4 ; r0 <- !(!free && space match)
6213 xor r5,r5,r30 ; Compare virtual address
6214 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6215 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6216 b grsSrchMiss ; No joy in our hash group
6219 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6220 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6221 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6222 b grs64SrchLp ; Let the search begin!
6226 mr r6,r3 ; r6 <- current mapping slot's flags
6227 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6228 mr r7,r4 ; r7 <- current mapping slot's space ID
6229 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6230 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6231 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6232 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6233 xor r7,r7,r9 ; Compare space ID
6234 or r0,r11,r7 ; r0 <- !(!free && space match)
6235 xor r8,r8,r30 ; Compare virtual address
6236 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6237 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6239 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6240 bdnz grs64SrchLp ; Iterate
6242 mr r6,r3 ; r6 <- current mapping slot's flags
6243 clrrdi r5,r5,12 ; Remove flags from virtual address
6244 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6245 xor r4,r4,r9 ; Compare space ID
6246 or r0,r11,r4 ; r0 <- !(!free && space match)
6247 xor r5,r5,r30 ; Compare virtual address
6248 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6249 bne grsSrchMiss ; No joy in our hash group
6252 rlwinm. r0,r6,0,mpgDormant ; Is the mapping dormant?
6253 bne grsFindHost ; Yes, nothing to disconnect
6255 bt++ pf64Bitb,grsDscon64 ; Handle 64-bit disconnect separately
6256 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6257 ; r31 <- mapping's physical address
6258 ; r3 -> PTE slot physical address
6259 ; r4 -> High-order 32 bits of PTE
6260 ; r5 -> Low-order 32 bits of PTE
6262 ; r7 -> PCA physical address
6263 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6264 b grsFreePTE ; Join 64-bit path to release the PTE
6265 grsDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6266 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6267 grsFreePTE: mr. r3,r3 ; Was there a valid PTE?
6268 beq- grsFindHost ; No valid PTE, we're almost done
6269 lis r0,0x8000 ; Prepare free bit for this slot
6270 srw r0,r0,r2 ; Position free bit
6271 or r6,r6,r0 ; Set it in our PCA image
6272 lwz r8,mpPte(r31) ; Get PTE pointer
6273 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6274 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6275 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6276 stw r6,0(r7) ; Update PCA and unlock the PTEG
6280 // We now have a dormant guest mapping that matches our space id and virtual address. Our next
6281 // step is to locate the host mapping that completes the guest mapping's connection to a physical
6282 // frame. The guest and host mappings must connect to the same physical frame, so they must both
6283 // be chained on the same physent. We search the physent chain for a host mapping matching our
6284 // host's space id and the host virtual address. If we succeed, we know that the entire chain
6285 // of mappings (guest virtual->host virtual->physical) is valid, so the dormant mapping can be
6286 // resumed. If we fail to find the specified host virtual->physical mapping, it is because the
6287 // host virtual or physical address has changed since the guest mapping was suspended, so it
6289 // is no longer valid and cannot be resumed -- we therefore delete the guest mapping and tell
6289 // our caller that it will have to take its long path, translating the host virtual address
6290 // through the host's skiplist and installing a new guest mapping.
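//
// The physent search just described is, in outline, the following commented-out C
// sketch; the structure and field names are stand-ins for the real mapping and
// physent layouts:
//
//     #include <stdint.h>
//
//     struct mapping_sk2 { struct mapping_sk2 *alias; uint32_t type; uint32_t space; uint64_t va; };
//
//     static struct mapping_sk2 *find_host_mapping(struct mapping_sk2 *chain, uint32_t host_space,
//                                                  uint64_t host_va, uint32_t type_normal)
//     {
//         for (struct mapping_sk2 *m = chain; m != NULL; m = m->alias) {
//             if (m->type != type_normal)
//                 continue;                        // only normal mappings qualify
//             if (m->space == host_space && m->va == host_va)
//                 return m;                        // hva->phys link is still intact
//         }
//         return NULL;                             // host side changed; guest mapping is stale
//     }
//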
6292 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6293 bl mapFindLockPN ; Find 'n' lock this page's physent
6294 mr. r24,r3 ; Got lock on our physent?
6295 beq-- grsBadPLock ; No, time to bail out
6297 bt++ pf64Bitb,grsPFnd64 ; 64-bit version of physent chain search
6299 lwz r9,ppLink+4(r24) ; Get first mapping on physent
6300 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
6301 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
6302 grsPELoop: mr. r12,r9 ; Got a mapping to look at?
6303 beq- grsPEMiss ; Nope, we've missed hva->phys mapping
6304 lwz r7,mpFlags(r12) ; Get mapping's flags
6305 lhz r4,mpSpace(r12) ; Get mapping's space id number
6306 lwz r5,mpVAddr+4(r12) ; Get mapping's virtual address
6307 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
6309 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6310 rlwinm r5,r5,0,~mpHWFlags ; Bye-bye unsightly flags
6311 xori r0,r0,mpNormal ; Normal mapping?
6312 xor r4,r4,r6 ; Compare w/ host space id number
6313 xor r5,r5,r29 ; Compare w/ host virtual address
6314 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6315 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6316 beq grsPEHit ; Hit, found the host mapping
6317 b grsPELoop ; Iterate
6319 grsPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
6320 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6321 ld r9,ppLink(r24) ; Get first mapping on physent
6322 lwz r6,pmapSpace(r27) ; Get pmap's space id number
6323 andc r9,r9,r0 ; Cleanup mapping pointer
6324 grsPELp64: mr. r12,r9 ; Got a mapping to look at?
6325 beq-- grsPEMiss ; Nope, we've missed hva->phys mapping
6326 lwz r7,mpFlags(r12) ; Get mapping's flags
6327 lhz r4,mpSpace(r12) ; Get mapping's space id number
6328 ld r5,mpVAddr(r12) ; Get mapping's virtual address
6329 ld r9,mpAlias(r12) ; Next mapping in physent alias chain
6330 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6331 rldicr r5,r5,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
6332 xori r0,r0,mpNormal ; Normal mapping?
6333 xor r4,r4,r6 ; Compare w/ host space id number
6334 xor r5,r5,r29 ; Compare w/ host virtual address
6335 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6336 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6337 beq grsPEHit ; Hit, found the host mapping
6338 b grsPELp64 ; Iterate
6340 grsPEHit: lwz r0,mpVAddr+4(r31) ; Get va byte containing protection bits
6341 rlwimi r0,r26,0,mpPP ; Insert new protection bits
6342 stw r0,mpVAddr+4(r31) ; Write 'em back
6344 eieio ; Ensure previous mapping updates are visible
6345 lwz r0,mpFlags(r31) ; Get flags
6346 rlwinm r0,r0,0,~mpgDormant ; Turn off dormant flag
6347 stw r0,mpFlags(r31) ; Set updated flags, entry is now valid
6349 li r31,mapRtOK ; Indicate success
6350 b grsRelPhy ; Exit through physent lock release
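/*
 *	On a hit the mapping is revived in two visible steps: the new protection is
 *	merged into the low byte of mpVAddr, an eieio orders that update, and only then
 *	is the dormant flag cleared so the entry becomes valid.  A minimal C sketch,
 *	with the flag and field positions assumed:
 *
 *		#include <stdint.h>
 *
 *		#define MP_PP        0x00000007u         // assumed mpPP protection field
 *		#define MPG_DORMANT  0x00000800u         // assumed mpgDormant flag
 *
 *		struct gmap_sk { uint32_t mpFlags; uint32_t mpVAddrLo; };
 *
 *		static void resume_guest_mapping(struct gmap_sk *mp, uint32_t new_pp)
 *		{
 *			mp->mpVAddrLo = (mp->mpVAddrLo & ~MP_PP) | (new_pp & MP_PP);
 *			__sync_synchronize();                // eieio: protection visible first
 *			mp->mpFlags &= ~MPG_DORMANT;         // entry is now valid again
 *		}
 */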
6352 grsPEMiss: crset cr1_eq ; cr1_eq <- previous link is the anchor
6353 bt++ pf64Bitb,grsRemove64 ; Use 64-bit version on 64-bit machine
6354 la r11,ppLink+4(r24) ; Point to chain anchor
6355 lwz r9,ppLink+4(r24) ; Get chain anchor
6356 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6357 grsRemLoop: beq- grsPEMissMiss ; End of chain, this is not good
6358 cmplw r9,r31 ; Is this the mapping to remove?
6359 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6360 bne grsRemNext ; No, chain onward
6361 bt cr1_eq,grsRemRetry ; Mapping to remove is chained from anchor
6362 stw r8,0(r11) ; Unchain gpv->phys mapping
6363 b grsDelete ; Finish deleting mapping
6365 grsRemRetry: lwarx r0,0,r11 ; Get previous link
6366 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6367 stwcx. r0,0,r11 ; Update previous link
6368 bne- grsRemRetry ; Lost reservation, retry
6369 b grsDelete ; Finish deleting mapping
6372 grsRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6373 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6374 mr. r9,r8 ; Does next entry exist?
6375 b grsRemLoop ; Carry on
6378 grsRemove64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6379 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6380 la r11,ppLink(r24) ; Point to chain anchor
6381 ld r9,ppLink(r24) ; Get chain anchor
6382 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6383 grsRem64Lp: beq-- grsPEMissMiss ; End of chain, this is not good
6384 cmpld r9,r31 ; Is this the mapping to remove?
6385 ld r8,mpAlias(r9) ; Get forward chain pointer
6386 bne grsRem64Nxt ; Not mapping to remove, chain on, dude
6387 bt cr1_eq,grsRem64Rt ; Mapping to remove is chained from anchor
6388 std r8,0(r11) ; Unchain gpv->phys mapping
6389 b grsDelete ; Finish deleting mapping
6390 grsRem64Rt: ldarx r0,0,r11 ; Get previous link
6391 and r0,r0,r7 ; Get flags
6392 or r0,r0,r8 ; Insert new forward pointer
6393 stdcx. r0,0,r11 ; Slam it back in
6394 bne-- grsRem64Rt ; Lost reservation, retry
6395 b grsDelete ; Finish deleting mapping
6399 grsRem64Nxt: la r11,mpAlias(r9) ; Point to (soon to be) previous link
6400 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6401 mr. r9,r8 ; Does next entry exist?
6402 b grsRem64Lp ; Carry on
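/*
 *	Both removal loops above unchain the stale guest mapping from its physent.  The
 *	interesting case is removal at the head of the chain: the anchor word also
 *	carries flag bits, so the lwarx/stwcx. (ldarx/stdcx. on 64-bit) retry loop
 *	rewrites the pointer while preserving those flags.  A C sketch of the same idea,
 *	using a compare-and-swap to stand in for the reservation:
 *
 *		#include <stdint.h>
 *		#include <stdbool.h>
 *
 *		#define PP_FLAGS 0xFull                  // assumed flag bits kept in the anchor
 *
 *		// 'prev_link' is either the physent anchor or the previous mapping's
 *		// mpAlias field; 'next_pa' is the victim's forward pointer.
 *		static void unchain_mapping(volatile uint64_t *prev_link,
 *		                            bool prev_is_anchor, uint64_t next_pa)
 *		{
 *			if (!prev_is_anchor) {               // mid-chain: a plain store is enough
 *				*prev_link = next_pa;
 *				return;
 *			}
 *			for (;;) {                           // anchor: preserve its flag bits atomically
 *				uint64_t old = *prev_link;
 *				uint64_t upd = (old & PP_FLAGS) | next_pa;
 *				if (__sync_bool_compare_and_swap(prev_link, old, upd))
 *					return;                      // models the stwcx./stdcx. success case
 *			}
 *		}
 */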
6405 grsDelete: lwz r3,mpFlags(r31) ; Get mapping's flags
6406 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
6407 ori r3,r3,mpgFree ; Mark mapping free
6408 stw r3,mpFlags(r31) ; Update flags
6410 li r31,mapRtNotFnd ; Didn't succeed
6412 grsRelPhy: mr r3,r24 ; r3 <- physent addr
6413 bl mapPhysUnlock ; Unlock physent chain
6415 grsRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6416 bl sxlkUnlock ; Release host pmap search lock
6418 grsRtn: mr r3,r31 ; r3 <- result code
6419 bt++ pf64Bitb,grsRtn64 ; Handle 64-bit separately
6420 mtmsr r25 ; Restore 'rupts, translation
6421 isync ; Throw a small wrench into the pipeline
6422 b grsPopFrame ; Nothing to do now but pop a frame and return
6423 grsRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6425 grsPopFrame: lwz r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6426 ; Get caller's return address
6427 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6428 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6429 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6430 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6431 mtlr r0 ; Prepare return address
6432 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6433 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6434 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6435 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6436 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6437 lwz r1,0(r1) ; Pop stack frame
6438 blr ; Return to caller
6442 grsSrchMiss: li r31,mapRtNotFnd ; Could not locate requested mapping
6443 b grsRelPmap ; Exit through host pmap search lock release
6447 grsBadPLock: grsPEMissMiss: lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6448 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6449 li r3,failMapping ; The BOMB, Dmitri.
6450 sc ; The hydrogen bomb.
6454 ; Guest shadow assist -- add a guest mapping
6456 ; Adds a guest mapping.
6459 ; r3 : address of host pmap, 32-bit kernel virtual address
6460 ; r4 : address of guest pmap, 32-bit kernel virtual address
6461 ; r5 : guest virtual address, high-order 32 bits
6462 ; r6 : guest virtual address, low-order 32 bits (with mpHWFlags)
6463 ; r7 : new mapping's flags
6464 ; r8 : physical address, 32-bit page number
6466 ; Non-volatile register usage:
6467 ; r22 : hash group's physical address
6468 ; r23 : VMM extension block's physical address
6469 ; r24 : mapping's flags
6470 ; r25 : caller's msr image from mapSetUp
6471 ; r26 : physent physical address
6472 ; r27 : host pmap physical address
6473 ; r28 : guest pmap physical address
6474 ; r29 : physical address, 32-bit 4k-page number
6475 ; r30 : guest virtual address
6476 ; r31 : gva->phys mapping's physical address
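/*
 *	All of these guest-shadow routines locate a mapping with the same shadow hash:
 *	hash = spaceID ^ (gva >> 12); the upper part of the hash picks an entry in the
 *	hash page physical index (VMX_HPIDX_OFFSET), the lower part picks a hash group
 *	within that page.  A rough C sketch; the geometry constants below are
 *	assumptions standing in for GV_HPAGE_* and GV_HGRP_*:
 *
 *		#include <stdint.h>
 *
 *		#define GV_GROUPS_PER_PAGE  512u         // assumed groups per hash page
 *		#define GV_HASH_PAGES       16u          // assumed number of hash pages
 *		#define GV_GROUP_BYTES      256u         // assumed bytes per hash group
 *
 *		// 'hpidx' is the hash page physical index; returns the hash group paddr.
 *		static uint64_t gv_hash_group(const uint64_t *hpidx, uint32_t space, uint64_t gva)
 *		{
 *			uint32_t hash  = space ^ (uint32_t)(gva >> 12);   // spaceID ^ (vaddr >> 12)
 *			uint32_t page  = (hash / GV_GROUPS_PER_PAGE) % GV_HASH_PAGES;
 *			uint32_t group = hash % GV_GROUPS_PER_PAGE;
 *			return hpidx[page] + (uint64_t)group * GV_GROUP_BYTES;
 *		}
 */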
6480 .globl EXT(hw_add_map_gv)
6482 LEXT(hw_add_map_gv)
6485 #define gadStackSize ((31-22+1)*4)+4
6487 stwu r1,-(FM_ALIGN(gadStackSize)+FM_SIZE)(r1)
6488 ; Mint a new stack frame
6489 mflr r0 ; Get caller's return address
6490 mfsprg r11,2 ; Get feature flags
6491 mtcrf 0x02,r11 ; Insert feature flags into cr6
6492 stw r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6493 ; Save caller's return address
6494 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6495 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6496 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6497 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6498 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6499 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6500 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6501 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6502 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6503 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
6505 rlwinm r30,r5,0,1,0 ; Get high-order 32 bits of guest vaddr
6506 rlwimi r30,r6,0,0,31 ; Get low-order 32 bits of guest vaddr
6507 mr r24,r7 ; Copy guest mapping's flags
6508 mr r29,r8 ; Copy target frame's physical address
6510 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6511 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6512 bt++ pf64Bitb,gad64Salt ; Test for 64-bit machine
6513 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6514 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6515 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6516 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6517 srwi r11,r30,12 ; Form shadow hash:
6518 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6519 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6520 ; Form index offset from hash page number
6521 add r22,r22,r10 ; r22 <- hash page index entry
6522 lwz r22,4(r22) ; r22 <- hash page paddr
6523 rlwimi r22,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6524 ; r22 <- hash group paddr
6525 b gadStart ; Get to it
6527 gad64Salt: ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6528 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6529 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6530 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6531 srwi r11,r30,12 ; Form shadow hash:
6532 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6533 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6534 ; Form index offset from hash page number
6535 add r22,r22,r10 ; r22 <- hash page index entry
6536 ld r22,0(r22) ; r22 <- hash page paddr
6537 insrdi r22,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6538 ; r22 <- hash group paddr
6540 gadStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6541 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6542 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6543 mr r25,r11 ; Save caller's msr image
6545 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6546 bl sxlkExclusive ; Get lock exclusive
6548 mr r31,r22 ; Prepare to search this group
6549 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6550 mtctr r0 ; in this group
6551 bt++ pf64Bitb,gad64Search ; Test for 64-bit machine
6553 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6554 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6555 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6556 clrrwi r12,r30,12 ; r12 <- virtual address we're searching for
6557 b gad32SrchLp ; Let the search begin!
6561 mr r6,r3 ; r6 <- current mapping slot's flags
6562 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6563 mr r7,r4 ; r7 <- current mapping slot's space ID
6564 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6565 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6566 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6567 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6568 xor r7,r7,r9 ; Compare space ID
6569 or r0,r11,r7 ; r0 <- !(!free && space match)
6570 xor r8,r8,r12 ; Compare virtual address
6571 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6572 beq gadRelPmap ; Join common path on hit (r31 points to guest mapping)
6574 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6575 bdnz gad32SrchLp ; Iterate
6577 mr r6,r3 ; r6 <- current mapping slot's flags
6578 clrrwi r5,r5,12 ; Remove flags from virtual address
6579 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6580 xor r4,r4,r9 ; Compare space ID
6581 or r0,r11,r4 ; r0 <- !(!free && space match)
6582 xor r5,r5,r12 ; Compare virtual address
6583 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6584 beq gadRelPmap ; Join common path on hit (r31 points to guest mapping)
6585 b gadScan ; No joy in our hash group
6588 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6589 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6590 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6591 clrrdi r12,r30,12 ; r12 <- virtual address we're searching for
6592 b gad64SrchLp ; Let the search begin!
6596 mr r6,r3 ; r6 <- current mapping slot's flags
6597 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6598 mr r7,r4 ; r7 <- current mapping slot's space ID
6599 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6600 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6601 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6602 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6603 xor r7,r7,r9 ; Compare space ID
6604 or r0,r11,r7 ; r0 <- !(!free && space match)
6605 xor r8,r8,r12 ; Compare virtual address
6606 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6607 beq gadRelPmap ; Hit, let upper-level redrive sort it out
6609 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6610 bdnz gad64SrchLp ; Iterate
6612 mr r6,r3 ; r6 <- current mapping slot's flags
6613 clrrdi r5,r5,12 ; Remove flags from virtual address
6614 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6615 xor r4,r4,r9 ; Compare space ID
6616 or r0,r11,r4 ; r0 <- !(!free && space match)
6617 xor r5,r5,r12 ; Compare virtual address
6618 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6619 bne gadScan ; No joy in our hash group
6620 b gadRelPmap ; Hit, let upper-level redrive sort it out
6622 gadScan: lbz r12,mpgCursor(r22) ; Get group's cursor
6623 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6624 ; Prepare to address slot at cursor
6625 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6626 mtctr r0 ; in this group
6627 or r2,r22,r12 ; r2 <- 1st mapping to search
6628 lwz r3,mpFlags(r2) ; r3 <- 1st mapping slot's flags
6629 li r11,0 ; No dormant entries found yet
6630 b gadScanLoop ; Let the search begin!
6634 gadScanLoop: addi r12,r12,GV_SLOT_SZ ; Calculate next slot number to search
6635 rlwinm r12,r12,0,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6636 ; Trim off any carry, wrapping into slot number range
6637 mr r31,r2 ; r31 <- current mapping's address
6638 or r2,r22,r12 ; r2 <- next mapping to search
6639 mr r6,r3 ; r6 <- current mapping slot's flags
6640 lwz r3,mpFlags(r2) ; r3 <- next mapping slot's flags
6641 rlwinm. r0,r6,0,mpgFree ; Test free flag
6642 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6643 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6644 xori r0,r0,mpgDormant ; Invert dormant flag
6645 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6646 bne gadNotDorm ; Not dormant or we've already seen one
6647 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6648 gadNotDorm: bdnz gadScanLoop ; Iterate
6650 mr r31,r2 ; r31 <- final mapping's address
6651 rlwinm. r0,r6,0,mpgFree ; Test free flag in final mapping
6652 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6653 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6654 xori r0,r0,mpgDormant ; Invert dormant flag
6655 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6656 bne gadCkDormant ; Not dormant or we've already seen one
6657 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6660 gadCkDormant: mr. r31,r11 ; Get dormant mapping, if any, and test
6661 bne gadUpCursor ; Go update the cursor, we'll take the dormant entry
6664 lbz r12,mpgCursor(r22) ; Get group's cursor
6665 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6666 ; Prepare to address slot at cursor
6667 or r31,r22,r12 ; r31 <- address of mapping to steal
6669 bt++ pf64Bitb,gadDscon64 ; Handle 64-bit disconnect separately
6670 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6671 ; r31 <- mapping's physical address
6672 ; r3 -> PTE slot physical address
6673 ; r4 -> High-order 32 bits of PTE
6674 ; r5 -> Low-order 32 bits of PTE
6676 ; r7 -> PCA physical address
6677 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6678 b gadFreePTE ; Join 64-bit path to release the PTE
6679 gadDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6680 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6681 gadFreePTE: mr. r3,r3 ; Was there a valid PTE?
6682 beq- gadUpCursor ; No valid PTE, we're almost done
6683 lis r0,0x8000 ; Prepare free bit for this slot
6684 srw r0,r0,r2 ; Position free bit
6685 or r6,r6,r0 ; Set it in our PCA image
6686 lwz r8,mpPte(r31) ; Get PTE pointer
6687 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6688 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6689 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6690 stw r6,0(r7) ; Update PCA and unlock the PTEG
6693 gadUpCursor: rlwinm r12,r31,(32-GV_SLOT_SZ_LG2),GV_SLOT_MASK
6694 ; Recover slot number from stolen mapping's address
6695 addi r12,r12,1 ; Increment slot number
6696 rlwinm r12,r12,0,GV_SLOT_MASK ; Clip to slot number range
6697 stb r12,mpgCursor(r22) ; Update group's cursor
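/*
 *	Slot allocation policy for a new guest mapping, as implemented by the scan
 *	above: starting at the group's cursor, take the first free slot; failing that,
 *	reuse the first dormant slot seen; failing that, steal the slot the cursor
 *	points at.  Whenever a dormant or stolen slot is reused, the cursor is advanced
 *	just past it so evictions rotate through the group.  A C sketch with assumed
 *	flag values and group geometry:
 *
 *		#include <stdint.h>
 *		#include <stddef.h>
 *
 *		#define GV_SLOTS     8u                  // assumed slots per hash group
 *		#define MPG_FREE     0x80000000u         // assumed mpgFree flag
 *		#define MPG_DORMANT  0x40000000u         // assumed mpgDormant flag
 *
 *		struct gslot  { uint32_t mpFlags; };
 *		struct ggroup { struct gslot slot[GV_SLOTS]; uint8_t mpgCursor; };
 *
 *		static struct gslot *gv_pick_slot(struct ggroup *g)
 *		{
 *			struct gslot *dormant = NULL;
 *			uint8_t cur = g->mpgCursor % GV_SLOTS;
 *
 *			for (uint32_t i = 0; i < GV_SLOTS; i++) {
 *				struct gslot *s = &g->slot[(cur + i) % GV_SLOTS];
 *				if (s->mpFlags & MPG_FREE)
 *					return s;                    // free slot: cursor is left alone
 *				if ((s->mpFlags & MPG_DORMANT) && dormant == NULL)
 *					dormant = s;                 // remember the first dormant slot
 *			}
 *			struct gslot *pick = (dormant != NULL) ? dormant : &g->slot[cur];
 *			g->mpgCursor = (uint8_t)(((pick - g->slot) + 1) % GV_SLOTS);
 *			return pick;                         // dormant if any, else steal at cursor
 *		}
 */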
6699 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6700 bl mapFindLockPN ; Find 'n' lock this page's physent
6701 mr. r26,r3 ; Got lock on our physent?
6702 beq-- gadBadPLock ; No, time to bail out
6704 crset cr1_eq ; cr1_eq <- previous link is the anchor
6705 bt++ pf64Bitb,gadRemove64 ; Use 64-bit version on 64-bit machine
6706 la r11,ppLink+4(r26) ; Point to chain anchor
6707 lwz r9,ppLink+4(r26) ; Get chain anchor
6708 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6709 gadRemLoop: beq- gadPEMissMiss ; End of chain, this is not good
6710 cmplw r9,r31 ; Is this the mapping to remove?
6711 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6712 bne gadRemNext ; No, chain onward
6713 bt cr1_eq,gadRemRetry ; Mapping to remove is chained from anchor
6714 stw r8,0(r11) ; Unchain gpv->phys mapping
6715 b gadDelDone ; Finish deleting mapping
6717 gadRemRetry: lwarx r0,0,r11 ; Get previous link
6718 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6719 stwcx. r0,0,r11 ; Update previous link
6720 bne- gadRemRetry ; Lost reservation, retry
6721 b gadDelDone ; Finish deleting mapping
6723 gadRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6724 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6725 mr. r9,r8 ; Does next entry exist?
6726 b gadRemLoop ; Carry on
6729 gadRemove64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6730 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6731 la r11,ppLink(r26) ; Point to chain anchor
6732 ld r9,ppLink(r26) ; Get chain anchor
6733 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6734 gadRem64Lp: beq-- gadPEMissMiss ; End of chain, this is not good
6735 cmpld r9,r31 ; Is this the mapping to remove?
6736 ld r8,mpAlias(r9) ; Get forward chain pointer
6737 bne gadRem64Nxt ; Not mapping to remove, chain on, dude
6738 bt cr1_eq,gadRem64Rt ; Mapping to remove is chained from anchor
6739 std r8,0(r11) ; Unchain gpv->phys mapping
6740 b gadDelDone ; Finish deleting mapping
6741 gadRem64Rt: ldarx r0,0,r11 ; Get previous link
6742 and r0,r0,r7 ; Get flags
6743 or r0,r0,r8 ; Insert new forward pointer
6744 stdcx. r0,0,r11 ; Slam it back in
6745 bne-- gadRem64Rt ; Lost reservation, retry
6746 b gadDelDone ; Finish deleting mapping
6750 gadRem64Nxt: la r11,mpAlias(r9) ; Point to (soon to be) previous link
6751 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6752 mr. r9,r8 ; Does next entry exist?
6753 b gadRem64Lp ; Carry on
6756 gadDelDone: mr r3,r26 ; Get physent address
6757 bl mapPhysUnlock ; Unlock physent chain
6760 gadFillMap: lwz r12,pmapSpace(r28) ; Get guest space id number
6761 li r2,0 ; Get a zero
6762 stw r24,mpFlags(r31) ; Set mapping's flags
6763 sth r12,mpSpace(r31) ; Set mapping's space id number
6764 stw r2,mpPte(r31) ; Set mapping's pte pointer invalid
6765 stw r29,mpPAddr(r31) ; Set mapping's physical address
6766 bt++ pf64Bitb,gadVA64 ; Use 64-bit version on 64-bit machine
6767 stw r30,mpVAddr+4(r31) ; Set mapping's virtual address (w/flags)
6768 b gadChain ; Continue with chaining mapping to physent
6769 gadVA64: std r30,mpVAddr(r31) ; Set mapping's virtual address (w/flags)
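/*
 *	Filling the chosen slot is a straight sequence of stores, done before the
 *	mapping is chained to its physent and therefore before anyone can observe it:
 *	flags, space id, an invalid PTE pointer, the physical page number, and the
 *	flagged guest virtual address.  Sketched in C with an assumed slot layout:
 *
 *		#include <stdint.h>
 *
 *		struct gmap_fill {
 *			uint32_t mpFlags; uint16_t mpSpace;
 *			uint32_t mpPte;   uint32_t mpPAddr; uint64_t mpVAddr;
 *		};
 *
 *		static void gv_fill_slot(struct gmap_fill *mp, uint32_t flags,
 *		                         uint16_t space, uint32_t ppn, uint64_t gva_flags)
 *		{
 *			mp->mpFlags = flags;                 // caller-supplied mapping flags
 *			mp->mpSpace = space;                 // guest pmap's space id
 *			mp->mpPte   = 0;                     // no PTE yet: pointer invalid
 *			mp->mpPAddr = ppn;                   // physical 4K page number
 *			mp->mpVAddr = gva_flags;             // guest vaddr plus flag bits
 *		}
 */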
6771 gadChain: mr r3,r29 ; r3 <- physical frame address
6772 bl mapFindLockPN ; Find 'n' lock this page's physent
6773 mr. r26,r3 ; Got lock on our physent?
6774 beq-- gadBadPLock ; No, time to bail out
6776 bt++ pf64Bitb,gadChain64 ; Use 64-bit version on 64-bit machine
6777 lwz r12,ppLink+4(r26) ; Get forward chain
6778 rlwinm r11,r12,0,~ppFlags ; Get physent's forward pointer sans flags
6779 rlwimi r12,r31,0,~ppFlags ; Insert new mapping, preserve physent flags
6780 stw r11,mpAlias+4(r31) ; New mapping will head chain
6781 stw r12,ppLink+4(r26) ; Point physent to new mapping
6782 b gadFinish ; All over now...
6784 gadChain64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6785 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
6786 ld r12,ppLink(r26) ; Get forward chain
6787 andc r11,r12,r7 ; Get physent's forward chain pointer sans flags
6788 and r12,r12,r7 ; Isolate pointer's flags
6789 or r12,r12,r31 ; Insert new mapping's address forming pointer
6790 std r11,mpAlias(r31) ; New mapping will head chain
6791 std r12,ppLink(r26) ; Point physent to new mapping
6793 gadFinish: eieio ; Ensure new mapping is completely visible
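/*
 *	Publishing the mapping is a push onto the head of the physent's alias chain:
 *	the new mapping's mpAlias takes the old head, the anchor keeps its flag bits but
 *	now points at the new mapping, and the eieio above makes the fully built mapping
 *	visible before the physent lock is dropped.  A C sketch (caller holds the
 *	physent lock; the flag mask is an assumption):
 *
 *		#include <stdint.h>
 *
 *		#define PP_FLAGS 0xFull                  // assumed flag bits in the anchor
 *
 *		struct map_ch { uint64_t mpAlias; };
 *
 *		static void physent_push(volatile uint64_t *anchor, struct map_ch *mp,
 *		                         uint64_t mp_pa)
 *		{
 *			uint64_t old = *anchor;
 *			mp->mpAlias  = old & ~PP_FLAGS;      // new mapping points at the old head
 *			*anchor      = (old & PP_FLAGS) | mp_pa;  // anchor flags kept, new head set
 *			__sync_synchronize();                // eieio before the lock is released
 *		}
 */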
6795 gadRelPhy: mr r3,r26 ; r3 <- physent addr
6796 bl mapPhysUnlock ; Unlock physent chain
6798 gadRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6799 bl sxlkUnlock ; Release host pmap search lock
6801 bt++ pf64Bitb,gadRtn64 ; Handle 64-bit separately
6802 mtmsr r25 ; Restore 'rupts, translation
6803 isync ; Throw a small wrench into the pipeline
6804 b gadPopFrame ; Nothing to do now but pop a frame and return
6805 gadRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6807 gadPopFrame: lwz r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6808 ; Get caller's return address
6809 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6810 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6811 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6812 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6813 mtlr r0 ; Prepare return address
6814 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6815 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6816 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6817 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6818 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6819 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6820 lwz r1,0(r1) ; Pop stack frame
6821 blr ; Return to caller
6825 gadBadPLock: gadPEMissMiss: lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6826 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6827 li r3,failMapping ; The BOMB, Dmitri.
6828 sc ; The hydrogen bomb.
6832 ; Guest shadow assist -- suspend a guest mapping
6834 ; Suspends a guest mapping.
6837 ; r3 : address of host pmap, 32-bit kernel virtual address
6838 ; r4 : address of guest pmap, 32-bit kernel virtual address
6839 ; r5 : guest virtual address, high-order 32 bits
6840 ; r6 : guest virtual address, low-order 32 bits
6842 ; Non-volatile register usage:
6843 ; r26 : VMM extension block's physical address
6844 ; r27 : host pmap physical address
6845 ; r28 : guest pmap physical address
6846 ; r29 : caller's msr image from mapSetUp
6847 ; r30 : guest virtual address
6848 ; r31 : gva->phys mapping's physical address
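/*
 *	The suspend path is: hash to the mapping's group, find an active (neither free
 *	nor dormant) slot matching the guest space id and virtual address, tear down
 *	any live PTE via mapInvPte32/64, and mark the slot dormant.  A condensed C
 *	sketch of that flow, with assumed flag values:
 *
 *		#include <stdint.h>
 *
 *		#define MPG_FREE     0x80000000u         // assumed mpgFree flag
 *		#define MPG_DORMANT  0x40000000u         // assumed mpgDormant flag
 *
 *		struct gmap_s { uint32_t mpFlags; uint16_t mpSpace; uint64_t mpVAddr; };
 *
 *		static void gv_suspend(struct gmap_s *grp, uint32_t nslots,
 *		                       uint16_t space, uint64_t gva)
 *		{
 *			for (uint32_t i = 0; i < nslots; i++) {
 *				struct gmap_s *mp = &grp[i];
 *				if (mp->mpFlags & (MPG_FREE | MPG_DORMANT))
 *					continue;                    // only active entries are suspended
 *				if (mp->mpSpace != space || (mp->mpVAddr >> 12) != (gva >> 12))
 *					continue;
 *				// ...disconnect any live PTE here (mapInvPte32/64 in the code below)...
 *				mp->mpFlags |= MPG_DORMANT;      // entry is now dormant
 *				return;
 *			}
 *		}
 */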
6852 .globl EXT(hw_susp_map_gv)
6854 LEXT(hw_susp_map_gv)
6856 #define gsuStackSize ((31-26+1)*4)+4
6858 stwu r1,-(FM_ALIGN(gsuStackSize)+FM_SIZE)(r1)
6859 ; Mint a new stack frame
6860 mflr r0 ; Get caller's return address
6861 mfsprg r11,2 ; Get feature flags
6862 mtcrf 0x02,r11 ; Insert feature flags into cr6
6863 stw r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6864 ; Save caller's return address
6865 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6866 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6867 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6868 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6869 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6870 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6872 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6874 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6875 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6876 bt++ pf64Bitb,gsu64Salt ; Test for 64-bit machine
6878 lwz r26,pmapVmmExtPhys+4(r3) ; r26 <- VMM pmap extension block paddr
6879 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6880 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6881 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6882 srwi r11,r30,12 ; Form shadow hash:
6883 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6884 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6885 ; Form index offset from hash page number
6886 add r31,r31,r10 ; r31 <- hash page index entry
6887 lwz r31,4(r31) ; r31 <- hash page paddr
6888 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6889 ; r31 <- hash group paddr
6890 b gsuStart ; Get to it
6891 gsu64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6892 ld r26,pmapVmmExtPhys(r3) ; r26 <- VMM pmap extension block paddr
6893 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6894 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6895 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6896 srwi r11,r30,12 ; Form shadow hash:
6897 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6898 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6899 ; Form index offset from hash page number
6900 add r31,r31,r10 ; r31 <- hash page index entry
6901 ld r31,0(r31) ; r31 <- hash page paddr
6902 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6903 ; r31 <- hash group paddr
6905 gsuStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6906 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6907 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6908 mr r29,r11 ; Save caller's msr image
6910 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6911 bl sxlkExclusive ; Get lock exclusive
6913 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6914 mtctr r0 ; in this group
6915 bt++ pf64Bitb,gsu64Search ; Test for 64-bit machine
6917 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6918 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6919 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6920 b gsu32SrchLp ; Let the search begin!
6924 mr r6,r3 ; r6 <- current mapping slot's flags
6925 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6926 mr r7,r4 ; r7 <- current mapping slot's space ID
6927 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6928 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6929 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6930 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6931 xor r7,r7,r9 ; Compare space ID
6932 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6933 xor r8,r8,r30 ; Compare virtual address
6934 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6935 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6937 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6938 bdnz gsu32SrchLp ; Iterate
6940 mr r6,r3 ; r6 <- current mapping slot's flags
6941 clrrwi r5,r5,12 ; Remove flags from virtual address
6942 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6943 xor r4,r4,r9 ; Compare space ID
6944 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6945 xor r5,r5,r30 ; Compare virtual address
6946 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6947 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6948 b gsuSrchMiss ; No joy in our hash group
6951 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6952 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6953 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6954 b gsu64SrchLp ; Let the search begin!
6958 mr r6,r3 ; r6 <- current mapping slot's flags
6959 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6960 mr r7,r4 ; r7 <- current mapping slot's space ID
6961 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6962 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6963 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6964 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6965 xor r7,r7,r9 ; Compare space ID
6966 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6967 xor r8,r8,r30 ; Compare virtual address
6968 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6969 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6971 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6972 bdnz gsu64SrchLp ; Iterate
6974 mr r6,r3 ; r6 <- current mapping slot's flags
6975 clrrdi r5,r5,12 ; Remove flags from virtual address
6976 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6977 xor r4,r4,r9 ; Compare space ID
6978 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6979 xor r5,r5,r30 ; Compare virtual address
6980 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6981 bne gsuSrchMiss ; No joy in our hash group
6984 gsuSrchHit: bt++ pf64Bitb,gsuDscon64 ; Handle 64-bit disconnect separately
6985 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6986 ; r31 <- mapping's physical address
6987 ; r3 -> PTE slot physical address
6988 ; r4 -> High-order 32 bits of PTE
6989 ; r5 -> Low-order 32 bits of PTE
6991 ; r7 -> PCA physical address
6992 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6993 b gsuFreePTE ; Join 64-bit path to release the PTE
6994 gsuDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6995 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6996 gsuFreePTE: mr. r3,r3 ; Was there a valid PTE?
6997 beq- gsuNoPTE ; No valid PTE, we're almost done
6998 lis r0,0x8000 ; Prepare free bit for this slot
6999 srw r0,r0,r2 ; Position free bit
7000 or r6,r6,r0 ; Set it in our PCA image
7001 lwz r8,mpPte(r31) ; Get PTE pointer
7002 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
7003 stw r8,mpPte(r31) ; Save invalidated PTE pointer
7004 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
7005 stw r6,0(r7) ; Update PCA and unlock the PTEG
7007 gsuNoPTE: lwz r3,mpFlags(r31) ; Get mapping's flags
7008 ori r3,r3,mpgDormant ; Mark entry dormant
7009 stw r3,mpFlags(r31) ; Save updated flags
7010 eieio ; Ensure update is visible when we unlock
7013 gsuSrchMiss: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
7014 bl sxlkUnlock ; Release host pmap search lock
7016 bt++ pf64Bitb,gsuRtn64 ; Handle 64-bit separately
7017 mtmsr r29 ; Restore 'rupts, translation
7018 isync ; Throw a small wrench into the pipeline
7019 b gsuPopFrame ; Nothing to do now but pop a frame and return
7020 gsuRtn64: mtmsrd r29 ; Restore 'rupts, translation, 32-bit mode
7022 gsuPopFrame: lwz r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7023 ; Get caller's return address
7024 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7025 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7026 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7027 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7028 mtlr r0 ; Prepare return address
7029 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7030 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7031 lwz r1,0(r1) ; Pop stack frame
7032 blr ; Return to caller
7035 ; Guest shadow assist -- test guest mapping reference and change bits
7037 ; Locates the specified guest mapping, and if it exists gathers its reference
7038 ; and change bits, optionally resetting them.
7041 ; r3 : address of host pmap, 32-bit kernel virtual address
7042 ; r4 : address of guest pmap, 32-bit kernel virtual address
7043 ; r5 : guest virtual address, high-order 32 bits
7044 ; r6 : guest virtual address, low-order 32 bits
7045 ; r7 : reset boolean
7047 ; Non-volatile register usage:
7048 ; r24 : VMM extension block's physical address
7049 ; r25 : return code (w/reference and change bits)
7050 ; r26 : reset boolean
7051 ; r27 : host pmap physical address
7052 ; r28 : guest pmap physical address
7053 ; r29 : caller's msr image from mapSetUp
7054 ; r30 : guest virtual address
7055 ; r31 : gva->phys mapping's physical address
7059 .globl EXT(hw_test_rc_gv)
7061 LEXT(hw_test_rc_gv)
7063 #define gtdStackSize ((31-24+1)*4)+4
7065 stwu r1,-(FM_ALIGN(gtdStackSize)+FM_SIZE)(r1)
7066 ; Mint a new stack frame
7067 mflr r0 ; Get caller's return address
7068 mfsprg r11,2 ; Get feature flags
7069 mtcrf 0x02,r11 ; Insert feature flags into cr6
7070 stw r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7071 ; Save caller's return address
7072 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7073 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7074 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7075 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7076 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7077 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7078 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7079 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
7081 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7083 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7084 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
7086 bt++ pf64Bitb,gtd64Salt ; Test for 64-bit machine
7088 lwz r24,pmapVmmExtPhys+4(r3) ; r24 <- VMM pmap extension block paddr
7089 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
7090 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
7091 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7092 srwi r11,r30,12 ; Form shadow hash:
7093 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7094 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7095 ; Form index offset from hash page number
7096 add r31,r31,r10 ; r31 <- hash page index entry
7097 lwz r31,4(r31) ; r31 <- hash page paddr
7098 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7099 ; r31 <- hash group paddr
7100 b gtdStart ; Get to it
7102 gtd64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7103 ld r24,pmapVmmExtPhys(r3) ; r24 <- VMM pmap extension block paddr
7104 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
7105 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
7106 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7107 srwi r11,r30,12 ; Form shadow hash:
7108 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7109 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7110 ; Form index offset from hash page number
7111 add r31,r31,r10 ; r31 <- hash page index entry
7112 ld r31,0(r31) ; r31 <- hash page paddr
7113 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7114 ; r31 <- hash group paddr
7116 gtdStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
7117 xor r28,r4,r28 ; Convert guest pmap_t virt->real
7118 mr r26,r7 ; Save reset boolean
7119 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7120 mr r29,r11 ; Save caller's msr image
7122 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7123 bl sxlkExclusive ; Get lock exclusive
7125 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7126 mtctr r0 ; in this group
7127 bt++ pf64Bitb,gtd64Search ; Test for 64-bit machine
7129 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7130 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7131 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7132 b gtd32SrchLp ; Let the search begin!
7136 mr r6,r3 ; r6 <- current mapping slot's flags
7137 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7138 mr r7,r4 ; r7 <- current mapping slot's space ID
7139 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7140 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7141 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7142 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7143 xor r7,r7,r9 ; Compare space ID
7144 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7145 xor r8,r8,r30 ; Compare virtual address
7146 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7147 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7149 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7150 bdnz gtd32SrchLp ; Iterate
7152 mr r6,r3 ; r6 <- current mapping slot's flags
7153 clrrwi r5,r5,12 ; Remove flags from virtual address
7154 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7155 xor r4,r4,r9 ; Compare space ID
7156 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7157 xor r5,r5,r30 ; Compare virtual address
7158 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7159 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7160 b gtdSrchMiss ; No joy in our hash group
7163 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7164 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7165 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7166 b gtd64SrchLp ; Let the search begin!
7170 mr r6,r3 ; r6 <- current mapping slot's flags
7171 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7172 mr r7,r4 ; r7 <- current mapping slot's space ID
7173 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7174 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7175 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7176 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7177 xor r7,r7,r9 ; Compare space ID
7178 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7179 xor r8,r8,r30 ; Compare virtual address
7180 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7181 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7183 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7184 bdnz gtd64SrchLp ; Iterate
7186 mr r6,r3 ; r6 <- current mapping slot's flags
7187 clrrdi r5,r5,12 ; Remove flags from virtual address
7188 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7189 xor r4,r4,r9 ; Compare space ID
7190 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7191 xor r5,r5,r30 ; Compare virtual address
7192 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7193 bne gtdSrchMiss ; No joy in our hash group
7196 gtdSrchHit: bt++ pf64Bitb,gtdDo64 ; Split for 64 bit
7198 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
7200 cmplwi cr1,r26,0 ; Do we want to clear RC?
7201 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7202 mr. r3,r3 ; Was there a previously valid PTE?
7203 li r0,lo16(mpR|mpC) ; Get bits to clear
7205 and r25,r5,r0 ; Copy RC bits into result
7206 beq++ cr1,gtdNoClr32 ; Nope...
7208 andc r12,r12,r0 ; Clear mapping copy of RC
7209 andc r5,r5,r0 ; Clear PTE copy of RC
7210 sth r12,mpVAddr+6(r31) ; Set the new RC in mapping
7212 gtdNoClr32: beq-- gtdNoOld32 ; No previously valid PTE...
7214 sth r5,6(r3) ; Store updated RC in PTE
7215 eieio ; Make sure we do not reorder
7216 stw r4,0(r3) ; Revalidate the PTE
7218 eieio ; Make sure all updates come first
7219 stw r6,0(r7) ; Unlock PCA
7221 gtdNoOld32: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7222 bl sxlkUnlock ; Unlock the search list
7223 b gtdR32 ; Join common...
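/*
 *	The reference/change harvest works on the PTE image handed back by
 *	mapInvPte32/64: the R and C bits of the old PTE become the return code, and when
 *	the reset boolean is set they are cleared both in the mapping's cached copy and
 *	in the PTE before it is revalidated.  A simplified C sketch with assumed bit
 *	positions:
 *
 *		#include <stdint.h>
 *		#include <stdbool.h>
 *		#include <stddef.h>
 *
 *		#define MP_R 0x00000100u                 // assumed reference bit (mpR)
 *		#define MP_C 0x00000080u                 // assumed change bit (mpC)
 *
 *		struct gmap_rc { uint32_t mpVAddrLo; };  // low word of mapping's vaddr/flags
 *
 *		static uint32_t gather_rc(struct gmap_rc *mp, uint32_t *pte_lo, bool reset)
 *		{
 *			uint32_t rc = (pte_lo != NULL) ? (*pte_lo & (MP_R | MP_C)) : 0;
 *			if (reset) {
 *				mp->mpVAddrLo &= ~(MP_R | MP_C); // clear the mapping's cached copy
 *				if (pte_lo != NULL)
 *					*pte_lo &= ~(MP_R | MP_C);   // clear the PTE copy before revalidating
 *			}
 *			return rc;                           // caller folds this into the return code
 *		}
 */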
7228 gtdDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
7230 cmplwi cr1,r26,0 ; Do we want to clear RC?
7231 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7232 mr. r3,r3 ; Was there a previously valid PTE?
7233 li r0,lo16(mpR|mpC) ; Get bits to clear
7235 and r25,r5,r0 ; Copy RC bits into result
7236 beq++ cr1,gtdNoClr64 ; Nope...
7238 andc r12,r12,r0 ; Clear mapping copy of RC
7239 andc r5,r5,r0 ; Clear PTE copy of RC
7240 sth r12,mpVAddr+6(r31) ; Set the new RC
7242 gtdNoClr64: beq-- gtdNoOld64 ; Nope, no previous PTE...
7244 sth r5,14(r3) ; Store updated RC
7245 eieio ; Make sure we do not reorder
7246 std r4,0(r3) ; Revalidate the PTE
7248 eieio ; Make sure all updates come first
7249 stw r6,0(r7) ; Unlock PCA
7251 gtdNoOld64: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7252 bl sxlkUnlock ; Unlock the search list
7253 b gtdR64 ; Join common...
7256 gtdSrchMiss: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7257 bl sxlkUnlock ; Unlock the search list
7258 li r25,mapRtNotFnd ; Get ready to return not found
7259 bt++ pf64Bitb,gtdR64 ; Test for 64-bit machine
7261 gtdR32: mtmsr r29 ; Restore caller's msr image
7262 isync ; Throw a small wrench into the pipeline
7263 b gtdEpilog ; Join common epilog code
7265 gtdR64: mtmsrd r29 ; Restore caller's msr image
7267 gtdEpilog: lwz r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7268 ; Get caller's return address
7269 mr r3,r25 ; Get return code
7270 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7271 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7272 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7273 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7274 mtlr r0 ; Prepare return address
7275 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7276 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7277 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7278 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
7279 lwz r1,0(r1) ; Pop stack frame
7280 blr ; Return to caller
7283 ; Guest shadow assist -- convert guest to host virtual address
7285 ; Locates the specified guest mapping, and if it exists locates the
7286 ; first mapping belonging to its host on the physical chain and returns
7287 ; its virtual address.
7289 ; Note that if there are multiple mappings belonging to this host
7290 ; chained to the physent to which the guest mapping is chained, then
7291 ; host virtual aliases exist for this physical address. If host aliases
7292 ; exist, then we select the first on the physent chain, making it
7293 ; unpredictable which of the two or more possible host virtual addresses is returned.
7297 ; r3 : address of guest pmap, 32-bit kernel virtual address
7298 ; r4 : guest virtual address, high-order 32 bits
7299 ; r5 : guest virtual address, low-order 32 bits
7301 ; Non-volatile register usage:
7302 ; r24 : physent physical address
7303 ; r25 : VMM extension block's physical address
7304 ; r26 : host virtual address
7305 ; r27 : host pmap physical address
7306 ; r28 : guest pmap physical address
7307 ; r29 : caller's msr image from mapSetUp
7308 ; r30 : guest virtual address
7309 ; r31 : gva->phys mapping's physical address
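/*
 *	Conceptually this routine chases gva -> physent -> first host mapping: the guest
 *	mapping gives the physical page, and the physent chain is scanned for the first
 *	normal mapping in the host's space, whose virtual address is returned (or -1 on
 *	a miss).  A C sketch with an assumed field layout:
 *
 *		#include <stdint.h>
 *		#include <stddef.h>
 *
 *		#define MP_TYPE_MASK 0x0000000Fu         // assumed mpType field position
 *		#define MP_NORMAL    0x00000000u         // assumed mpNormal value
 *
 *		struct map_h {
 *			uint32_t mpFlags; uint16_t mpSpace; uint64_t mpVAddr;
 *			struct map_h *mpAlias;
 *		};
 *
 *		static uint64_t gva_to_hva(struct map_h *chain, uint16_t host_space)
 *		{
 *			for (struct map_h *mp = chain; mp != NULL; mp = mp->mpAlias)
 *				if ((mp->mpFlags & MP_TYPE_MASK) == MP_NORMAL &&
 *				    mp->mpSpace == host_space)
 *					return mp->mpVAddr & ~0xFFFull;   // strip HW flag bits
 *			return ~0ULL;                        // no host alias: return -1
 *		}
 */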
7313 .globl EXT(hw_gva_to_hva)
7315 LEXT(hw_gva_to_hva)
7317 #define gthStackSize ((31-24+1)*4)+4
7319 stwu r1,-(FM_ALIGN(gthStackSize)+FM_SIZE)(r1)
7320 ; Mint a new stack frame
7321 mflr r0 ; Get caller's return address
7322 mfsprg r11,2 ; Get feature flags
7323 mtcrf 0x02,r11 ; Insert feature flags into cr6
7324 stw r0,(FM_ALIGN(gthStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7325 ; Save caller's return address
7326 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7327 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7328 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7329 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7330 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7331 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7332 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7333 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
7335 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7337 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7338 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7340 bt++ pf64Bitb,gth64Salt ; Test for 64-bit machine
7342 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
7343 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7344 lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
7345 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7346 srwi r11,r30,12 ; Form shadow hash:
7347 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7348 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7349 ; Form index offset from hash page number
7350 add r31,r31,r10 ; r31 <- hash page index entry
7351 lwz r31,4(r31) ; r31 <- hash page paddr
7352 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7353 ; r31 <- hash group paddr
7354 b gthStart ; Get to it
7356 gth64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7357 ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
7358 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7359 ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
7360 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7361 srwi r11,r30,12 ; Form shadow hash:
7362 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7363 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7364 ; Form index offset from hash page number
7365 add r31,r31,r10 ; r31 <- hash page index entry
7366 ld r31,0(r31) ; r31 <- hash page paddr
7367 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7368 ; r31 <- hash group paddr
7370 gthStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7371 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7372 mr r29,r11 ; Save caller's msr image
7374 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7375 bl sxlkExclusive ; Get lock exclusive
7377 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7378 mtctr r0 ; in this group
7379 bt++ pf64Bitb,gth64Search ; Test for 64-bit machine
7381 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7382 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7383 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7384 b gth32SrchLp ; Let the search begin!
7388 mr r6,r3 ; r6 <- current mapping slot's flags
7389 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7390 mr r7,r4 ; r7 <- current mapping slot's space ID
7391 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7392 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7393 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7394 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7395 xor r7,r7,r9 ; Compare space ID
7396 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7397 xor r8,r8,r30 ; Compare virtual address
7398 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7399 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7401 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7402 bdnz gth32SrchLp ; Iterate
7404 mr r6,r3 ; r6 <- current mapping slot's flags
7405 clrrwi r5,r5,12 ; Remove flags from virtual address
7406 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7407 xor r4,r4,r9 ; Compare space ID
7408 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7409 xor r5,r5,r30 ; Compare virtual address
7410 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7411 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7412 b gthSrchMiss ; No joy in our hash group
7415 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7416 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7417 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7418 b gth64SrchLp ; Let the search begin!
7422 mr r6,r3 ; r6 <- current mapping slot's flags
7423 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7424 mr r7,r4 ; r7 <- current mapping slot's space ID
7425 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7426 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7427 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7428 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7429 xor r7,r7,r9 ; Compare space ID
7430 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7431 xor r8,r8,r30 ; Compare virtual address
7432 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7433 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7435 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7436 bdnz gth64SrchLp ; Iterate
7438 mr r6,r3 ; r6 <- current mapping slot's flags
7439 clrrdi r5,r5,12 ; Remove flags from virtual address
7440 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7441 xor r4,r4,r9 ; Compare space ID
7442 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7443 xor r5,r5,r30 ; Compare virtual address
7444 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7445 bne gthSrchMiss ; No joy in our hash group
7447 gthSrchHit: lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
7448 bl mapFindLockPN ; Find 'n' lock this page's physent
7449 mr. r24,r3 ; Got lock on our physent?
7450 beq-- gthBadPLock ; No, time to bail out
7452 bt++ pf64Bitb,gthPFnd64 ; 64-bit version of physent chain search
7454 lwz r9,ppLink+4(r24) ; Get first mapping on physent
7455 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
7456 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
7457 gthPELoop: mr. r12,r9 ; Got a mapping to look at?
7458 beq- gthPEMiss ; Nope, we've missed hva->phys mapping
7459 lwz r7,mpFlags(r12) ; Get mapping's flags
7460 lhz r4,mpSpace(r12) ; Get mapping's space id number
7461 lwz r26,mpVAddr+4(r12) ; Get mapping's virtual address
7462 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
7464 rlwinm r0,r7,0,mpType ; Isolate mapping's type
7465 rlwinm r26,r26,0,~mpHWFlags ; Bye-bye unsightly flags
7466 xori r0,r0,mpNormal ; Normal mapping?
7467 xor r4,r4,r6 ; Compare w/ host space id number
7468 or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
7469 beq gthPEHit ; Hit, found a host mapping
7470 b gthPELoop ; Iterate
7472 gthPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
7473 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
7474 ld r9,ppLink(r24) ; Get first mapping on physent
7475 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
7476 andc r9,r9,r0 ; Cleanup mapping pointer
7477 gthPELp64: mr. r12,r9 ; Got a mapping to look at?
7478 beq-- gthPEMiss ; Nope, we've missed hva->phys mapping
7479 lwz r7,mpFlags(r12) ; Get mapping's flags
7480 lhz r4,mpSpace(r12) ; Get mapping's space id number
7481 ld r26,mpVAddr(r12) ; Get mapping's virtual address
7482 ld r9,mpAlias(r12) ; Next mapping in physent alias chain
7483 rlwinm r0,r7,0,mpType ; Isolate mapping's type
7484 rldicr r26,r26,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
7485 xori r0,r0,mpNormal ; Normal mapping?
7486 xor r4,r4,r6 ; Compare w/ host space id number
7487 or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
7488 beq gthPEHit ; Hit, found a host mapping
7489 b gthPELp64 ; Iterate
7492 gthPEMiss: mr r3,r24 ; Get physent's address
7493 bl mapPhysUnlock ; Unlock physent chain
7495 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7496 bl sxlkUnlock ; Release host pmap search lock
7497 li r3,-1 ; Return 64-bit -1
7499 bt++ pf64Bitb,gthEpi64 ; Take 64-bit exit
7500 b gthEpi32 ; Take 32-bit exit
7503 gthPEHit: mr r3,r24 ; Get physent's address
7504 bl mapPhysUnlock ; Unlock physent chain
7505 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7506 bl sxlkUnlock ; Release host pmap search lock
7508 bt++ pf64Bitb,gthR64 ; Test for 64-bit machine
7510 gthR32: li r3,0 ; High-order 32 bits host virtual address
7511 mr r4,r26 ; Low-order 32 bits host virtual address
7512 gthEpi32: mtmsr r29 ; Restore caller's msr image
7513 isync ; Throw a small wrench into the pipeline
7514 b gthEpilog ; Join common epilog code
7517 gthR64: srdi r3,r26,32 ; High-order 32 bits host virtual address
7518 clrldi r4,r26,32 ; Low-order 32 bits host virtual address
7519 gthEpi64: mtmsrd r29 ; Restore caller's msr image
7521 gthEpilog: lwz r0,(FM_ALIGN(gthStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7522 ; Get caller's return address
7523 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7524 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7525 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7526 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7527 mtlr r0 ; Prepare return address
7528 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7529 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7530 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7531 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
7532 lwz r1,0(r1) ; Pop stack frame
7533 blr ; Return to caller
7536 gthBadPLock: lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
7537 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
7538 li r3,failMapping ; The BOMB, Dmitri.
7539 sc ; The hydrogen bomb.
7543 ; Guest shadow assist -- find a guest mapping
7545 ; Locates the specified guest mapping, and if it exists returns a copy of it.
7549 ; r3 : address of guest pmap, 32-bit kernel virtual address
7550 ; r4 : guest virtual address, high-order 32 bits
7551 ; r5 : guest virtual address, low-order 32 bits
7552 ; r6 : 32 byte copy area, 32-bit kernel virtual address
7554 ; Non-volatile register usage:
7555 ; r25 : VMM extension block's physical address
7556 ; r26 : copy area virtual address
7557 ; r27 : host pmap physical address
7558 ; r28 : guest pmap physical address
7559 ; r29 : caller's msr image from mapSetUp
7560 ; r30 : guest virtual address
7561 ; r31 : gva->phys mapping's physical address
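/*
 *	On a hit the routine simply copies the 32-byte mapping slot into the caller's
 *	buffer (the lwz/stw run beginning at gfmSrchHit below); the copy area size is
 *	fixed by the slot size.  Sketched in C:
 *
 *		#include <string.h>
 *
 *		#define GV_MAP_BYTES 32u                 // size of a guest mapping slot
 *
 *		static void gv_copy_mapping(void *dst, const void *mapping_slot)
 *		{
 *			memcpy(dst, mapping_slot, GV_MAP_BYTES);
 *		}
 */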
7565 .globl EXT(hw_find_map_gv)
7567 LEXT(hw_find_map_gv)
7569 #define gfmStackSize ((31-25+1)*4)+4
7571 stwu r1,-(FM_ALIGN(gfmStackSize)+FM_SIZE)(r1)
7572 ; Mint a new stack frame
7573 mflr r0 ; Get caller's return address
7574 mfsprg r11,2 ; Get feature flags
7575 mtcrf 0x02,r11 ; Insert feature flags into cr6
7576 stw r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7577 ; Save caller's return address
7578 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7579 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7580 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7581 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7582 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7583 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7584 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7586 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7587 mr r26,r6 ; Copy copy buffer vaddr
7589 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7590 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7592 bt++ pf64Bitb,gfm64Salt ; Test for 64-bit machine
7594 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
7595 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7596 lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
7597 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7598 srwi r11,r30,12 ; Form shadow hash:
7599 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7600 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7601 ; Form index offset from hash page number
7602 add r31,r31,r10 ; r31 <- hash page index entry
7603 lwz r31,4(r31) ; r31 <- hash page paddr
7604 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7605 ; r31 <- hash group paddr
7606 b gfmStart ; Get to it
7608 gfm64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7609 ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
7610 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7611 ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
7612 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7613 srwi r11,r30,12 ; Form shadow hash:
7614 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7615 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7616 ; Form index offset from hash page number
7617 add r31,r31,r10 ; r31 <- hash page index entry
7618 ld r31,0(r31) ; r31 <- hash page paddr
7619 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7620 ; r31 <- hash group paddr
7622 gfmStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7623 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7624 mr r29,r11 ; Save caller's msr image
7626 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7627 bl sxlkExclusive ; Get lock exclusive
7629 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7630 mtctr r0 ; in this group
7631 bt++ pf64Bitb,gfm64Search ; Test for 64-bit machine
7633 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7634 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7635 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7636 b gfm32SrchLp ; Let the search begin!
7640 gfm32SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
7641 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7642 mr r7,r4 ; r7 <- current mapping slot's space ID
7643 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7644 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7645 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7646 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7647 xor r7,r7,r9 ; Compare space ID
7648 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7649 xor r8,r8,r30 ; Compare virtual address
7650 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7651 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7653 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7654 bdnz gfm32SrchLp ; Iterate
7656 mr r6,r3 ; r6 <- current mapping slot's flags
7657 clrrwi r5,r5,12 ; Remove flags from virtual address
7658 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7659 xor r4,r4,r9 ; Compare space ID
7660 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7661 xor r5,r5,r30 ; Compare virtual address
7662 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7663 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7664 b gfmSrchMiss ; No joy in our hash group
7667 gfm64Search: lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7668 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7669 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7670 b gfm64SrchLp ; Let the search begin!
7674 gfm64SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
7675 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7676 mr r7,r4 ; r7 <- current mapping slot's space ID
7677 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7678 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7679 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7680 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7681 xor r7,r7,r9 ; Compare space ID
7682 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7683 xor r8,r8,r30 ; Compare virtual address
7684 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7685 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7687 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7688 bdnz gfm64SrchLp ; Iterate
7690 mr r6,r3 ; r6 <- current mapping slot's flags
7691 clrrdi r5,r5,12 ; Remove flags from virtual address
7692 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7693 xor r4,r4,r9 ; Compare space ID
7694 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7695 xor r5,r5,r30 ; Compare virtual address
7696 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7697 bne gfmSrchMiss ; No joy in our hash group
7699 gfmSrchHit: lwz r5,0(r31) ; Fetch 32 bytes of mapping from physical
lwz r6,4(r31) ; +4
lwz r7,8(r31) ; +8
7702 lwz r8,12(r31) ; +12
7703 lwz r9,16(r31) ; +16
7704 lwz r10,20(r31) ; +20
7705 lwz r11,24(r31) ; +24
7706 lwz r12,28(r31) ; +28
7708 li r31,mapRtOK ; Return found mapping
7710 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7711 bl sxlkUnlock ; Release host pmap search lock
7713 bt++ pf64Bitb,gfmEpi64 ; Test for 64-bit machine
7715 gfmEpi32: mtmsr r29 ; Restore caller's msr image
7716 isync ; A small wrench
7717 b gfmEpilog ; and a larger bubble
7720 gfmEpi64: mtmsrd r29 ; Restore caller's msr image
7722 gfmEpilog: mr. r3,r31 ; Copy/test mapping address
7723 beq gfmNotFound ; Skip copy if no mapping found
7725 stw r5,0(r26) ; Store 32 bytes of mapping into virtual
stw r6,4(r26) ; +4
stw r7,8(r26) ; +8
7728 stw r8,12(r26) ; +12
7729 stw r9,16(r26) ; +16
7730 stw r10,20(r26) ; +20
7731 stw r11,24(r26) ; +24
7732 stw r12,28(r26) ; +28
7735 gfmNotFound: lwz r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7736 ; Get caller's return address
7737 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7738 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7739 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7740 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7741 mtlr r0 ; Prepare return address
7742 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7743 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7744 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7745 lwz r1,0(r1) ; Pop stack frame
7746 blr ; Return to caller
7750 gfmSrchMiss: li r31,mapRtNotFnd ; Indicate mapping not found
7751 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7752 bl sxlkUnlock ; Release host pmap search lock
7753 bt++ pf64Bitb,gfmEpi64 ; Take 64-bit exit
7754 b gfmEpi32 ; Take 32-bit exit
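The search loops above (and the very similar ones in the routines that follow) decide a hit without branching by OR-ing together the XOR differences and testing the combined result once. A minimal C sketch of that predicate, with a hypothetical slot layout and placeholder flag bits standing in for the real mpFlags/mpSpace/mpVAddr definitions:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical slot layout, mirroring the fields the loops read; the real
 * structure and bit values live in the pmap headers. */
struct gv_slot {
    uint32_t flags;      /* mpFlags */
    uint16_t space;      /* mpSpace */
    uint64_t vaddr;      /* mpVAddr: page address with flag bits in the low 12 */
};

#define MPG_FREE    0x0001u   /* placeholder bits for mpgFree / mpgDormant */
#define MPG_DORMANT 0x0002u

/* A slot hits when it is neither free nor dormant, its space ID matches, and
 * its page-aligned address matches.  The assembly records exactly this by
 * OR-ing the XOR differences so one test (cr0_eq) decides the hit. */
static bool gv_slot_hits(const struct gv_slot *s, uint32_t space_id, uint64_t gva)
{
    uint64_t busy  = s->flags & (MPG_FREE | MPG_DORMANT);
    uint64_t space = (uint32_t)s->space ^ space_id;
    uint64_t page  = (s->vaddr & ~0xFFFull) ^ (gva & ~0xFFFull);
    return (busy | space | page) == 0;
}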
7758 ; Guest shadow assist -- change guest page protection
7760 ; Locates the specified guest mapping and, if it is active, changes its protection.
7764 ; r3 : address of guest pmap, 32-bit kernel virtual address
7765 ; r4 : guest virtual address, high-order 32 bits
7766 ; r5 : guest virtual address, low-order 32 bits
7767 ; r6 : guest mapping protection code
7769 ; Non-volatile register usage:
7770 ; r25 : caller's msr image from mapSetUp
7771 ; r26 : guest mapping protection code
7772 ; r27 : host pmap physical address
7773 ; r28 : guest pmap physical address
7774 ; r29 : VMM extension block's physical address
7775 ; r30 : guest virtual address
7776 ; r31 : gva->phys mapping's physical address
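For reference, the register description above corresponds to a calling convention like the following hypothetical C prototype; the prototype and its comments are inferred from this block, not taken from a header.

#include <stdint.h>

struct pmap;    /* guest pmap, opaque in this sketch */

/* Signature inferred from the r3-r6 register description above; the return
 * value is one of the mapRt* codes (mapRtOK on success, mapRtNotFnd if no
 * active mapping covers the guest address). */
extern unsigned int hw_protect_gv(struct pmap *guest_pmap,
                                  uint32_t gva_hi, uint32_t gva_lo,
                                  unsigned int prot);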
7779 .globl EXT(hw_protect_gv)
LEXT(hw_protect_gv)

7783 #define gcpStackSize ((31-24+1)*4)+4
7785 stwu r1,-(FM_ALIGN(gcpStackSize)+FM_SIZE)(r1)
7786 ; Mint a new stack frame
7787 mflr r0 ; Get caller's return address
7788 mfsprg r11,2 ; Get feature flags
7789 mtcrf 0x02,r11 ; Insert feature flags into cr6
7790 stw r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7791 ; Save caller's return address
7792 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7793 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7794 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7795 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7796 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7797 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7798 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7800 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7801 mr r26,r6 ; Copy guest mapping protection code
7803 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7804 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7805 bt++ pf64Bitb,gcp64Salt ; Handle 64-bit machine separately
7806 lwz r29,pmapVmmExtPhys+4(r3) ; r29 <- VMM pmap extension block paddr
7807 lwz r27,vmxHostPmapPhys+4(r11) ; r27 <- host pmap paddr
7808 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7809 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7810 srwi r11,r30,12 ; Form shadow hash:
7811 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7812 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7813 ; Form index offset from hash page number
7814 add r31,r31,r10 ; r31 <- hash page index entry
7815 lwz r31,4(r31) ; r31 <- hash page paddr
7816 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7817 ; r31 <- hash group paddr
7818 b gcpStart ; Get to it
7820 gcp64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7821 ld r29,pmapVmmExtPhys(r3) ; r29 <- VMM pmap extension block paddr
7822 ld r27,vmxHostPmapPhys(r11) ; r27 <- host pmap paddr
7823 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7824 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7825 srwi r11,r30,12 ; Form shadow hash:
7826 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7827 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7828 ; Form index offset from hash page number
7829 add r31,r31,r10 ; r31 <- hash page index entry
7830 ld r31,0(r31) ; r31 <- hash page paddr
7831 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7832 ; r31 <- hash group paddr
7834 gcpStart: xor r28,r4,r28 ; Convert guest pmap_t virt->real
7835 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7836 mr r25,r11 ; Save caller's msr image
7838 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7839 bl sxlkExclusive ; Get lock exclusive
7841 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7842 mtctr r0 ; in this group
7843 bt++ pf64Bitb,gcp64Search ; Test for 64-bit machine
7845 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7846 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7847 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7848 b gcp32SrchLp ; Let the search begin!
7852 gcp32SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
7853 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7854 mr r7,r4 ; r7 <- current mapping slot's space ID
7855 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7856 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7857 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7858 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7859 xor r7,r7,r9 ; Compare space ID
7860 or r0,r11,r7 ; r0 <- free || dormant || !space match
7861 xor r8,r8,r30 ; Compare virtual address
7862 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7863 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7865 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7866 bdnz gcp32SrchLp ; Iterate
7868 mr r6,r3 ; r6 <- current mapping slot's flags
7869 clrrwi r5,r5,12 ; Remove flags from virtual address
7870 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7871 xor r4,r4,r9 ; Compare space ID
7872 or r0,r11,r4 ; r0 <- free || dormant || !space match
7873 xor r5,r5,r30 ; Compare virtual address
7874 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7875 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7876 b gcpSrchMiss ; No joy in our hash group
7879 gcp64Search: lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7880 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7881 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7882 b gcp64SrchLp ; Let the search begin!
7886 gcp64SrchLp: mr r6,r3 ; r6 <- current mapping slot's flags
7887 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7888 mr r7,r4 ; r7 <- current mapping slot's space ID
7889 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7890 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7891 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7892 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7893 xor r7,r7,r9 ; Compare space ID
7894 or r0,r11,r7 ; r0 <- free || dormant || !space match
7895 xor r8,r8,r30 ; Compare virtual address
7896 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7897 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7899 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7900 bdnz gcp64SrchLp ; Iterate
7902 mr r6,r3 ; r6 <- current mapping slot's flags
7903 clrrdi r5,r5,12 ; Remove flags from virtual address
7904 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7905 xor r4,r4,r9 ; Compare space ID
7906 or r0,r11,r4 ; r0 <- free || dormant || !space match
7907 xor r5,r5,r30 ; Compare virtual address
7908 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7909 bne gcpSrchMiss ; No joy in our hash group
7912 gcpSrchHit: bt++ pf64Bitb,gcpDscon64 ; Handle 64-bit disconnect separately
7913 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
7914 ; r31 <- mapping's physical address
7915 ; r3 -> PTE slot physical address
7916 ; r4 -> High-order 32 bits of PTE
7917 ; r5 -> Low-order 32 bits of PTE
7919 ; r7 -> PCA physical address
7920 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
7921 b gcpFreePTE ; Join 64-bit path to release the PTE
7922 gcpDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
7923 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
7924 gcpFreePTE: mr. r3,r3 ; Was there a valid PTE?
7925 beq- gcpSetKey ; No valid PTE, we're almost done
7926 lis r0,0x8000 ; Prepare free bit for this slot
7927 srw r0,r0,r2 ; Position free bit
7928 or r6,r6,r0 ; Set it in our PCA image
7929 lwz r8,mpPte(r31) ; Get PTE pointer
7930 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
7931 stw r8,mpPte(r31) ; Save invalidated PTE pointer
7932 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
7933 stw r6,0(r7) ; Update PCA and unlock the PTEG
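The PTE release above derives the slot number from the PTE's byte offset and sets the matching free bit in the PCA image before unlocking the PTEG. A compact C sketch of that arithmetic (the 0x80000000 >> slot trick is exactly the lis/srw pair above):

#include <stdint.h>

/* Mark a PTE slot free in a PCA image: slot 0 maps to bit 0x80000000,
 * slot 7 to 0x01000000. */
static inline uint32_t pca_mark_free(uint32_t pca, unsigned slot)
{
    return pca | (0x80000000u >> slot);
}

/* Slot number from the PTE's byte offset within its PTEG:
 * 8-byte PTEs (32-bit hash table) vs. 16-byte PTEs (64-bit hash table). */
static inline unsigned pte_slot32(uint32_t pte_offset) { return (pte_offset >> 3) & 7; }
static inline unsigned pte_slot64(uint32_t pte_offset) { return (pte_offset >> 4) & 7; }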
7935 gcpSetKey: lwz r0,mpVAddr+4(r31) ; Get va word containing protection bits
7936 rlwimi r0,r26,0,mpPP ; Insert new protection bits
7937 stw r0,mpVAddr+4(r31) ; Write 'em back
7938 eieio ; Ensure previous mapping updates are visible
7939 li r31,mapRtOK ; I'm a success
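gcpSetKey only touches the protection field of the mapping's flags word. A sketch of the same read-modify-write, with the mpPP field stood in by a placeholder mask:

#include <stdint.h>

#define MP_PP_MASK 0x00000003u  /* placeholder for the mpPP field mask */

/* Replace only the protection field of the mapping's low vaddr/flags word,
 * as the rlwimi in gcpSetKey does. */
static inline uint32_t set_prot(uint32_t vaddr_lo, uint32_t prot)
{
    return (vaddr_lo & ~MP_PP_MASK) | (prot & MP_PP_MASK);
}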
7941 gcpRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
7942 bl sxlkUnlock ; Release host pmap search lock
7944 mr r3,r31 ; r3 <- result code
7945 bt++ pf64Bitb,gcpRtn64 ; Handle 64-bit separately
7946 mtmsr r25 ; Restore 'rupts, translation
7947 isync ; Throw a small wrench into the pipeline
7948 b gcpPopFrame ; Nothing to do now but pop a frame and return
7949 gcpRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
7951 gcpPopFrame: lwz r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7952 ; Get caller's return address
7953 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7954 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7955 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7956 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7957 mtlr r0 ; Prepare return address
7958 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7959 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7960 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7961 lwz r1,0(r1) ; Pop stack frame
7962 blr ; Return to caller
7966 gcpSrchMiss: li r31,mapRtNotFnd ; Could not locate requested mapping
7967 b gcpRelPmap ; Exit through host pmap search lock release
7971 ; Find the physent based on a physical page and try to lock it (but not too hard)
7972 ; Note that this table always has an entry with a 0 table pointer at the end
7974 ; R3 contains ppnum on entry
7975 ; R3 is 0 if no entry was found
7976 ; R3 is physent if found
7977 ; cr0_eq is true if lock was obtained or there was no entry to lock
7978 ; cr0_eq is false if there was an entry and it was already locked
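In C terms, the scan below walks pmap_mem_regions until it either runs off the end (a zero table pointer) or finds the bank covering the page, then makes a single attempt at the physent lock. This sketch uses illustrative structures and GCC/Clang __atomic builtins in place of lwarx/stwcx.:

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the pmap_mem_regions entries walked below; field names follow
 * the mrPhysTab/mrStart/mrEnd comments, but the layout itself is illustrative. */
struct mem_region {
    uint32_t *phys_tab;   /* this bank's physent table, 2 words per page; NULL ends the scan */
    uint32_t  start;      /* first ppnum covered by the bank */
    uint32_t  end;        /* last ppnum covered by the bank */
};

#define PHYSENT_LOCK 0x80000000u   /* lock bit in the physent's first word */

/* Locate the physent for a physical page number and try (once) to lock it.
 * Returns NULL with *locked = true when no bank covers the page, matching the
 * "cr0_eq set, r3 = 0" contract described above. */
static uint32_t *find_and_trylock_physent(struct mem_region *tab, uint32_t ppnum,
                                          bool *locked)
{
    for (; tab->phys_tab != NULL; tab++) {
        if (ppnum < tab->start || ppnum > tab->end)
            continue;
        uint32_t *physent = tab->phys_tab + 2 * (ppnum - tab->start);
        uint32_t old = __atomic_load_n(physent, __ATOMIC_RELAXED);
        if (old & PHYSENT_LOCK) {                       /* someone else holds it */
            *locked = false;
            return physent;
        }
        *locked = __atomic_compare_exchange_n(physent, &old, old | PHYSENT_LOCK,
                                              false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
        return physent;
    }
    *locked = true;
    return NULL;
}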
7984 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
7985 mr r2,r3 ; Save our target
7986 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
7988 mapFindPhz: lwz r3,mrPhysTab(r9) ; Get the actual table address
7989 lwz r5,mrStart(r9) ; Get start of table entry
7990 lwz r0,mrEnd(r9) ; Get end of table entry
7991 addi r9,r9,mrSize ; Point to the next slot
7992 cmplwi cr2,r3,0 ; Are we at the end of the table?
7993 cmplw r2,r5 ; See if we are in this table
7994 cmplw cr1,r2,r0 ; Check end also
7995 sub r4,r2,r5 ; Calculate index to physical entry
7996 beq-- cr2,mapFindNo ; Leave if we did not find an entry...
7997 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
7998 slwi r4,r4,3 ; Get offset to physical entry
8000 blt-- mapFindPhz ; Did not find it...
8002 add r3,r3,r4 ; Point right to the slot
8004 mapFindOv: lwz r2,0(r3) ; Get the lock contents right now
8005 rlwinm. r0,r2,0,0,0 ; Is it locked?
8006 bnelr-- ; Yes it is...
8008 lwarx r2,0,r3 ; Get the lock
8009 rlwinm. r0,r2,0,0,0 ; Is it locked?
8010 oris r0,r2,0x8000 ; Set the lock bit
8011 bne-- mapFindKl ; It is locked, go get rid of reservation and leave...
8012 stwcx. r0,0,r3 ; Try to stuff it back...
8013 bne-- mapFindOv ; Collision, try again...
8014 isync ; Clear any speculations
blr ; Leave with the lock held...
8017 mapFindKl: li r2,lgKillResv ; Killing field
8018 stwcx. r2,0,r2 ; Trash reservation...
8019 crclr cr0_eq ; Make sure we do not think we got the lock
blr ; Leave...
8022 mapFindNo: crset cr0_eq ; Make sure that we set this
8023 li r3,0 ; Show that we did not find it
blr ; Leave...
8026 ; pmapCacheLookup - This function will look up an entry in the pmap segment cache.
8028 ; How the pmap cache lookup works:
8030 ; We use a combination of three things: a mask of valid entries, a sub-tag, and the
8031 ; ESID (aka the "tag"). The mask indicates which of the cache slots actually contain
8032 ; an entry. The sub-tag is a 16 entry 4 bit array that contains the low order 4 bits
8033 ; of the ESID: bits 32:35 of the effective address for 64-bit and bits 0:3 for 32-bit. The cache
8034 ; entry contains the full 36 bit ESID.
8036 ; The purpose of the sub-tag is to limit the number of searches necessary when looking
8037 ; for an existing cache entry. Because there are 16 slots in the cache, we could end up
8038 ; searching all 16 if a match is not found.
8040 ; Essentially, we will search only the slots that have a valid entry and whose sub-tag
8041 ; matches. More than likely, we will eliminate almost all of the searches.
8045 ; R4 = ESID high half
8046 ; R5 = ESID low half
8049 ; R3 = pmap cache slot if found, 0 if not
8050 ; R10 = pmapCCtl address
8051 ; R11 = pmapCCtl image
8052 ; pmapCCtl locked on exit
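A C rendering of the sub-tag screen may help: the assembly replicates the 4-bit sub-tag across a word and reduces nibble-equality to one bit per slot with eqv/shift tricks, while the sketch below does the same job with a plain loop. It assumes slot 0 occupies the most-significant nibble of pmapSCSubTag; the resulting mask is then ANDed with the valid-entry mask before any slot is actually examined.

#include <stdint.h>

/* Build a 16-bit hit mask: bit i (counting from the MSB) is set when cache
 * slot i's 4-bit sub-tag equals the low 4 bits of the ESID being looked up.
 * subtag_hi holds the sub-tags of slots 0-7, subtag_lo those of slots 8-15,
 * 4 bits per slot, mirroring pmapSCSubTag / pmapSCSubTag+4. */
static uint16_t subtag_hits(uint32_t subtag_hi, uint32_t subtag_lo, uint32_t esid_lo4)
{
    uint64_t tags = ((uint64_t)subtag_hi << 32) | subtag_lo;
    uint16_t hits = 0;

    for (int slot = 0; slot < 16; slot++) {
        unsigned nib = (unsigned)(tags >> (60 - 4 * slot)) & 0xF;
        if (nib == (esid_lo4 & 0xF))
            hits |= (uint16_t)(0x8000u >> slot);
    }
    return hits;
}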
8058 pmapCacheLookup: la r10,pmapCCtl(r3) ; Point to the segment cache control
8061 pmapCacheLookuq: lwarx r11,0,r10 ; Get the segment cache control value
8062 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
8063 ori r0,r11,lo16(pmapCCtlLck) ; Turn on the lock bit
8064 bne-- pmapCacheLookur ; Yes, it is locked; go wait for it...
8065 stwcx. r0,0,r10 ; Try to take the lock
8066 bne-- pmapCacheLookuq ; Someone else just stuffed it, try again...
8068 isync ; Make sure we get reservation first
8069 lwz r9,pmapSCSubTag(r3) ; Get the high part of the sub-tag
8070 rlwimi r5,r5,28,4,7 ; Copy sub-tag just to right of itself (XX------)
8071 lwz r10,pmapSCSubTag+4(r3) ; And the bottom half
8072 rlwimi r5,r5,24,8,15 ; Copy doubled sub-tag to right of itself (XXXX----)
8073 lis r8,0x8888 ; Get some eights
8074 rlwimi r5,r5,16,16,31 ; Copy quadrupled sub-tags to the right
8075 ori r8,r8,0x8888 ; Fill the rest with eights
8077 eqv r10,r10,r5 ; Get 0xF where we hit in bottom half
8078 eqv r9,r9,r5 ; Get 0xF where we hit in top half
8080 rlwinm r2,r10,1,0,30 ; Shift over 1
8081 rlwinm r0,r9,1,0,30 ; Shift over 1
8082 and r2,r2,r10 ; AND the even/odd pair into the even
8083 and r0,r0,r9 ; AND the even/odd pair into the even
8084 rlwinm r10,r2,2,0,28 ; Shift over 2
8085 rlwinm r9,r0,2,0,28 ; Shift over 2
8086 and r10,r2,r10 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
8087 and r9,r0,r9 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
8089 and r10,r10,r8 ; Clear out extras
8090 and r9,r9,r8 ; Clear out extras
8092 rlwinm r0,r10,3,1,28 ; Slide adjacent next to each other
8093 rlwinm r2,r9,3,1,28 ; Slide adjacent next to each other
8094 or r10,r0,r10 ; Merge them
8095 or r9,r2,r9 ; Merge them
8096 rlwinm r0,r10,6,2,26 ; Slide adjacent pairs next to each other
8097 rlwinm r2,r9,6,2,26 ; Slide adjacent pairs next to each other
8098 or r10,r0,r10 ; Merge them
8099 or r9,r2,r9 ; Merge them
8100 rlwimi r10,r10,12,4,7 ; Stick in the low-order adjacent quad
8101 rlwimi r9,r9,12,4,7 ; Stick in the low-order adjacent quad
8102 not r6,r11 ; Turn invalid into valid
8103 rlwimi r9,r10,24,8,15 ; Merge in the adjacent octs giving a hit mask
8105 la r10,pmapSegCache(r3) ; Point at the cache slots
8106 and. r6,r9,r6 ; Get mask of valid and hit
8108 li r3,0 ; Assume not found
8109 oris r0,r0,0x8000 ; Start a mask
8110 beqlr++ ; Leave, should usually be no hits...
8112 pclNextEnt: cntlzw r5,r6 ; Find an in use one
8113 cmplwi cr1,r5,pmapSegCacheUse ; Did we find one?
8114 rlwinm r7,r5,4,0,27 ; Index to the cache entry
8115 srw r2,r0,r5 ; Get validity mask bit
8116 add r7,r7,r10 ; Point to the cache slot
8117 andc r6,r6,r2 ; Clear the validity bit we just tried
8118 bgelr-- cr1 ; Leave if there are no more to check...
8120 lwz r5,sgcESID(r7) ; Get the top half
8122 cmplw r5,r4 ; Only need to check top because sub-tag is the entire other half
8124 bne++ pclNextEnt ; Nope, try again...
8126 mr r3,r7 ; Point to the slot
blr ; Leave...
8132 pmapCacheLookur: li r11,lgKillResv ; The killing spot
8133 stwcx. r11,0,r11 ; Kill the reservation
8136 pmapCacheLookus: lwz r11,pmapCCtl(r3) ; Get the segment cache control
8137 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
8138 beq++ pmapCacheLookup ; Nope...
8139 b pmapCacheLookus ; Yup, keep waiting...
8143 ; mapMergeRC -- Given a physical mapping address in R31, locate its
8144 ; connected PTE (if any) and merge the PTE referenced and changed bits
8145 ; into the mapping and physent.
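The heart of the merge is an atomic OR of the PTE's referenced/changed bits into the master copy in the physent, retried on collision just like the lwarx/stwcx. loop below. A minimal sketch of the physent side only; the bit values use the standard PowerPC R/C positions in the low PTE word, and the shift into the ppRb/ppCb positions of the real physent is omitted, so treat the masks as placeholders.

#include <stdint.h>

#define PTE_R 0x00000100u     /* referenced bit in the low PTE word */
#define PTE_C 0x00000080u     /* changed bit in the low PTE word */

/* OR the PTE's R and C bits into the master RC word, retrying on contention
 * the way the lwarx/stwcx. loop in mMmrgRC does. */
static void merge_rc(volatile uint32_t *master_rc, uint32_t pte_lo)
{
    uint32_t rc = pte_lo & (PTE_R | PTE_C);
    uint32_t old = __atomic_load_n(master_rc, __ATOMIC_RELAXED);

    while (!__atomic_compare_exchange_n(master_rc, &old, old | rc,
                                        false, __ATOMIC_RELEASE, __ATOMIC_RELAXED))
        ;   /* somebody else updated it first -- retry with the new value */
}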
8151 lwz r0,mpPte(r31) ; Grab the PTE offset
8152 mfsdr1 r7 ; Get the pointer to the hash table
8153 lwz r5,mpVAddr+4(r31) ; Grab the virtual address
8154 rlwinm r10,r7,0,0,15 ; Clean up the hash table base
8155 andi. r3,r0,mpHValid ; Is there a possible PTE?
8156 srwi r7,r0,4 ; Convert to PCA units
8157 rlwinm r7,r7,0,0,29 ; Clean up PCA offset
8158 mflr r2 ; Save the return
8159 subfic r7,r7,-4 ; Convert to -4 based negative index
8160 add r7,r10,r7 ; Point to the PCA directly
8161 beqlr-- ; There was no PTE to start with...
8163 bl mapLockPteg ; Lock the PTEG
8165 lwz r0,mpPte(r31) ; Grab the PTE offset
8166 mtlr r2 ; Restore the LR
8167 andi. r3,r0,mpHValid ; Is there a possible PTE?
8168 beq- mMPUnlock ; There is no PTE, someone took it so just unlock and leave...
8170 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8171 add r3,r3,r10 ; Point to actual PTE
8172 lwz r5,4(r3) ; Get the real part of the PTE
8173 srwi r10,r5,12 ; Change physical address to a ppnum
8175 mMNmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table
8176 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
8177 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
8178 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
8179 rlwinm r11,r11,2,24,29 ; Mask index bits and convert to byte offset
8180 add r11,r11,r8 ; Point to the bank table
8181 lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer
8182 lwz r11,mrStart(r11) ; Get the start of bank
8183 rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC
8184 addi r2,r2,4 ; Offset to last half of field
8185 stw r0,mpVAddr+4(r31) ; Set the new RC into the field
8186 sub r11,r10,r11 ; Get the index into the table
8187 rlwinm r11,r11,3,0,28 ; Get offset to the physent
8189 mMmrgRC: lwarx r10,r11,r2 ; Get the master RC
8190 rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC
8191 or r0,r0,r10 ; Merge in the new RC
8192 stwcx. r0,r11,r2 ; Try to stick it back
8193 bne-- mMmrgRC ; Try again if we collided...
8194 eieio ; Commit all updates
8197 mMPUnlock: stw r6,0(r7) ; Unlock PTEG
blr ; Return...
8201 ; 64-bit version of mapMergeRC
8206 lwz r0,mpPte(r31) ; Grab the PTE offset
8207 ld r5,mpVAddr(r31) ; Grab the virtual address
8208 mfsdr1 r7 ; Get the pointer to the hash table
8209 rldicr r10,r7,0,45 ; Clean up the hash table base
8210 andi. r3,r0,mpHValid ; Is there a possible PTE?
8211 srdi r7,r0,5 ; Convert to PCA units
8212 rldicr r7,r7,0,61 ; Clean up PCA
8213 subfic r7,r7,-4 ; Convert to -4 based negative index
8214 mflr r2 ; Save the return
8215 add r7,r10,r7 ; Point to the PCA directly
8216 beqlr-- ; There was no PTE to start with...
8218 bl mapLockPteg ; Lock the PTEG
8220 lwz r0,mpPte(r31) ; Grab the PTE offset again
8221 mtlr r2 ; Restore the LR
8222 andi. r3,r0,mpHValid ; Is there a possible PTE?
8223 beq-- mMPUnlock ; There is no PTE, someone took it so just unlock and leave...
8225 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8226 add r3,r3,r10 ; Point to the actual PTE
8227 ld r5,8(r3) ; Get the real part
8228 srdi r10,r5,12 ; Change physical address to a ppnum
8229 b mMNmerge ; Join the common 32-64-bit code...
8233 ; This routine, given a mapping, will find and lock the PTEG
8234 ; If mpPte does not point to a PTE (checked before and after lock), it will unlock the
8235 ; PTEG and return. In that case R4 is undefined, R5 holds only the
8236 ; low 12 bits of mpVAddr, and R3 will contain 0.
8238 ; If the mapping is still valid, we will invalidate the PTE and merge
8239 ; the RC bits into the physent and also save them into the mapping.
8241 ; We then return with R3 pointing to the PTE slot, R4 is the
8242 ; top of the PTE and R5 is the bottom. R6 contains the PCA.
8243 ; R7 points to the PCA entry.
8245 ; Note that we should NEVER be called on a block or special mapping.
8246 ; We could do many bad things.
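Both mapInvPte variants (and mapMergeRC above) locate the PCA entry for a PTE by indexing backwards from the hash-table base, one 32-bit PCA word per PTEG. A C sketch of that address arithmetic, assuming the 64-byte PTEGs of the 32-bit hash table and the 128-byte PTEGs of the 64-bit one implied by the 8- and 16-byte PTE comments above:

#include <stdint.h>

/* PCA entries sit immediately below the hash table base, one 32-bit word per
 * PTEG, growing downward: PTEG n's PCA lives at base - 4*(n+1).  This mirrors
 * "srwi r7,r0,4 ; rlwinm r7,r7,0,0,29 ; subfic r7,r7,-4 ; add r7,r10,r7". */
static inline uint32_t *pca_for_pte32(uint8_t *hash_base, uint32_t pte_offset)
{
    uint32_t pteg = pte_offset / 64;       /* 8 PTEs x 8 bytes per PTEG */
    return (uint32_t *)(hash_base - 4 * (pteg + 1));
}

static inline uint32_t *pca_for_pte64(uint8_t *hash_base, uint32_t pte_offset)
{
    uint32_t pteg = pte_offset / 128;      /* 8 PTEs x 16 bytes per PTEG */
    return (uint32_t *)(hash_base - 4 * (pteg + 1));
}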
8252 mapInvPte32: lwz r0,mpPte(r31) ; Grab the PTE offset
8253 mfsdr1 r7 ; Get the pointer to the hash table
8254 lwz r5,mpVAddr+4(r31) ; Grab the virtual address
8255 rlwinm r10,r7,0,0,15 ; Clean up the hash table base
8256 andi. r3,r0,mpHValid ; Is there a possible PTE?
8257 srwi r7,r0,4 ; Convert to PCA units
8258 rlwinm r7,r7,0,0,29 ; Clean up PCA offset
8259 mflr r2 ; Save the return
8260 subfic r7,r7,-4 ; Convert to -4 based negative index
8261 add r7,r10,r7 ; Point to the PCA directly
8262 beqlr-- ; There was no PTE to start with...
8264 bl mapLockPteg ; Lock the PTEG
8266 lwz r0,mpPte(r31) ; Grab the PTE offset
8267 mtlr r2 ; Restore the LR
8268 andi. r3,r0,mpHValid ; Is there a possible PTE?
8269 beq- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
8271 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8272 add r3,r3,r10 ; Point to actual PTE
8273 lwz r4,0(r3) ; Get the top of the PTE
8275 li r8,tlbieLock ; Get the TLBIE lock
8276 rlwinm r0,r4,0,1,31 ; Clear the valid bit
8277 stw r0,0(r3) ; Invalidate the PTE
8279 sync ; Make sure everyone sees the invalidate
8281 mITLBIE32: lwarx r0,0,r8 ; Get the TLBIE lock
8282 mfsprg r2,2 ; Get feature flags
8283 mr. r0,r0 ; Is it locked?
8284 li r0,1 ; Get our lock word
8285 bne- mITLBIE32 ; It is locked, go wait...
8287 stwcx. r0,0,r8 ; Try to get it
8288 bne- mITLBIE32 ; We was beat...
8290 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
8291 li r0,0 ; Lock clear value
8293 tlbie r5 ; Invalidate it everywhere
8295 beq- mINoTS32 ; Can not have MP on this machine...
8297 eieio ; Make sure that the tlbie happens first
8298 tlbsync ; Wait for everyone to catch up
8299 sync ; Make sure of it all
8301 mINoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock
8302 lwz r5,4(r3) ; Get the real part
8303 srwi r10,r5,12 ; Change physical address to a ppnum
8305 mINmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table
8306 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
8307 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
8308 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
8309 rlwinm r11,r11,2,24,29 ; Mask index bits and convert to byte offset
8310 add r11,r11,r8 ; Point to the bank table
8311 lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer
8312 lwz r11,mrStart(r11) ; Get the start of bank
8313 rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC
8314 addi r2,r2,4 ; Offset to last half of field
8315 stw r0,mpVAddr+4(r31) ; Set the new RC into the field
8316 sub r11,r10,r11 ; Get the index into the table
8317 rlwinm r11,r11,3,0,28 ; Get offset to the physent
8320 mImrgRC: lwarx r10,r11,r2 ; Get the master RC
8321 rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC
8322 or r0,r0,r10 ; Merge in the new RC
8323 stwcx. r0,r11,r2 ; Try to stick it back
8324 bne-- mImrgRC ; Try again if we collided...
8326 blr ; Leave with the PCA still locked up...
8328 mIPUnlock: eieio ; Make sure all updates come first
8330 stw r6,0(r7) ; Unlock
8339 mapInvPte64: lwz r0,mpPte(r31) ; Grab the PTE offset
8340 ld r5,mpVAddr(r31) ; Grab the virtual address
8341 mfsdr1 r7 ; Get the pointer to the hash table
8342 rldicr r10,r7,0,45 ; Clean up the hash table base
8343 andi. r3,r0,mpHValid ; Is there a possible PTE?
8344 srdi r7,r0,5 ; Convert to PCA units
8345 rldicr r7,r7,0,61 ; Clean up PCA
8346 subfic r7,r7,-4 ; Convert to -4 based negative index
8347 mflr r2 ; Save the return
8348 add r7,r10,r7 ; Point to the PCA directly
8349 beqlr-- ; There was no PTE to start with...
8351 bl mapLockPteg ; Lock the PTEG
8353 lwz r0,mpPte(r31) ; Grab the PTE offset again
8354 mtlr r2 ; Restore the LR
8355 andi. r3,r0,mpHValid ; Is there a possible PTE?
8356 beq-- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
8358 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8359 add r3,r3,r10 ; Point to the actual PTE
8360 ld r4,0(r3) ; Get the top of the PTE
8362 li r8,tlbieLock ; Get the TLBIE lock
8363 rldicr r0,r4,0,62 ; Clear the valid bit
8364 std r0,0(r3) ; Invalidate the PTE
8366 rldicr r2,r4,16,35 ; Shift the AVPN over to match VPN
8367 sync ; Make sure everyone sees the invalidate
8368 rldimi r2,r5,0,36 ; Cram in the page portion of the EA
8370 mITLBIE64: lwarx r0,0,r8 ; Get the TLBIE lock
8371 mr. r0,r0 ; Is it locked?
8372 li r0,1 ; Get our lock word
8373 bne-- mITLBIE64a ; It is locked, toss reservation and wait...
8375 stwcx. r0,0,r8 ; Try to get it
8376 bne-- mITLBIE64 ; We was beat...
8378 rldicl r2,r2,0,16 ; Clear bits 0:15 because we are under orders
8380 li r0,0 ; Lock clear value
8382 tlbie r2 ; Invalidate it everywhere
8384 eieio ; Make sure that the tlbie happens first
8385 tlbsync ; Wait for everyone to catch up
8386 ptesync ; Wait for quiet again
8388 stw r0,tlbieLock(0) ; Clear the tlbie lock
8390 ld r5,8(r3) ; Get the real part
8391 srdi r10,r5,12 ; Change physical address to a ppnum
8392 b mINmerge ; Join the common 32-64-bit code...
8394 mITLBIE64a: li r5,lgKillResv ; Killing field
8395 stwcx. r5,0,r5 ; Kill reservation
8397 mITLBIE64b: lwz r0,0(r8) ; Get the TLBIE lock
8398 mr. r0,r0 ; Is it locked?
8399 beq++ mITLBIE64 ; Nope, try again...
8400 b mITLBIE64b ; Yup, wait for it...
8403 ; mapLockPteg - Locks a PTEG
8404 ; R7 points to PCA entry
8405 ; R6 contains PCA on return
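Functionally this is a test-and-set spin lock on the PCAlock bit, with the reservation-kill dance standing in for a plain re-read loop. A C sketch using a compare-and-swap in place of lwarx/stwcx. (PCA_LOCK is a placeholder for the real PCAlock bit):

#include <stdint.h>

#define PCA_LOCK 0x00000001u   /* placeholder for the PCAlock bit */

/* Acquire the PTEG lock in the PCA word and return the pre-lock PCA image,
 * much as mapLockPteg leaves the PCA in r6. */
static uint32_t lock_pteg(volatile uint32_t *pca)
{
    for (;;) {
        uint32_t old = __atomic_load_n(pca, __ATOMIC_RELAXED);
        if (old & PCA_LOCK)
            continue;                                  /* spin while held */
        if (__atomic_compare_exchange_n(pca, &old, old | PCA_LOCK,
                                        false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
            return old;                                /* unlocked image */
    }
}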
8412 mapLockPteg: lwarx r6,0,r7 ; Pick up the PCA
8413 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
8414 ori r0,r6,PCAlock ; Set the lock bit
8415 bne-- mLSkill ; It is locked...
8417 stwcx. r0,0,r7 ; Try to lock the PTEG
8418 bne-- mapLockPteg ; We collided...
8420 isync ; Nostradamus lied
8423 mLSkill: li r6,lgKillResv ; Get killing field
8424 stwcx. r6,0,r6 ; Kill it
8427 mapLockPteh: lwz r6,0(r7) ; Pick up the PCA
8428 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
8429 beq++ mapLockPteg ; Nope, try again...
8430 b mapLockPteh ; Yes, wait for it...
8434 ; The mapSelSlot function selects a PTEG slot to use. As input, it expects R6
8435 ; to contain the PCA. When it returns, R3 contains 0 if an unoccupied slot was
8436 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
8437 ; R4 returns the slot index.
8439 ; CR7 also indicates that we have a block mapping
8441 ; The PTEG allocation controls are a bit map of the state of the PTEG.
8442 ; PCAfree indicates that the PTE slot is empty.
8443 ; PCAauto means that it comes from an autogen area. These
8444 ; guys do not keep track of reference and change and are actually "wired".
8445 ; They are easy to maintain. PCAsteal
8446 ; is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these
8447 ; fields fit in a single word and are loaded and stored under control of the
8448 ; PTEG control area lock (PCAlock).
8450 ; Note that PCAauto does not contribute to the steal calculations at all. Originally
8451 ; it did, autogens were second in priority. This can result in a pathological
8452 ; case where an instruction cannot make forward progress, or one PTE slot
8455 ; Note that the PCA must be locked when we get here.
8457 ; Physically, all of these fields are packed into the single PCA word.
8464 ; At entry, R6 contains new unlocked PCA image (real PCA is locked and untouched)
8469 ; R3 = 1 - steal regular
8470 ; R3 = 2 - steal autogen
8471 ; R4 contains slot number
8472 ; R6 contains updated PCA image
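A simplified C sketch of the selection policy, under the PCA layout the bit arithmetic below implies (free bits in the top byte, the one-bit sliding steal mask in the next byte, autogen bits after that). It only models the choice and the result code, not the PCA image update or the steal-mask rotation, and it relies on the invariant that the steal mask always has exactly one bit set.

#include <stdint.h>

/* Returns 0 if a free slot was taken, 1 if a regular PTE is being stolen,
 * 2 if an autogen PTE is being stolen; *slot_out receives the slot index 0-7. */
static int select_slot(uint32_t pca, int *slot_out)
{
    int slot, result;

    if (pca & 0xFF000000u) {                           /* a free slot exists */
        slot = __builtin_clz(pca & 0xFF000000u);       /* first free bit, 0-7 */
        result = 0;
    } else {                                           /* steal the mask's victim */
        slot = __builtin_clz(pca & 0x00FF0000u) & 7;   /* steal mask is never 0 */
        result = (pca & (0x00008000u >> slot)) ? 2 : 1;
    }
    *slot_out = slot;
    return result;
}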
8477 mapSelSlot: lis r10,0 ; Clear autogen mask
8478 li r9,0 ; Start a mask
8479 beq cr7,mSSnotblk ; Skip if this is not a block mapping
8480 ori r10,r10,lo16(0xFFFF) ; Make sure we mark a block mapping (autogen)
8482 mSSnotblk: rlwinm r11,r6,16,24,31 ; Isolate just the steal mask
8483 oris r9,r9,0x8000 ; Get a mask
8484 cntlzw r4,r6 ; Find a slot or steal one
8485 ori r9,r9,lo16(0x8000) ; Insure that we have 0x80008000
8486 rlwinm r4,r4,0,29,31 ; Isolate bit position
8487 rlwimi r11,r11,8,16,23 ; Get set to march a 1 back into top of 8 bit rotate
8488 srw r2,r9,r4 ; Get mask to isolate selected inuse and autogen flags
8489 srwi r11,r11,1 ; Slide steal mask right
8490 and r8,r6,r2 ; Isolate the old in use and autogen bits
8491 andc r6,r6,r2 ; Allocate the slot and also clear autogen flag
8492 addi r0,r8,0x7F00 ; Push autogen flag to bit 16
8493 and r2,r2,r10 ; Keep the autogen part if autogen
8494 addis r8,r8,0xFF00 ; Push in use to bit 0 and invert
8495 or r6,r6,r2 ; Add in the new autogen bit
8496 rlwinm r0,r0,17,31,31 ; Get a 1 if the old was autogenned (always 0 if not in use)
8497 rlwinm r8,r8,1,31,31 ; Isolate old in use
8498 rlwimi r6,r11,16,8,15 ; Stick the new steal slot in
8500 add r3,r0,r8 ; Get 0 if no steal, 1 if steal normal, 2 if steal autogen
blr ; Return...
8504 ; Shared/Exclusive locks
8506 ; A shared/exclusive lock allows multiple shares of a lock to be taken
8507 ; but only one exclusive. A shared lock can be "promoted" to exclusive
8508 ; when it is the only share. If there are multiple sharers, the lock
8509 ; must be "converted". A promotion drops the share and gains exclusive as
8510 ; an atomic operation. If anyone else has a share, the operation fails.
8511 ; A conversion first drops the share and then takes an exclusive lock.
8513 ; We will want to add a timeout to this eventually.
8515 ; R3 is set to 0 for success, non-zero for failure
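A C sketch of the protocol just described, with the lock word modelled as a sign bit for "exclusive" plus a share count, and GCC/Clang atomics in place of lwarx/stwcx.; it illustrates the semantics only, not the kernel's implementation.

#include <stdbool.h>
#include <stdint.h>

#define SXLK_EXCL 0x80000000u   /* sign bit = held exclusive; low bits = shares */

static bool sxlk_promote(volatile uint32_t *lk)     /* only share -> exclusive */
{
    uint32_t expect = 1;                            /* must be exactly our one share */
    return __atomic_compare_exchange_n(lk, &expect, SXLK_EXCL,
                                       false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static void sxlk_exclusive(volatile uint32_t *lk)   /* spin until free, then take it */
{
    uint32_t expect;
    do {
        while (__atomic_load_n(lk, __ATOMIC_RELAXED) != 0)
            ;                                       /* wait for it to go free */
        expect = 0;
    } while (!__atomic_compare_exchange_n(lk, &expect, SXLK_EXCL,
                                          false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}

static bool sxlk_convert(volatile uint32_t *lk)     /* drop share, then go exclusive */
{
    if (sxlk_promote(lk))                           /* fast path: we were alone */
        return true;
    __atomic_fetch_sub(lk, 1, __ATOMIC_RELEASE);    /* drop our share */
    sxlk_exclusive(lk);                             /* take it the slow way */
    return true;
}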
8519 ; Convert a share into an exclusive
8526 lis r0,0x8000 ; Get the locked lock image
#if 0
8528 mflr r0 ; (TEST/DEBUG)
8529 oris r0,r0,0x8000 ; (TEST/DEBUG)
#endif
8532 sxlkCTry: lwarx r2,0,r3 ; Get the lock word
8533 cmplwi r2,1 ; Does it just have our share?
8534 subi r2,r2,1 ; Drop our share in case we do not get it
8535 bne-- sxlkCnotfree ; No, we need to unlock...
8536 stwcx. r0,0,r3 ; Try to take it exclusively
8537 bne-- sxlkCTry ; Collision, try again...
isync ; Toss anything younger than us
li r3,0 ; Set RC for success
blr ; Leave...

8544 sxlkCnotfree: stwcx. r2,0,r3 ; Try to drop our share...
8545 bne-- sxlkCTry ; Try again if we collided...
8546 b sxlkExclusive ; Go take it exclusively...
8549 ; Promote shared to exclusive
8555 lis r0,0x8000 ; Get the locked lock image
#if 0
8557 mflr r0 ; (TEST/DEBUG)
8558 oris r0,r0,0x8000 ; (TEST/DEBUG)
#endif
8561 sxlkPTry: lwarx r2,0,r3 ; Get the lock word
8562 cmplwi r2,1 ; Does it just have our share?
8563 bne-- sxlkPkill ; No, just fail (R3 is non-zero)...
8564 stwcx. r0,0,r3 ; Try to take it exclusively
8565 bne-- sxlkPTry ; Collision, try again...
isync ; Toss anything younger than us
li r3,0 ; Set RC for success
blr ; Leave...
8571 sxlkPkill: li r2,lgKillResv ; Point to killing field
8572 stwcx. r2,0,r2 ; Kill reservation
blr ; Leave, failure (R3 is still non-zero)...
8578 ; Take lock exclusively
8584 sxlkExclusive: lis r0,0x8000 ; Get the locked lock image
#if 0
8586 mflr r0 ; (TEST/DEBUG)
8587 oris r0,r0,0x8000 ; (TEST/DEBUG)
#endif
8590 sxlkXTry: lwarx r2,0,r3 ; Get the lock word
8591 mr. r2,r2 ; Is it locked?
8592 bne-- sxlkXWait ; Yes...
8593 stwcx. r0,0,r3 ; Try to take it
8594 bne-- sxlkXTry ; Collision, try again...
8596 isync ; Toss anything younger than us
blr ; Leave...
8602 sxlkXWait: li r2,lgKillResv ; Point to killing field
8603 stwcx. r2,0,r2 ; Kill reservation
8605 sxlkXWaiu: lwz r2,0(r3) ; Get the lock again
8606 mr. r2,r2 ; Is it free yet?
8607 beq++ sxlkXTry ; Yup...
8608 b sxlkXWaiu ; Hang around a bit more...
8611 ; Take a share of the lock
8616 sxlkShared: lwarx r2,0,r3 ; Get the lock word
8617 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
8618 addi r2,r2,1 ; Up the share count
8619 bne-- sxlkSWait ; Yes...
8620 stwcx. r2,0,r3 ; Try to take it
8621 bne-- sxlkShared ; Collision, try again...
8623 isync ; Toss anything younger than us
blr ; Leave...
8629 sxlkSWait: li r2,lgKillResv ; Point to killing field
8630 stwcx. r2,0,r2 ; Kill reservation
8632 sxlkSWaiu: lwz r2,0(r3) ; Get the lock again
8633 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
8634 beq++ sxlkShared ; Nope...
8635 b sxlkSWaiu ; Hang around a bit more...
8638 ; Unlock either exclusive or shared.
8643 sxlkUnlock: eieio ; Make sure we order our stores out
8645 sxlkUnTry: lwarx r2,0,r3 ; Get the lock
8646 rlwinm. r0,r2,0,0,0 ; Do we hold it exclusively?
8647 subi r2,r2,1 ; Remove our share if we have one
8648 li r0,0 ; Clear this
8649 bne-- sxlkUExclu ; We hold exclusive...
8651 stwcx. r2,0,r3 ; Try to lose our share
8652 bne-- sxlkUnTry ; Collision...
8655 sxlkUExclu: stwcx. r0,0,r3 ; Unlock and release reservation
8656 beqlr++ ; Leave if ok...
8657 b sxlkUnTry ; Could not store, try over...
8661 .globl EXT(fillPage)
LEXT(fillPage)

8665 mfsprg r0,2 ; Get feature flags
8666 mtcrf 0x02,r0 ; move pf64Bit to cr
8668 rlwinm r4,r4,0,1,0 ; Copy fill to top of 64-bit register
8669 lis r2,0x0200 ; Get vec
8671 ori r2,r2,0x2000 ; Get FP
mfmsr r5 ; Get the current MSR
mr r6,r4 ; Copy the fill value
mr r7,r4 ; Copy the fill value
mr r8,r4 ; Copy the fill value
8675 andc r5,r5,r2 ; Clear out permanent turn-offs
mr r9,r4 ; Copy the fill value
8677 ori r2,r2,0x8030 ; Clear IR, DR and EE
mr r10,r4 ; Copy the fill value
8679 andc r0,r5,r2 ; Kill them
mr r11,r4 ; Copy the fill value
mr r12,r4 ; Copy the fill value
8682 bt++ pf64Bitb,fpSF1 ; skip if 64-bit (only they take the hint)
8684 slwi r3,r3,12 ; Make into a physical address
8685 mtmsr r0 ; Interrupts and translation off
isync ; Make sure the MSR change has taken effect
8688 li r2,4096/32 ; Get number of cache lines
8690 fp32again: dcbz 0,r3 ; Clear
8691 addic. r2,r2,-1 ; Count down
stw r4,0(r3) ; Fill
stw r6,4(r3) ; Fill
stw r7,8(r3) ; Fill
8695 stw r8,12(r3) ; Fill
8696 stw r9,16(r3) ; Fill
8697 stw r10,20(r3) ; Fill
8698 stw r11,24(r3) ; Fill
8699 stw r12,28(r3) ; Fill
8700 addi r3,r3,32 ; Point next
8701 bgt+ fp32again ; Keep going
8703 mtmsr r5 ; Restore all
isync ; Make sure the MSR change has taken effect
blr ; Leave...

fpSF1: li r2,1 ; Get a 1 to build the 64-bit mode bit
8710 sldi r2,r2,63 ; Get 64-bit bit
8711 or r0,r0,r2 ; Turn on 64-bit
8712 sldi r3,r3,12 ; Make into a physical address
8714 mtmsrd r0 ; Interrupts and translation off
8717 li r2,4096/128 ; Get number of cache lines
8719 fp64again: dcbz128 0,r3 ; Clear
8720 addic. r2,r2,-1 ; Count down
std r4,0(r3) ; Fill
std r6,8(r3) ; Fill
8723 std r7,16(r3) ; Fill
8724 std r8,24(r3) ; Fill
8725 std r9,32(r3) ; Fill
8726 std r10,40(r3) ; Fill
8727 std r11,48(r3) ; Fill
8728 std r12,56(r3) ; Fill
8729 std r4,64+0(r3) ; Fill
8730 std r6,64+8(r3) ; Fill
8731 std r7,64+16(r3) ; Fill
8732 std r8,64+24(r3) ; Fill
8733 std r9,64+32(r3) ; Fill
8734 std r10,64+40(r3) ; Fill
8735 std r11,64+48(r3) ; Fill
8736 std r12,64+56(r3) ; Fill
8737 addi r3,r3,128 ; Point next
8738 bgt+ fp64again ; Keep going
8740 mtmsrd r5 ; Restore all
isync ; Make sure the MSR change has taken effect
blr ; Leave...
8750 lis r11,hi16(EXT(mapdebug))
8751 ori r11,r11,lo16(EXT(mapdebug))
8756 mLxx: rlwinm r0,r12,0,MSR_DR_BIT+1,MSR_DR_BIT-1
8771 .globl EXT(checkBogus)
LEXT(checkBogus)

8776 blr ; No-op normally