2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
24 #include <db_machine_commands.h>
27 #include <mach_debug.h>
29 #include <ppc/proc_reg.h>
30 #include <ppc/exception.h>
31 #include <ppc/Performance.h>
32 #include <ppc/exception.h>
33 #include <mach/ppc/vm_param.h>
40 ; +--------+--------+--------+--------+--------+--------+--------+--------+
41 ; |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - EA
42 ; +--------+--------+--------+--------+--------+--------+--------+--------+
46 ; +--------+--------+--------+
47 ; |//////BB|BBBBBBBB|BBBB////| - SID - base
48 ; +--------+--------+--------+
52 ; +--------+--------+--------+
53 ; |////////|11111111|111111//| - SID - copy 1
54 ; +--------+--------+--------+
58 ; +--------+--------+--------+
59 ; |////////|//222222|22222222| - SID - copy 2
60 ; +--------+--------+--------+
64 ; +--------+--------+--------+
65 ; |//////33|33333333|33//////| - SID - copy 3 - not needed
66 ; +--------+--------+--------+ for 65 bit VPN
70 ; +--------+--------+--------+--------+--------+--------+--------+
71 ; |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////| - SID Hash - this is all
72 ; +--------+--------+--------+--------+--------+--------+--------+ SID copies ORed
75 ; +--------+--------+--------+--------+--------+--------+--------+
76 ; |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////| - Shifted high order EA
77 ; +--------+--------+--------+--------+--------+--------+--------+ left shifted "segment"
84 ; +--------+--------+--------+--------+--------+--------+--------+
85 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////| - VSID - SID Hash XORed
86 ; +--------+--------+--------+--------+--------+--------+--------+ with shifted EA
88 ; 0 0 1 2 3 4 4 5 6 7 7
89 ; 0 8 6 4 2 0 8 6 4 2 9
90 ; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
91 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - VPN
92 ; +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
96 /* addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping
98 * Maps a page or block into a pmap
100 * Returns 0 if add worked or the vaddr of the first overlap if not
102 * Make mapping - not block or I/O - note: this is low-level, upper should remove duplicates
104 * 1) bump mapping busy count
106 * 3) find mapping full path - finds all possible list previous elements
107 * 4) upgrade pmap to exclusive
108 * 5) add mapping to search list
114 * 11) drop mapping busy count
117 * Make mapping - block or I/O - note: this is low-level, upper should remove duplicates
119 * 1) bump mapping busy count
121 * 3) find mapping full path - finds all possible list previous elements
122 * 4) upgrade pmap to exclusive
123 * 5) add mapping to search list
125 * 7) drop mapping busy count
130 .globl EXT(hw_add_map)
134 stwu r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
135 mflr r0 ; Save the link register
136 stw r17,FM_ARG0+0x00(r1) ; Save a register
137 stw r18,FM_ARG0+0x04(r1) ; Save a register
138 stw r19,FM_ARG0+0x08(r1) ; Save a register
139 mfsprg r19,2 ; Get feature flags
140 stw r20,FM_ARG0+0x0C(r1) ; Save a register
141 stw r21,FM_ARG0+0x10(r1) ; Save a register
142 mtcrf 0x02,r19 ; move pf64Bit cr6
143 stw r22,FM_ARG0+0x14(r1) ; Save a register
144 stw r23,FM_ARG0+0x18(r1) ; Save a register
145 stw r24,FM_ARG0+0x1C(r1) ; Save a register
146 stw r25,FM_ARG0+0x20(r1) ; Save a register
147 stw r26,FM_ARG0+0x24(r1) ; Save a register
148 stw r27,FM_ARG0+0x28(r1) ; Save a register
149 stw r28,FM_ARG0+0x2C(r1) ; Save a register
150 stw r29,FM_ARG0+0x30(r1) ; Save a register
151 stw r30,FM_ARG0+0x34(r1) ; Save a register
152 stw r31,FM_ARG0+0x38(r1) ; Save a register
153 stw r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
156 lwz r11,pmapFlags(r3) ; Get pmaps flags
157 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
158 bne hamPanic ; Call not valid for guest shadow assist pmap
161 rlwinm r11,r4,0,0,19 ; Round down to get mapping block address
162 mr r28,r3 ; Save the pmap
163 mr r31,r4 ; Save the mapping
164 bt++ pf64Bitb,hamSF1 ; skip if 64-bit (only they take the hint)
165 lwz r20,pmapvr+4(r3) ; Get conversion mask for pmap
166 lwz r21,mbvrswap+4(r11) ; Get conversion mask for mapping
170 hamSF1: ld r20,pmapvr(r3) ; Get conversion mask for pmap
171 ld r21,mbvrswap(r11) ; Get conversion mask for mapping
173 hamSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
175 mr r17,r11 ; Save the MSR
176 xor r28,r28,r20 ; Convert the pmap to physical addressing
177 xor r31,r31,r21 ; Convert the mapping to physical addressing
179 la r3,pmapSXlk(r28) ; Point to the pmap search lock
180 bl sxlkShared ; Go get a shared lock on the mapping lists
181 mr. r3,r3 ; Did we get the lock?
182 lwz r24,mpFlags(r31) ; Pick up the flags
183 bne-- hamBadLock ; Nope...
185 li r21,0 ; Remember that we have the shared lock
188 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
189 ; here so that we will know the previous elements so we can dequeue them
193 hamRescan: lwz r4,mpVAddr(r31) ; Get the new vaddr top half
194 lwz r5,mpVAddr+4(r31) ; Get the new vaddr bottom half
195 mr r3,r28 ; Pass in pmap to search
196 lhz r23,mpBSize(r31) ; Get the block size for later
197 mr r29,r4 ; Save top half of vaddr for later
198 mr r30,r5 ; Save bottom half of vaddr for later
200 bl EXT(mapSearchFull) ; Go see if we can find it
202 li r22,lo16(0x800C) ; Get 0xFFFF800C
203 rlwinm r0,r24,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
204 addi r23,r23,1 ; Get actual length
205 rlwnm r22,r22,r0,27,31 ; Rotate to get 12 or 25
206 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
207 slw r9,r23,r22 ; Isolate the low part
208 rlwnm r22,r23,r22,22,31 ; Extract the high order
209 addic r23,r9,-4096 ; Get the length to the last page
210 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
211 addme r22,r22 ; Do high order as well...
212 mr. r3,r3 ; Did we find a mapping here?
213 or r0,r30,r0 ; Fill high word of 64-bit with 1s so we will properly carry
214 bne-- hamOverlay ; We found a mapping, this is no good, can not double map...
216 addc r9,r0,r23 ; Add size to get last page in new range
217 or. r0,r4,r5 ; Are we beyond the end?
218 adde r8,r29,r22 ; Add the rest of the length on
219 rlwinm r9,r9,0,0,31 ; Clean top half of sum
220 beq++ hamFits ; We are at the end...
222 cmplw cr1,r9,r5 ; Is the bottom part of our end less?
223 cmplw r8,r4 ; Is our end before the next (top part)
224 crand cr0_eq,cr0_eq,cr1_lt ; Is the second half less and the first half equal?
225 cror cr0_eq,cr0_eq,cr0_lt ; Or is the top half less
227 bf-- cr0_eq,hamOverlay ; No, we do fit, there is an overlay...
230 ; Here we try to convert to an exclusive lock. This will fail if someone else
233 hamFits: mr. r21,r21 ; Do we already have the exclusive lock?
234 la r3,pmapSXlk(r28) ; Point to the pmap search lock
236 bne-- hamGotX ; We already have the exclusive...
238 bl sxlkPromote ; Try to promote shared to exclusive
239 mr. r3,r3 ; Could we?
240 beq++ hamGotX ; Yeah...
243 ; Since we could not promote our lock, we need to convert to it.
244 ; That means that we drop the shared lock and wait to get it
245 ; exclusive. Since we release the lock, we need to do the look up
249 la r3,pmapSXlk(r28) ; Point to the pmap search lock
250 bl sxlkConvert ; Convert shared to exclusive
251 mr. r3,r3 ; Could we?
252 bne-- hamBadLock ; Nope, we must have timed out...
254 li r21,1 ; Remember that we have the exclusive lock
255 b hamRescan ; Go look again...
259 hamGotX: mr r3,r28 ; Get the pmap to insert into
260 mr r4,r31 ; Point to the mapping
261 bl EXT(mapInsert) ; Insert the mapping into the list
263 rlwinm r11,r24,mpPcfgb+2,mpPcfg>>6 ; Get the index into the page config table
264 lhz r8,mpSpace(r31) ; Get the address space
265 lwz r11,lgpPcfg(r11) ; Get the page config
266 mfsdr1 r7 ; Get the hash table base/bounds
267 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
269 andi. r0,r24,mpType ; Is this a normal mapping?
271 rlwimi r8,r8,14,4,17 ; Double address space
272 rlwinm r9,r30,0,4,31 ; Clear segment
273 rlwinm r10,r30,18,14,17 ; Shift EA[32:35] down to correct spot in VSID (actually shift up 14)
274 rlwimi r8,r8,28,0,3 ; Get the last nybble of the hash
275 rlwimi r10,r29,18,0,13 ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
276 rlwinm r7,r7,0,16,31 ; Isolate length mask (or count)
277 addi r4,r4,1 ; Bump up the mapped page count
278 srw r9,r9,r11 ; Isolate just the page index
279 xor r10,r10,r8 ; Calculate the low 32 bits of the VSID
280 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
281 xor r9,r9,r10 ; Get the hash to the PTEG
283 bne-- hamDoneNP ; Not a normal mapping, therefore, no physent...
285 bl mapPhysFindLock ; Go find and lock the physent
287 bt++ pf64Bitb,ham64 ; This is 64-bit...
289 lwz r11,ppLink+4(r3) ; Get the alias chain pointer
290 rlwinm r7,r7,16,0,15 ; Get the PTEG wrap size
291 slwi r9,r9,6 ; Make PTEG offset
292 ori r7,r7,0xFFC0 ; Stick in the bottom part
293 rlwinm r12,r11,0,~ppFlags ; Clean it up
294 and r9,r9,r7 ; Wrap offset into table
295 mr r4,r31 ; Set the link to install
296 stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
297 stw r12,mpAlias+4(r31) ; Move to the mapping
298 bl mapPhyCSet32 ; Install the link
299 b hamDone ; Go finish up...
303 ham64: li r0,ppLFAmask ; Get mask to clean up alias pointer
304 subfic r7,r7,46 ; Get number of leading zeros
305 eqv r4,r4,r4 ; Get all ones
306 ld r11,ppLink(r3) ; Get the alias chain pointer
307 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
308 srd r4,r4,r7 ; Get the wrap mask
309 sldi r9,r9,7 ; Change hash to PTEG offset
310 andc r11,r11,r0 ; Clean out the lock and flags
311 and r9,r9,r4 ; Wrap to PTEG
313 stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
314 std r11,mpAlias(r31) ; Set the alias pointer in the mapping
316 bl mapPhyCSet64 ; Install the link
318 hamDone: bl mapPhysUnlock ; Unlock the physent chain
320 hamDoneNP: la r3,pmapSXlk(r28) ; Point to the pmap search lock
321 bl sxlkUnlock ; Unlock the search list
323 mr r3,r31 ; Get the mapping pointer
324 bl mapDropBusy ; Drop the busy count
326 li r3,0 ; Set successful return
327 li r4,0 ; Set successful return
329 hamReturn: bt++ pf64Bitb,hamR64 ; Yes...
331 mtmsr r17 ; Restore enables/translation/etc.
333 b hamReturnC ; Join common...
335 hamR64: mtmsrd r17 ; Restore enables/translation/etc.
338 hamReturnC: lwz r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return
339 lwz r17,FM_ARG0+0x00(r1) ; Save a register
340 lwz r18,FM_ARG0+0x04(r1) ; Save a register
341 lwz r19,FM_ARG0+0x08(r1) ; Save a register
342 lwz r20,FM_ARG0+0x0C(r1) ; Save a register
343 mtlr r0 ; Restore the return
344 lwz r21,FM_ARG0+0x10(r1) ; Save a register
345 lwz r22,FM_ARG0+0x14(r1) ; Save a register
346 lwz r23,FM_ARG0+0x18(r1) ; Save a register
347 lwz r24,FM_ARG0+0x1C(r1) ; Save a register
348 lwz r25,FM_ARG0+0x20(r1) ; Save a register
349 lwz r26,FM_ARG0+0x24(r1) ; Save a register
350 lwz r27,FM_ARG0+0x28(r1) ; Save a register
351 lwz r28,FM_ARG0+0x2C(r1) ; Save a register
352 lwz r29,FM_ARG0+0x30(r1) ; Save a register
353 lwz r30,FM_ARG0+0x34(r1) ; Save a register
354 lwz r31,FM_ARG0+0x38(r1) ; Save a register
355 lwz r1,0(r1) ; Pop the stack
362 hamOverlay: lwz r22,mpFlags(r3) ; Get the overlay flags
363 li r0,mpC|mpR ; Get a mask to turn off RC bits
364 lwz r23,mpFlags(r31) ; Get the requested flags
365 lwz r20,mpVAddr(r3) ; Get the overlay address
366 lwz r8,mpVAddr(r31) ; Get the requested address
367 lwz r21,mpVAddr+4(r3) ; Get the overlay address
368 lwz r9,mpVAddr+4(r31) ; Get the requested address
369 lhz r10,mpBSize(r3) ; Get the overlay length
370 lhz r11,mpBSize(r31) ; Get the requested length
371 lwz r24,mpPAddr(r3) ; Get the overlay physical address
372 lwz r25,mpPAddr(r31) ; Get the requested physical address
373 andc r21,r21,r0 ; Clear RC bits
374 andc r9,r9,r0 ; Clear RC bits
376 la r3,pmapSXlk(r28) ; Point to the pmap search lock
377 bl sxlkUnlock ; Unlock the search list
379 rlwinm. r0,r22,0,mpRIPb,mpRIPb ; Are we in the process of removing this one?
380 mr r3,r20 ; Save the top of the colliding address
381 rlwinm r4,r21,0,0,19 ; Save the bottom of the colliding address
383 bne++ hamRemv ; Removing, go say so so we help...
385 cmplw r20,r8 ; High part of vaddr the same?
386 cmplw cr1,r21,r9 ; Low part?
387 crand cr5_eq,cr0_eq,cr1_eq ; Remember if same
389 cmplw r10,r11 ; Size the same?
390 cmplw cr1,r24,r25 ; Physical address?
391 crand cr5_eq,cr5_eq,cr0_eq ; Remember
392 crand cr5_eq,cr5_eq,cr1_eq ; Remember if same
394 xor r23,r23,r22 ; Compare mapping flag words
395 andi. r23,r23,mpType|mpPerm ; Are mapping types and attributes the same?
396 crand cr5_eq,cr5_eq,cr0_eq ; Merge in final check
397 bf-- cr5_eq,hamSmash ; This is not the same, so we return a smash...
399 ori r4,r4,mapRtMapDup ; Set duplicate
400 b hamReturn ; And leave...
402 hamRemv: ori r4,r4,mapRtRemove ; We are in the process of removing the collision
403 b hamReturn ; Come back yall...
405 hamSmash: ori r4,r4,mapRtSmash ; Tell caller that it has some clean up to do
406 b hamReturn ; Join common epilog code
410 hamBadLock: li r3,0 ; Set lock time out error code
411 li r4,mapRtBadLk ; Set lock time out error code
412 b hamReturn ; Leave....
414 hamPanic: lis r0,hi16(Choke) ; System abend
415 ori r0,r0,lo16(Choke) ; System abend
416 li r3,failMapping ; Show that we failed some kind of mapping thing
423 * mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
425 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
426 * a 64-bit quantity, it is a long long so it is in R4 and R5.
428 * We return the virtual address of the removed mapping as a
431 * Note that this is designed to be called from 32-bit mode with a stack.
433 * We disable translation and all interruptions here. This keeps is
434 * from having to worry about a deadlock due to having anything locked
435 * and needing it to process a fault.
437 * Note that this must be done with both interruptions off and VM off
439 * Remove mapping via pmap, regular page, no pte
442 * 2) find mapping full path - finds all possible list previous elements
443 * 4) upgrade pmap to exclusive
444 * 3) bump mapping busy count
445 * 5) remove mapping from search list
448 * 8) remove from physent
450 * 10) drop mapping busy count
451 * 11) drain mapping busy count
454 * Remove mapping via pmap, regular page, with pte
457 * 2) find mapping full path - finds all possible list previous elements
458 * 3) upgrade lock to exclusive
459 * 4) bump mapping busy count
461 * 6) invalidate pte and tlbie
462 * 7) atomic merge rc into physent
464 * 9) remove mapping from search list
467 * 12) remove from physent
469 * 14) drop mapping busy count
470 * 15) drain mapping busy count
473 * Remove mapping via pmap, I/O or block
476 * 2) find mapping full path - finds all possible list previous elements
477 * 3) upgrade lock to exclusive
478 * 4) bump mapping busy count
479 * 5) mark remove-in-progress
480 * 6) check and bump remove chunk cursor if needed
482 * 8) if something to invalidate, go to step 11
485 * 10) return with mapRtRemove to force higher level to call again
488 * 12) invalidate ptes, no tlbie
490 * 14) repeat 11 - 13 for all pages in chunk
491 * 15) if not final chunk, go to step 9
492 * 16) invalidate tlb entries for the whole block map but no more than the full tlb
493 * 17) lock pmap share
494 * 18) find mapping full path - finds all possible list previous elements
495 * 19) upgrade lock to exclusive
496 * 20) remove mapping from search list
497 * 21) drop mapping busy count
498 * 22) drain mapping busy count
503 .globl EXT(hw_rem_map)
508 ; NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE
509 ; THE HW_PURGE_* ROUTINES ALSO
512 #define hrmStackSize ((31-15+1)*4)+4
513 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
514 mflr r0 ; Save the link register
515 stw r15,FM_ARG0+0x00(r1) ; Save a register
516 stw r16,FM_ARG0+0x04(r1) ; Save a register
517 stw r17,FM_ARG0+0x08(r1) ; Save a register
518 stw r18,FM_ARG0+0x0C(r1) ; Save a register
519 stw r19,FM_ARG0+0x10(r1) ; Save a register
520 mfsprg r19,2 ; Get feature flags
521 stw r20,FM_ARG0+0x14(r1) ; Save a register
522 stw r21,FM_ARG0+0x18(r1) ; Save a register
523 mtcrf 0x02,r19 ; move pf64Bit cr6
524 stw r22,FM_ARG0+0x1C(r1) ; Save a register
525 stw r23,FM_ARG0+0x20(r1) ; Save a register
526 stw r24,FM_ARG0+0x24(r1) ; Save a register
527 stw r25,FM_ARG0+0x28(r1) ; Save a register
528 stw r26,FM_ARG0+0x2C(r1) ; Save a register
529 stw r27,FM_ARG0+0x30(r1) ; Save a register
530 stw r28,FM_ARG0+0x34(r1) ; Save a register
531 stw r29,FM_ARG0+0x38(r1) ; Save a register
532 stw r30,FM_ARG0+0x3C(r1) ; Save a register
533 stw r31,FM_ARG0+0x40(r1) ; Save a register
534 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
535 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
538 lwz r11,pmapFlags(r3) ; Get pmaps flags
539 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
540 bne hrmPanic ; Call not valid for guest shadow assist pmap
543 bt++ pf64Bitb,hrmSF1 ; skip if 64-bit (only they take the hint)
544 lwz r9,pmapvr+4(r3) ; Get conversion mask
547 hrmSF1: ld r9,pmapvr(r3) ; Get conversion mask
550 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
552 xor r28,r3,r9 ; Convert the pmap to physical addressing
555 ; Here is where we join in from the hw_purge_* routines
558 hrmJoin: lwz r3,pmapFlags(r28) ; Get pmap's flags
559 mfsprg r19,2 ; Get feature flags again (for alternate entries)
561 mr r17,r11 ; Save the MSR
562 mr r29,r4 ; Top half of vaddr
563 mr r30,r5 ; Bottom half of vaddr
565 rlwinm. r3,r3,0,pmapVMgsaa ; Is guest shadow assist active?
566 bne-- hrmGuest ; Yes, handle specially
568 la r3,pmapSXlk(r28) ; Point to the pmap search lock
569 bl sxlkShared ; Go get a shared lock on the mapping lists
570 mr. r3,r3 ; Did we get the lock?
571 bne-- hrmBadLock ; Nope...
574 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
575 ; here so that we will know the previous elements so we can dequeue them
576 ; later. Note: we get back mpFlags in R7.
579 mr r3,r28 ; Pass in pmap to search
580 mr r4,r29 ; High order of address
581 mr r5,r30 ; Low order of address
582 bl EXT(mapSearchFull) ; Go see if we can find it
584 andi. r0,r7,mpPerm ; Mapping marked permanent?
585 crmove cr5_eq,cr0_eq ; Remember permanent marking
586 mr r20,r7 ; Remember mpFlags
587 mr. r31,r3 ; Did we? (And remember mapping address for later)
588 mr r15,r4 ; Save top of next vaddr
589 mr r16,r5 ; Save bottom of next vaddr
590 beq-- hrmNotFound ; Nope, not found...
592 bf-- cr5_eq,hrmPerm ; This one can't be removed...
594 ; Here we try to promote to an exclusive lock. This will fail if someone else
598 la r3,pmapSXlk(r28) ; Point to the pmap search lock
599 bl sxlkPromote ; Try to promote shared to exclusive
600 mr. r3,r3 ; Could we?
601 beq++ hrmGotX ; Yeah...
604 ; Since we could not promote our lock, we need to convert to it.
605 ; That means that we drop the shared lock and wait to get it
606 ; exclusive. Since we release the lock, we need to do the look up
610 la r3,pmapSXlk(r28) ; Point to the pmap search lock
611 bl sxlkConvert ; Convert shared to exclusive
612 mr. r3,r3 ; Could we?
613 bne-- hrmBadLock ; Nope, we must have timed out...
615 mr r3,r28 ; Pass in pmap to search
616 mr r4,r29 ; High order of address
617 mr r5,r30 ; Low order of address
618 bl EXT(mapSearchFull) ; Rescan the list
620 andi. r0,r7,mpPerm ; Mapping marked permanent?
621 crmove cr5_eq,cr0_eq ; Remember permanent marking
622 mr. r31,r3 ; Did we lose it when we converted?
623 mr r20,r7 ; Remember mpFlags
624 mr r15,r4 ; Save top of next vaddr
625 mr r16,r5 ; Save bottom of next vaddr
626 beq-- hrmNotFound ; Yeah, we did, someone tossed it for us...
628 bf-- cr5_eq,hrmPerm ; This one can't be removed...
631 ; We have an exclusive lock on the mapping chain. And we
632 ; also have the busy count bumped in the mapping so it can
636 hrmGotX: mr r3,r31 ; Get the mapping
637 bl mapBumpBusy ; Bump up the busy count
640 ; Invalidate any PTEs associated with this
641 ; mapping (more than one if a block) and accumulate the reference
644 ; Here is also where we need to split 32- and 64-bit processing
647 lwz r21,mpPte(r31) ; Grab the offset to the PTE
648 rlwinm r23,r29,0,1,0 ; Copy high order vaddr to high if 64-bit machine
649 mfsdr1 r29 ; Get the hash table base and size
651 rlwinm r0,r20,0,mpType ; Isolate mapping type
652 cmplwi cr5,r0,mpBlock ; Remember whether this is a block mapping
653 cmplwi r0,mpMinSpecial ; cr0_lt <- not a special mapping type
655 rlwinm r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
656 ori r2,r2,0xFFFF ; Get mask to clean out hash table base (works for both 32- and 64-bit)
657 cmpwi cr1,r0,0 ; Have we made a PTE for this yet?
658 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
659 crorc cr0_eq,cr1_eq,cr0_lt ; No need to look at PTE if none or a special mapping
660 rlwimi r23,r30,0,0,31 ; Insert low under high part of address
661 andc r29,r29,r2 ; Clean up hash table base
662 li r22,0 ; Clear this on out (also sets RC to 0 if we bail)
663 mr r30,r23 ; Move the now merged vaddr to the correct register
664 add r26,r29,r21 ; Point to the PTEG slot
666 bt++ pf64Bitb,hrmSplit64 ; Go do 64-bit version...
668 rlwinm r9,r21,28,4,29 ; Convert PTEG to PCA entry
669 beq- cr5,hrmBlock32 ; Go treat block specially...
670 subfic r9,r9,-4 ; Get the PCA entry offset
671 bt- cr0_eq,hrmPysDQ32 ; Skip next if no possible PTE...
672 add r7,r9,r29 ; Point to the PCA slot
674 bl mapLockPteg ; Go lock up the PTEG (Note: we need to save R6 to set PCA)
676 lwz r21,mpPte(r31) ; Get the quick pointer again
677 lwz r5,0(r26) ; Get the top of PTE
679 rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
680 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
681 rlwinm r5,r5,0,1,31 ; Turn off valid bit in PTE
682 stw r21,mpPte(r31) ; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake)
683 beq- hrmUlckPCA32 ; Pte is gone, no need to invalidate...
685 stw r5,0(r26) ; Invalidate the PTE
687 li r9,tlbieLock ; Get the TLBIE lock
689 sync ; Make sure the invalid PTE is actually in memory
691 hrmPtlb32: lwarx r5,0,r9 ; Get the TLBIE lock
692 mr. r5,r5 ; Is it locked?
693 li r5,1 ; Get locked indicator
694 bne- hrmPtlb32 ; It is locked, go spin...
695 stwcx. r5,0,r9 ; Try to get it
696 bne- hrmPtlb32 ; We was beat...
698 rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
700 tlbie r30 ; Invalidate it all corresponding TLB entries
702 beq- hrmNTlbs ; Jump if we can not do a TLBSYNC....
704 eieio ; Make sure that the tlbie happens first
705 tlbsync ; Wait for everyone to catch up
706 sync ; Make sure of it all
708 hrmNTlbs: li r0,0 ; Clear this
709 rlwinm r2,r21,29,29,31 ; Get slot number (8 byte entries)
710 stw r0,tlbieLock(0) ; Clear the tlbie lock
711 lis r0,0x8000 ; Get bit for slot 0
712 eieio ; Make sure those RC bit have been stashed in PTE
714 srw r0,r0,r2 ; Get the allocation hash mask
715 lwz r22,4(r26) ; Get the latest reference and change bits
716 or r6,r6,r0 ; Show that this slot is free
719 eieio ; Make sure all updates come first
720 stw r6,0(r7) ; Unlock the PTEG
723 ; Now, it is time to remove the mapping and unlock the chain.
724 ; But first, we need to make sure no one else is using this
725 ; mapping so we drain the busy now
728 hrmPysDQ32: mr r3,r31 ; Point to the mapping
729 bl mapDrainBusy ; Go wait until mapping is unused
731 mr r3,r28 ; Get the pmap to remove from
732 mr r4,r31 ; Point to the mapping
733 bl EXT(mapRemove) ; Remove the mapping from the list
735 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
736 rlwinm r0,r20,0,mpType ; Isolate mapping type
737 cmplwi cr1,r0,mpMinSpecial ; cr1_lt <- not a special mapping type
738 la r3,pmapSXlk(r28) ; Point to the pmap search lock
739 subi r4,r4,1 ; Drop down the mapped page count
740 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
741 bl sxlkUnlock ; Unlock the search list
743 bf-- cr1_lt,hrmRetn32 ; This one has no real memory associated with it so we are done...
745 bl mapPhysFindLock ; Go find and lock the physent
747 lwz r9,ppLink+4(r3) ; Get first mapping
749 mr r4,r22 ; Get the RC bits we just got
750 bl mapPhysMerge ; Go merge the RC bits
752 rlwinm r9,r9,0,~ppFlags ; Clear the flags from the mapping pointer
754 cmplw r9,r31 ; Are we the first on the list?
755 bne- hrmNot1st ; Nope...
758 lwz r4,mpAlias+4(r31) ; Get our new forward pointer
759 stw r9,mpAlias+4(r31) ; Make sure we are off the chain
760 bl mapPhyCSet32 ; Go set the physent link and preserve flags
762 b hrmPhyDQd ; Join up and unlock it all...
766 hrmPerm: li r8,-4096 ; Get the value we need to round down to a page
767 and r8,r8,r31 ; Get back to a page
768 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
770 la r3,pmapSXlk(r28) ; Point to the pmap search lock
771 bl sxlkUnlock ; Unlock the search list
773 xor r3,r31,r8 ; Flip mapping address to virtual
774 ori r3,r3,mapRtPerm ; Set permanent mapping error
777 hrmBadLock: li r3,mapRtBadLk ; Set bad lock
781 la r3,pmapSXlk(r28) ; Point to the pmap search lock
782 bl sxlkUnlock ; Unlock the search list
785 mr r3,r31 ; Point to the mapping
786 bl mapDropBusy ; Drop the busy here since we need to come back
787 li r3,mapRtRemove ; Say we are still removing this
793 la r3,pmapSXlk(r28) ; Point to the pmap search lock
794 bl sxlkUnlock ; Unlock the search list
795 li r3,mapRtNotFnd ; No mapping found
797 hrmErRtn: bt++ pf64Bitb,hrmSF1z ; skip if 64-bit (only they take the hint)
799 mtmsr r17 ; Restore enables/translation/etc.
801 b hrmRetnCmn ; Join the common return code...
803 hrmSF1z: mtmsrd r17 ; Restore enables/translation/etc.
805 b hrmRetnCmn ; Join the common return code...
809 hrmNot1st: mr. r8,r9 ; Remember and test current node
810 beq- hrmPhyDQd ; Could not find our node, someone must have unmapped us...
811 lwz r9,mpAlias+4(r9) ; Chain to the next
812 cmplw r9,r31 ; Is this us?
813 bne- hrmNot1st ; Not us...
815 lwz r9,mpAlias+4(r9) ; Get our forward pointer
816 stw r9,mpAlias+4(r8) ; Unchain us
820 hrmPhyDQd: bl mapPhysUnlock ; Unlock the physent chain
822 hrmRetn32: rlwinm r8,r31,0,0,19 ; Find start of page
823 mr r3,r31 ; Copy the pointer to the mapping
824 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
825 bl mapDrainBusy ; Go wait until mapping is unused
827 xor r3,r31,r8 ; Flip mapping address to virtual
829 mtmsr r17 ; Restore enables/translation/etc.
832 hrmRetnCmn: lwz r6,FM_ARG0+0x44(r1) ; Get address to save next mapped vaddr
833 lwz r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
834 lwz r17,FM_ARG0+0x08(r1) ; Restore a register
835 lwz r18,FM_ARG0+0x0C(r1) ; Restore a register
836 mr. r6,r6 ; Should we pass back the "next" vaddr?
837 lwz r19,FM_ARG0+0x10(r1) ; Restore a register
838 lwz r20,FM_ARG0+0x14(r1) ; Restore a register
839 mtlr r0 ; Restore the return
841 rlwinm r16,r16,0,0,19 ; Clean to a page boundary
842 beq hrmNoNextAdr ; Do not pass back the next vaddr...
843 stw r15,0(r6) ; Pass back the top of the next vaddr
844 stw r16,4(r6) ; Pass back the bottom of the next vaddr
847 lwz r15,FM_ARG0+0x00(r1) ; Restore a register
848 lwz r16,FM_ARG0+0x04(r1) ; Restore a register
849 lwz r21,FM_ARG0+0x18(r1) ; Restore a register
850 rlwinm r3,r3,0,0,31 ; Clear top of register if 64-bit
851 lwz r22,FM_ARG0+0x1C(r1) ; Restore a register
852 lwz r23,FM_ARG0+0x20(r1) ; Restore a register
853 lwz r24,FM_ARG0+0x24(r1) ; Restore a register
854 lwz r25,FM_ARG0+0x28(r1) ; Restore a register
855 lwz r26,FM_ARG0+0x2C(r1) ; Restore a register
856 lwz r27,FM_ARG0+0x30(r1) ; Restore a register
857 lwz r28,FM_ARG0+0x34(r1) ; Restore a register
858 lwz r29,FM_ARG0+0x38(r1) ; Restore a register
859 lwz r30,FM_ARG0+0x3C(r1) ; Restore a register
860 lwz r31,FM_ARG0+0x40(r1) ; Restore a register
861 lwz r1,0(r1) ; Pop the stack
865 ; Here is where we come when all is lost. Somehow, we failed a mapping function
866 ; that must work... All hope is gone. Alas, we die.......
869 hrmPanic: lis r0,hi16(Choke) ; System abend
870 ori r0,r0,lo16(Choke) ; System abend
871 li r3,failMapping ; Show that we failed some kind of mapping thing
876 ; Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
877 ; in the range. Then, if we did not finish, return a code indicating that we need to
878 ; be called again. Eventually, we will finish and then, we will do a TLBIE for each
879 ; PTEG up to the point where we have cleared it all (64 for 32-bit architecture)
881 ; A potential speed up is that we stop the invalidate loop once we have walked through
882 ; the hash table once. This really is not worth the trouble because we need to have
883 ; mapped 1/2 of physical RAM in an individual block. Way unlikely.
885 ; We should rethink this and see if we think it will be faster to check PTE and
886 ; only invalidate the specific PTE rather than all block map PTEs in the PTEG.
891 hrmBlock32: lis r29,0xD000 ; Get shift to 32MB bsu
892 rlwinm r24,r20,mpBSub+1+2,29,29 ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
893 lhz r25,mpBSize(r31) ; Get the number of pages in block
894 lhz r23,mpSpace(r31) ; Get the address space hash
895 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
896 rlwnm r29,r29,r24,28,31 ; Rotate to get 0 or 13
897 addi r25,r25,1 ; Account for zero-based counting
898 ori r0,r20,mpRIP ; Turn on the remove in progress flag
899 slw r25,r25,r29 ; Adjust for 32MB if needed
900 mfsdr1 r29 ; Get the hash table base and size
901 rlwinm r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb ; Get high order of hash
902 subi r25,r25,1 ; Convert back to zero-based counting
903 lwz r27,mpVAddr+4(r31) ; Get the base vaddr
904 sub r4,r25,r9 ; Get number of pages left
905 cmplw cr1,r9,r25 ; Have we already hit the end?
906 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
907 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
908 rlwinm r26,r29,16,7,15 ; Get the hash table size
909 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
910 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
911 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
912 cmpwi cr7,r2,0 ; Remember if we have finished
913 slwi r0,r9,12 ; Make cursor into page offset
914 or r24,r24,r23 ; Get full hash
915 and r4,r4,r2 ; If more than a chunk, bring this back to 0
916 rlwinm r29,r29,0,0,15 ; Isolate the hash table base
917 add r27,r27,r0 ; Adjust vaddr to start of current chunk
918 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
920 bgt- cr1,hrmEndInSight ; Someone is already doing the last hunk...
922 la r3,pmapSXlk(r28) ; Point to the pmap search lock
923 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
924 bl sxlkUnlock ; Unlock the search list while we are invalidating
926 rlwinm r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb ; Isolate the segment
927 rlwinm r30,r27,26,6,25 ; Shift vaddr to PTEG offset (and remember VADDR in R27)
928 xor r24,r24,r8 ; Get the proper VSID
929 rlwinm r21,r27,26,10,25 ; Shift page index to PTEG offset (and remember VADDR in R27)
930 ori r26,r26,lo16(0xFFC0) ; Stick in the rest of the length
931 rlwinm r22,r4,6,10,25 ; Shift size to PTEG offset
932 rlwinm r24,r24,6,0,25 ; Shift hash to PTEG units
933 add r22,r22,r30 ; Get end address (in PTEG units)
935 hrmBInv32: rlwinm r23,r30,0,10,25 ; Isolate just the page index
936 xor r23,r23,r24 ; Hash it
937 and r23,r23,r26 ; Wrap it into the table
938 rlwinm r3,r23,28,4,29 ; Change to PCA offset
939 subfic r3,r3,-4 ; Get the PCA entry offset
940 add r7,r3,r29 ; Point to the PCA slot
941 cmplw cr5,r30,r22 ; Check if we reached the end of the range
942 addi r30,r30,64 ; bump to the next vaddr
944 bl mapLockPteg ; Lock the PTEG
946 rlwinm. r4,r6,16,0,7 ; Position, save, and test block mappings in PCA
947 add r5,r23,r29 ; Point to the PTEG
948 li r0,0 ; Set an invalid PTE value
949 beq+ hrmBNone32 ; No block map PTEs in this PTEG...
950 mtcrf 0x80,r4 ; Set CRs to select PTE slots
951 mtcrf 0x40,r4 ; Set CRs to select PTE slots
953 bf 0,hrmSlot0 ; No autogen here
954 stw r0,0x00(r5) ; Invalidate PTE
956 hrmSlot0: bf 1,hrmSlot1 ; No autogen here
957 stw r0,0x08(r5) ; Invalidate PTE
959 hrmSlot1: bf 2,hrmSlot2 ; No autogen here
960 stw r0,0x10(r5) ; Invalidate PTE
962 hrmSlot2: bf 3,hrmSlot3 ; No autogen here
963 stw r0,0x18(r5) ; Invalidate PTE
965 hrmSlot3: bf 4,hrmSlot4 ; No autogen here
966 stw r0,0x20(r5) ; Invalidate PTE
968 hrmSlot4: bf 5,hrmSlot5 ; No autogen here
969 stw r0,0x28(r5) ; Invalidate PTE
971 hrmSlot5: bf 6,hrmSlot6 ; No autogen here
972 stw r0,0x30(r5) ; Invalidate PTE
974 hrmSlot6: bf 7,hrmSlot7 ; No autogen here
975 stw r0,0x38(r5) ; Invalidate PTE
977 hrmSlot7: rlwinm r0,r4,16,16,23 ; Move in use to autogen
978 or r6,r6,r4 ; Flip on the free bits that corrospond to the autogens we cleared
979 andc r6,r6,r0 ; Turn off all the old autogen bits
981 hrmBNone32: eieio ; Make sure all updates come first
983 stw r6,0(r7) ; Unlock and set the PCA
985 bne+ cr5,hrmBInv32 ; Go invalidate the next...
987 bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...
989 mr r3,r31 ; Copy the pointer to the mapping
990 bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one
992 sync ; Make sure memory is consistent
994 subi r5,r25,63 ; Subtract TLB size from page count (note we are 0 based here)
995 li r6,63 ; Assume full invalidate for now
996 srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
997 andc r6,r6,r5 ; Clear max if we have less to do
998 and r5,r25,r5 ; Clear count if we have more than max
999 lwz r27,mpVAddr+4(r31) ; Get the base vaddr again
1000 li r7,tlbieLock ; Get the TLBIE lock
1001 or r5,r5,r6 ; Get number of TLBIEs needed
1003 hrmBTLBlck: lwarx r2,0,r7 ; Get the TLBIE lock
1004 mr. r2,r2 ; Is it locked?
1005 li r2,1 ; Get our lock value
1006 bne- hrmBTLBlck ; It is locked, go wait...
1007 stwcx. r2,0,r7 ; Try to get it
1008 bne- hrmBTLBlck ; We was beat...
1010 hrmBTLBi: addic. r5,r5,-1 ; See if we did them all
1011 tlbie r27 ; Invalidate it everywhere
1012 addi r27,r27,0x1000 ; Up to the next page
1013 bge+ hrmBTLBi ; Make sure we have done it all...
1015 rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
1016 li r2,0 ; Lock clear value
1018 sync ; Make sure all is quiet
1019 beq- hrmBNTlbs ; Jump if we can not do a TLBSYNC....
1021 eieio ; Make sure that the tlbie happens first
1022 tlbsync ; Wait for everyone to catch up
1023 sync ; Wait for quiet again
1025 hrmBNTlbs: stw r2,tlbieLock(0) ; Clear the tlbie lock
1027 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1028 bl sxlkShared ; Go get a shared lock on the mapping lists
1029 mr. r3,r3 ; Did we get the lock?
1030 bne- hrmPanic ; Nope...
1032 lwz r4,mpVAddr(r31) ; High order of address
1033 lwz r5,mpVAddr+4(r31) ; Low order of address
1034 mr r3,r28 ; Pass in pmap to search
1035 mr r29,r4 ; Save this in case we need it (only promote fails)
1036 mr r30,r5 ; Save this in case we need it (only promote fails)
1037 bl EXT(mapSearchFull) ; Go see if we can find it
1039 mr. r3,r3 ; Did we? (And remember mapping address for later)
1040 mr r15,r4 ; Save top of next vaddr
1041 mr r16,r5 ; Save bottom of next vaddr
1042 beq- hrmPanic ; Nope, not found...
1044 cmplw r3,r31 ; Same mapping?
1045 bne- hrmPanic ; Not good...
1047 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1048 bl sxlkPromote ; Try to promote shared to exclusive
1049 mr. r3,r3 ; Could we?
1050 mr r3,r31 ; Restore the mapping pointer
1051 beq+ hrmBDone1 ; Yeah...
1053 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1054 bl sxlkConvert ; Convert shared to exclusive
1055 mr. r3,r3 ; Could we?
1056 bne-- hrmPanic ; Nope, we must have timed out...
1058 mr r3,r28 ; Pass in pmap to search
1059 mr r4,r29 ; High order of address
1060 mr r5,r30 ; Low order of address
1061 bl EXT(mapSearchFull) ; Rescan the list
1063 mr. r3,r3 ; Did we lose it when we converted?
1064 mr r15,r4 ; Save top of next vaddr
1065 mr r16,r5 ; Save bottom of next vaddr
1066 beq-- hrmPanic ; Yeah, we did, someone tossed it for us...
1068 hrmBDone1: bl mapDrainBusy ; Go wait until mapping is unused
1070 mr r3,r28 ; Get the pmap to remove from
1071 mr r4,r31 ; Point to the mapping
1072 bl EXT(mapRemove) ; Remove the mapping from the list
1074 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1075 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1076 subi r4,r4,1 ; Drop down the mapped page count
1077 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1078 bl sxlkUnlock ; Unlock the search list
1080 b hrmRetn32 ; We are all done, get out...
1083 ; Here we handle the 64-bit version of hw_rem_map
1088 hrmSplit64: rlwinm r9,r21,27,5,29 ; Convert PTEG to PCA entry
1089 beq-- cr5,hrmBlock64 ; Go treat block specially...
1090 subfic r9,r9,-4 ; Get the PCA entry offset
1091 bt-- cr0_eq,hrmPysDQ64 ; Skip next if no possible PTE...
1092 add r7,r9,r29 ; Point to the PCA slot
1094 bl mapLockPteg ; Go lock up the PTEG
1096 lwz r21,mpPte(r31) ; Get the quick pointer again
1097 ld r5,0(r26) ; Get the top of PTE
1099 rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
1100 rlwinm r21,r21,0,~mpHValid ; Clear out valid bit
1101 sldi r23,r5,16 ; Shift AVPN up to EA format
1102 // **** Need to adjust above shift based on the page size - large pages need to shift a bit more
1103 rldicr r5,r5,0,62 ; Clear the valid bit
1104 rldimi r23,r30,0,36 ; Insert the page portion of the VPN
1105 stw r21,mpPte(r31) ; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake)
1106 beq-- hrmUlckPCA64 ; Pte is gone, no need to invalidate...
1108 std r5,0(r26) ; Invalidate the PTE
1110 li r9,tlbieLock ; Get the TLBIE lock
1112 sync ; Make sure the invalid PTE is actually in memory
1114 hrmPtlb64: lwarx r5,0,r9 ; Get the TLBIE lock
1115 rldicl r23,r23,0,16 ; Clear bits 0:15 cause they say to
1116 mr. r5,r5 ; Is it locked?
1117 li r5,1 ; Get locked indicator
1118 bne-- hrmPtlb64w ; It is locked, go spin...
1119 stwcx. r5,0,r9 ; Try to get it
1120 bne-- hrmPtlb64 ; We was beat...
1122 tlbie r23 ; Invalidate all corresponding TLB entries
1124 eieio ; Make sure that the tlbie happens first
1125 tlbsync ; Wait for everyone to catch up
1127 ptesync ; Make sure of it all
1128 li r0,0 ; Clear this
1129 rlwinm r2,r21,28,29,31 ; Get slot number (16 byte entries)
1130 stw r0,tlbieLock(0) ; Clear the tlbie lock
1131 oris r0,r0,0x8000 ; Assume slot 0
1133 srw r0,r0,r2 ; Get slot mask to deallocate
1135 lwz r22,12(r26) ; Get the latest reference and change bits
1136 or r6,r6,r0 ; Make the guy we killed free
1139 eieio ; Make sure all updates come first
1141 stw r6,0(r7) ; Unlock and change the PCA
1143 hrmPysDQ64: mr r3,r31 ; Point to the mapping
1144 bl mapDrainBusy ; Go wait until mapping is unused
1146 mr r3,r28 ; Get the pmap to remove from
1147 mr r4,r31 ; Point to the mapping
1148 bl EXT(mapRemove) ; Remove the mapping from the list
1150 rlwinm r0,r20,0,mpType ; Isolate mapping type
1151 cmplwi cr1,r0,mpMinSpecial ; cr1_lt <- not a special mapping type
1152 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1153 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1154 subi r4,r4,1 ; Drop down the mapped page count
1155 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1156 bl sxlkUnlock ; Unlock the search list
1158 bf-- cr1_lt,hrmRetn64 ; This one has no real memory associated with it so we are done...
1160 bl mapPhysFindLock ; Go find and lock the physent
1162 li r0,ppLFAmask ; Get mask to clean up mapping pointer
1163 ld r9,ppLink(r3) ; Get first mapping
1164 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
1165 mr r4,r22 ; Get the RC bits we just got
1167 bl mapPhysMerge ; Go merge the RC bits
1169 andc r9,r9,r0 ; Clean up the mapping pointer
1171 cmpld r9,r31 ; Are we the first on the list?
1172 bne-- hrmNot1st64 ; Nope...
1175 ld r4,mpAlias(r31) ; Get our forward pointer
1177 std r9,mpAlias(r31) ; Make sure we are off the chain
1178 bl mapPhyCSet64 ; Go set the physent link and preserve flags
1180 b hrmPhyDQd64 ; Join up and unlock it all...
1182 hrmPtlb64w: li r5,lgKillResv ; Point to some spare memory
1183 stwcx. r5,0,r5 ; Clear the pending reservation
1186 hrmPtlb64x: lwz r5,0(r9) ; Do a regular load to avoid taking reservation
1187 mr. r5,r5 ; is it locked?
1188 beq++ hrmPtlb64 ; Nope...
1189 b hrmPtlb64x ; Sniff some more...
1194 mr. r8,r9 ; Remember and test current node
1195 beq-- hrmPhyDQd64 ; Could not find our node...
1196 ld r9,mpAlias(r9) ; Chain to the next
1197 cmpld r9,r31 ; Is this us?
1198 bne-- hrmNot1st64 ; Not us...
1200 ld r9,mpAlias(r9) ; Get our forward pointer
1201 std r9,mpAlias(r8) ; Unchain us
1206 bl mapPhysUnlock ; Unlock the physent chain
1208 hrmRetn64: rldicr r8,r31,0,51 ; Find start of page
1209 mr r3,r31 ; Copy the pointer to the mapping
1210 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
1211 bl mapDrainBusy ; Go wait until mapping is unused
1213 xor r3,r31,r8 ; Flip mapping address to virtual
1215 mtmsrd r17 ; Restore enables/translation/etc.
1218 b hrmRetnCmn ; Join the common return path...
1222 ; Check hrmBlock32 for comments.
1227 hrmBlock64: lis r29,0xD000 ; Get shift to 32MB bsu
1228 rlwinm r10,r20,mpBSub+1+2,29,29 ; Rotate to get 0 if 4K bsu or 13 if 32MB bsu
1229 lhz r24,mpSpace(r31) ; Get the address space hash
1230 lhz r25,mpBSize(r31) ; Get the number of pages in block
1231 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
1232 rlwnm r29,r29,r10,28,31 ; Rotate to get 0 or 13
1233 addi r25,r25,1 ; Account for zero-based counting
1234 ori r0,r20,mpRIP ; Turn on the remove in progress flag
1235 slw r25,r25,r29 ; Adjust for 32MB if needed
1236 mfsdr1 r29 ; Get the hash table base and size
1237 ld r27,mpVAddr(r31) ; Get the base vaddr
1238 subi r25,r25,1 ; Convert back to zero-based counting
1239 rlwinm r5,r29,0,27,31 ; Isolate the size
1240 sub r4,r25,r9 ; Get number of pages left
1241 cmplw cr1,r9,r25 ; Have we already hit the end?
1242 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
1243 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
1244 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
1245 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
1246 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
1247 cmpwi cr7,r2,0 ; Remember if we are doing the last chunk
1248 and r4,r4,r2 ; If more than a chunk, bring this back to 0
1249 srdi r27,r27,12 ; Change address into page index
1250 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
1251 add r27,r27,r9 ; Adjust vaddr to start of current chunk
1253 bgt-- cr1,hrmEndInSight ; Someone is already doing the last hunk...
1255 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1256 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
1257 bl sxlkUnlock ; Unlock the search list while we are invalidating
1259 rlwimi r24,r24,14,4,17 ; Insert a copy of space hash
1260 eqv r26,r26,r26 ; Get all foxes here
1261 rldimi r24,r24,28,8 ; Make a couple copies up higher
1262 rldicr r29,r29,0,47 ; Isolate just the hash table base
1263 subfic r5,r5,46 ; Get number of leading zeros
1264 srd r26,r26,r5 ; Shift the size bits over
1265 mr r30,r27 ; Get start of chunk to invalidate
1266 rldicr r26,r26,0,56 ; Make length in PTEG units
1267 add r22,r4,r30 ; Get end page number
1269 hrmBInv64: srdi r0,r30,2 ; Shift page index over to form ESID
1270 rldicr r0,r0,0,49 ; Clean all but segment portion
1271 rlwinm r2,r30,0,16,31 ; Get the current page index
1272 xor r0,r0,r24 ; Form VSID
1273 xor r8,r2,r0 ; Hash the vaddr
1274 sldi r8,r8,7 ; Make into PTEG offset
1275 and r23,r8,r26 ; Wrap into the hash table
1276 rlwinm r3,r23,27,5,29 ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here)
1277 subfic r3,r3,-4 ; Get the PCA entry offset
1278 add r7,r3,r29 ; Point to the PCA slot
1280 cmplw cr5,r30,r22 ; Have we reached the end of the range?
1282 bl mapLockPteg ; Lock the PTEG
1284 rlwinm. r4,r6,16,0,7 ; Extract the block mappings in this here PTEG and see if there are any
1285 add r5,r23,r29 ; Point to the PTEG
1286 li r0,0 ; Set an invalid PTE value
1287 beq++ hrmBNone64 ; No block map PTEs in this PTEG...
1288 mtcrf 0x80,r4 ; Set CRs to select PTE slots
1289 mtcrf 0x40,r4 ; Set CRs to select PTE slots
1292 bf 0,hrmSlot0s ; No autogen here
1293 std r0,0x00(r5) ; Invalidate PTE
1295 hrmSlot0s: bf 1,hrmSlot1s ; No autogen here
1296 std r0,0x10(r5) ; Invalidate PTE
1298 hrmSlot1s: bf 2,hrmSlot2s ; No autogen here
1299 std r0,0x20(r5) ; Invalidate PTE
1301 hrmSlot2s: bf 3,hrmSlot3s ; No autogen here
1302 std r0,0x30(r5) ; Invalidate PTE
1304 hrmSlot3s: bf 4,hrmSlot4s ; No autogen here
1305 std r0,0x40(r5) ; Invalidate PTE
1307 hrmSlot4s: bf 5,hrmSlot5s ; No autogen here
1308 std r0,0x50(r5) ; Invalidate PTE
1310 hrmSlot5s: bf 6,hrmSlot6s ; No autogen here
1311 std r0,0x60(r5) ; Invalidate PTE
1313 hrmSlot6s: bf 7,hrmSlot7s ; No autogen here
1314 std r0,0x70(r5) ; Invalidate PTE
1316 hrmSlot7s: rlwinm r0,r4,16,16,23 ; Move in use to autogen
1317 or r6,r6,r4 ; Flip on the free bits that corrospond to the autogens we cleared
1318 andc r6,r6,r0 ; Turn off all the old autogen bits
1320 hrmBNone64: eieio ; Make sure all updates come first
1321 stw r6,0(r7) ; Unlock and set the PCA
1323 addi r30,r30,1 ; bump to the next PTEG
1324 bne++ cr5,hrmBInv64 ; Go invalidate the next...
1326 bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...
1328 mr r3,r31 ; Copy the pointer to the mapping
1329 bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one
1331 sync ; Make sure memory is consistent
1333 subi r5,r25,255 ; Subtract TLB size from page count (note we are 0 based here)
1334 li r6,255 ; Assume full invalidate for now
1335 srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
1336 andc r6,r6,r5 ; Clear max if we have less to do
1337 and r5,r25,r5 ; Clear count if we have more than max
1338 sldi r24,r24,28 ; Get the full XOR value over to segment position
1339 ld r27,mpVAddr(r31) ; Get the base vaddr
1340 li r7,tlbieLock ; Get the TLBIE lock
1341 or r5,r5,r6 ; Get number of TLBIEs needed
1343 hrmBTLBlcl: lwarx r2,0,r7 ; Get the TLBIE lock
1344 mr. r2,r2 ; Is it locked?
1345 li r2,1 ; Get our lock value
1346 bne-- hrmBTLBlcm ; It is locked, go wait...
1347 stwcx. r2,0,r7 ; Try to get it
1348 bne-- hrmBTLBlcl ; We was beat...
1350 hrmBTLBj: sldi r2,r27,maxAdrSpb ; Move to make room for address space ID
1351 rldicr r2,r2,0,35-maxAdrSpb ; Clear out the extra
1352 addic. r5,r5,-1 ; See if we did them all
1353 xor r2,r2,r24 ; Make the VSID
1354 rldimi r2,r27,0,36 ; Insert the page portion of the VPN
1355 rldicl r2,r2,0,16 ; Clear bits 0:15 cause they say we gotta
1357 tlbie r2 ; Invalidate it everywhere
1358 addi r27,r27,0x1000 ; Up to the next page
1359 bge++ hrmBTLBj ; Make sure we have done it all...
1361 eieio ; Make sure that the tlbie happens first
1362 tlbsync ; wait for everyone to catch up
1364 li r2,0 ; Lock clear value
1366 ptesync ; Wait for quiet again
1368 stw r2,tlbieLock(0) ; Clear the tlbie lock
1370 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1371 bl sxlkShared ; Go get a shared lock on the mapping lists
1372 mr. r3,r3 ; Did we get the lock?
1373 bne- hrmPanic ; Nope...
1375 lwz r4,mpVAddr(r31) ; High order of address
1376 lwz r5,mpVAddr+4(r31) ; Low order of address
1377 mr r3,r28 ; Pass in pmap to search
1378 mr r29,r4 ; Save this in case we need it (only promote fails)
1379 mr r30,r5 ; Save this in case we need it (only promote fails)
1380 bl EXT(mapSearchFull) ; Go see if we can find it
1382 mr. r3,r3 ; Did we? (And remember mapping address for later)
1383 mr r15,r4 ; Save top of next vaddr
1384 mr r16,r5 ; Save bottom of next vaddr
1385 beq- hrmPanic ; Nope, not found...
1387 cmpld r3,r31 ; Same mapping?
1388 bne- hrmPanic ; Not good...
1390 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1391 bl sxlkPromote ; Try to promote shared to exclusive
1392 mr. r3,r3 ; Could we?
1393 mr r3,r31 ; Restore the mapping pointer
1394 beq+ hrmBDone2 ; Yeah...
1396 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1397 bl sxlkConvert ; Convert shared to exclusive
1398 mr. r3,r3 ; Could we?
1399 bne-- hrmPanic ; Nope, we must have timed out...
1401 mr r3,r28 ; Pass in pmap to search
1402 mr r4,r29 ; High order of address
1403 mr r5,r30 ; Low order of address
1404 bl EXT(mapSearchFull) ; Rescan the list
1406 mr. r3,r3 ; Did we lose it when we converted?
1407 mr r15,r4 ; Save top of next vaddr
1408 mr r16,r5 ; Save bottom of next vaddr
1409 beq-- hrmPanic ; Yeah, we did, someone tossed it for us...
1411 hrmBDone2: bl mapDrainBusy ; Go wait until mapping is unused
1413 mr r3,r28 ; Get the pmap to remove from
1414 mr r4,r31 ; Point to the mapping
1415 bl EXT(mapRemove) ; Remove the mapping from the list
1417 lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
1418 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1419 subi r4,r4,1 ; Drop down the mapped page count
1420 stw r4,pmapResidentCnt(r28) ; Set the mapped page count
1421 bl sxlkUnlock ; Unlock the search list
1423 b hrmRetn64 ; We are all done, get out...
1425 hrmBTLBlcm: li r2,lgKillResv ; Get space unreserve line
1426 stwcx. r2,0,r2 ; Unreserve it
1428 hrmBTLBlcn: lwz r2,0(r7) ; Get the TLBIE lock
1429 mr. r2,r2 ; Is it held?
1430 beq++ hrmBTLBlcl ; Nope...
1431 b hrmBTLBlcn ; Yeah...
1434 ; Guest shadow assist -- mapping remove
1436 ; Method of operation:
1437 ; o Locate the VMM extension block and the host pmap
1438 ; o Obtain the host pmap's search lock exclusively
1439 ; o Locate the requested mapping in the shadow hash table,
1441 ; o If connected, disconnect the PTE and gather R&C to physent
1442 ; o Locate and lock the physent
1443 ; o Remove mapping from physent's chain
1445 ; o Unlock pmap's search lock
1447 ; Non-volatile registers on entry:
1448 ; r17: caller's msr image
1449 ; r19: sprg2 (feature flags)
1450 ; r28: guest pmap's physical address
1451 ; r29: high-order 32 bits of guest virtual address
1452 ; r30: low-order 32 bits of guest virtual address
1454 ; Non-volatile register usage:
1455 ; r26: VMM extension block's physical address
1456 ; r27: host pmap's physical address
1457 ; r28: guest pmap's physical address
1458 ; r29: physent's physical address
1459 ; r30: guest virtual address
1460 ; r31: guest mapping's physical address
1464 rlwinm r30,r30,0,0xFFFFF000 ; Clean up low-order bits of 32-bit guest vaddr
1465 bt++ pf64Bitb,hrmG64 ; Test for 64-bit machine
1466 lwz r26,pmapVmmExtPhys+4(r28) ; r26 <- VMM pmap extension block paddr
1467 lwz r27,vmxHostPmapPhys+4(r26) ; r27 <- host pmap's paddr
1468 b hrmGStart ; Join common code
1470 hrmG64: ld r26,pmapVmmExtPhys(r28) ; r26 <- VMM pmap extension block paddr
1471 ld r27,vmxHostPmapPhys(r26) ; r27 <- host pmap's paddr
1472 rldimi r30,r29,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
1474 hrmGStart: la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
1475 bl sxlkExclusive ; Get lock exclusive
1477 lwz r3,vxsGrm(r26) ; Get mapping remove request count
1479 lwz r9,pmapSpace(r28) ; r9 <- guest space ID number
1480 la r31,VMX_HPIDX_OFFSET(r26) ; r31 <- base of hash page physical index
1481 srwi r11,r30,12 ; Form shadow hash:
1482 xor r11,r9,r11 ; spaceID ^ (vaddr >> 12)
1483 rlwinm r12,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
1484 ; Form index offset from hash page number
1485 add r31,r31,r12 ; r31 <- hash page index entry
1486 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
1487 mtctr r0 ; in this group
1488 bt++ pf64Bitb,hrmG64Search ; Separate handling for 64-bit search
1489 lwz r31,4(r31) ; r31 <- hash page paddr
1490 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
1491 ; r31 <- hash group paddr
1493 addi r3,r3,1 ; Increment remove request count
1494 stw r3,vxsGrm(r26) ; Update remove request count
1496 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1497 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1498 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
1499 b hrmG32SrchLp ; Let the search begin!
1503 mr r6,r3 ; r6 <- current mapping slot's flags
1504 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1505 mr r7,r4 ; r7 <- current mapping slot's space ID
1506 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1507 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1508 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
1509 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1510 xor r7,r7,r9 ; Compare space ID
1511 or r0,r11,r7 ; r0 <- !(free && space match)
1512 xor r8,r8,r30 ; Compare virtual address
1513 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1514 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1516 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1517 bdnz hrmG32SrchLp ; Iterate
1519 mr r6,r3 ; r6 <- current mapping slot's flags
1520 clrrwi r5,r5,12 ; Remove flags from virtual address
1521 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1522 xor r4,r4,r9 ; Compare space ID
1523 or r0,r11,r4 ; r0 <- !(free && space match)
1524 xor r5,r5,r30 ; Compare virtual address
1525 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1526 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1527 b hrmGSrchMiss ; No joy in our hash group
1530 ld r31,0(r31) ; r31 <- hash page paddr
1531 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
1532 ; r31 <- hash group paddr
1533 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
1534 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
1535 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
1536 b hrmG64SrchLp ; Let the search begin!
1540 mr r6,r3 ; r6 <- current mapping slot's flags
1541 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
1542 mr r7,r4 ; r7 <- current mapping slot's space ID
1543 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
1544 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
1545 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
1546 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1547 xor r7,r7,r9 ; Compare space ID
1548 or r0,r11,r7 ; r0 <- !(free && space match)
1549 xor r8,r8,r30 ; Compare virtual address
1550 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
1551 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1553 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
1554 bdnz hrmG64SrchLp ; Iterate
1556 mr r6,r3 ; r6 <- current mapping slot's flags
1557 clrrdi r5,r5,12 ; Remove flags from virtual address
1558 rlwinm r11,r6,0,mpgFree ; Isolate guest free mapping flag
1559 xor r4,r4,r9 ; Compare space ID
1560 or r0,r11,r4 ; r0 <- !(free && space match)
1561 xor r5,r5,r30 ; Compare virtual address
1562 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
1563 beq hrmGSrchHit ; Join common path on hit (r31 points to guest mapping)
1565 lwz r3,vxsGrmMiss(r26) ; Get remove miss count
1566 li r25,mapRtNotFnd ; Return not found
1567 addi r3,r3,1 ; Increment miss count
1568 stw r3,vxsGrmMiss(r26) ; Update miss count
1569 b hrmGReturn ; Join guest return
1573 rlwinm. r0,r6,0,mpgDormant ; Is this entry dormant?
1574 bne hrmGDormant ; Yes, nothing to disconnect
1576 lwz r3,vxsGrmActive(r26) ; Get active hit count
1577 addi r3,r3,1 ; Increment active hit count
1578 stw r3,vxsGrmActive(r26) ; Update hit count
1580 bt++ pf64Bitb,hrmGDscon64 ; Handle 64-bit disconnect separately
1581 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
1582 ; r31 <- mapping's physical address
1583 ; r3 -> PTE slot physical address
1584 ; r4 -> High-order 32 bits of PTE
1585 ; r5 -> Low-order 32 bits of PTE
1587 ; r7 -> PCA physical address
1588 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
1589 b hrmGFreePTE ; Join 64-bit path to release the PTE
1591 bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
1592 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
1594 mr. r3,r3 ; Was there a valid PTE?
1595 beq hrmGDormant ; No valid PTE, we're almost done
1596 lis r0,0x8000 ; Prepare free bit for this slot
1597 srw r0,r0,r2 ; Position free bit
1598 or r6,r6,r0 ; Set it in our PCA image
1599 lwz r8,mpPte(r31) ; Get PTE offset
1600 rlwinm r8,r8,0,~mpHValid ; Make the offset invalid
1601 stw r8,mpPte(r31) ; Save invalidated PTE offset
1602 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
1603 stw r6,0(r7) ; Update PCA and unlock the PTEG
1606 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
1607 bl mapFindLockPN ; Find 'n' lock this page's physent
1608 mr. r29,r3 ; Got lock on our physent?
1609 beq-- hrmGBadPLock ; No, time to bail out
1611 crset cr1_eq ; cr1_eq <- previous link is the anchor
1612 bt++ pf64Bitb,hrmGRemove64 ; Use 64-bit version on 64-bit machine
1613 la r11,ppLink+4(r29) ; Point to chain anchor
1614 lwz r9,ppLink+4(r29) ; Get chain anchor
1615 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
1617 beq- hrmGPEMissMiss ; End of chain, this is not good
1618 cmplw r9,r31 ; Is this the mapping to remove?
1619 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
1620 bne hrmGRemNext ; No, chain onward
1621 bt cr1_eq,hrmGRemRetry ; Mapping to remove is chained from anchor
1622 stw r8,0(r11) ; Unchain gpv->phys mapping
1623 b hrmGDelete ; Finish deleting mapping
1625 lwarx r0,0,r11 ; Get previous link
1626 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
1627 stwcx. r0,0,r11 ; Update previous link
1628 bne- hrmGRemRetry ; Lost reservation, retry
1629 b hrmGDelete ; Finish deleting mapping
1632 la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
1633 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1634 mr. r9,r8 ; Does next entry exist?
1635 b hrmGRemLoop ; Carry on
1638 li r7,ppLFAmask ; Get mask to clean up mapping pointer
1639 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
1640 la r11,ppLink(r29) ; Point to chain anchor
1641 ld r9,ppLink(r29) ; Get chain anchor
1642 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
1644 beq-- hrmGPEMissMiss ; End of chain, this is not good
1645 cmpld r9,r31 ; Is this the mapping to remove?
1646 ld r8,mpAlias(r9) ; Get forward chain pinter
1647 bne hrmGRem64Nxt ; No mapping to remove, chain on, dude
1648 bt cr1_eq,hrmGRem64Rt ; Mapping to remove is chained from anchor
1649 std r8,0(r11) ; Unchain gpv->phys mapping
1650 b hrmGDelete ; Finish deleting mapping
1652 ldarx r0,0,r11 ; Get previous link
1653 and r0,r0,r7 ; Get flags
1654 or r0,r0,r8 ; Insert new forward pointer
1655 stdcx. r0,0,r11 ; Slam it back in
1656 bne-- hrmGRem64Rt ; Lost reservation, retry
1657 b hrmGDelete ; Finish deleting mapping
1661 la r11,mpAlias(r9) ; Point to (soon to be) previous link
1662 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
1663 mr. r9,r8 ; Does next entry exist?
1664 b hrmGRem64Lp ; Carry on
1667 mr r3,r29 ; r3 <- physent addr
1668 bl mapPhysUnlock ; Unlock physent chain
1669 lwz r3,mpFlags(r31) ; Get mapping's flags
1670 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
1671 ori r3,r3,mpgFree ; Mark mapping free
1672 stw r3,mpFlags(r31) ; Update flags
1673 li r25,mapRtGuest ; Set return code to 'found guest mapping'
1676 la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
1677 bl sxlkUnlock ; Release host pmap search lock
1679 mr r3,r25 ; r3 <- return code
1680 bt++ pf64Bitb,hrmGRtn64 ; Handle 64-bit separately
1681 mtmsr r17 ; Restore 'rupts, translation
1682 isync ; Throw a small wrench into the pipeline
1683 b hrmRetnCmn ; Nothing to do now but pop a frame and return
1684 hrmGRtn64: mtmsrd r17 ; Restore 'rupts, translation, 32-bit mode
1685 b hrmRetnCmn ; Join common return
1689 lis r0,hi16(Choke) ; Seen the arrow on the doorpost
1690 ori r0,r0,lo16(Choke) ; Sayin' "THIS LAND IS CONDEMNED"
1691 li r3,failMapping ; All the way from New Orleans
1696 * mapping *hw_purge_phys(physent) - remove a mapping from the system
1698 * Upon entry, R3 contains a pointer to a physent.
1700 * This function removes the first mapping from a physical entry
1701 * alias list. It locks the list, extracts the vaddr and pmap from
1702 * the first entry. It then jumps into the hw_rem_map function.
1703 * NOTE: since we jump into rem_map, we need to set up the stack
1704 * identically. Also, we set the next parm to 0 so we do not
1705 * try to save a next vaddr.
1707 * We return the virtual address of the removed mapping as a
1710 * Note that this is designed to be called from 32-bit mode with a stack.
1712 * We disable translation and all interruptions here. This keeps is
1713 * from having to worry about a deadlock due to having anything locked
1714 * and needing it to process a fault.
1716 * Note that this must be done with both interruptions off and VM off
1719 * Remove mapping via physical page (mapping_purge)
1722 * 2) extract vaddr and pmap
1724 * 4) do "remove mapping via pmap"
1730 .globl EXT(hw_purge_phys)
1733 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1734 mflr r0 ; Save the link register
1735 stw r15,FM_ARG0+0x00(r1) ; Save a register
1736 stw r16,FM_ARG0+0x04(r1) ; Save a register
1737 stw r17,FM_ARG0+0x08(r1) ; Save a register
1738 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1739 stw r19,FM_ARG0+0x10(r1) ; Save a register
1740 stw r20,FM_ARG0+0x14(r1) ; Save a register
1741 stw r21,FM_ARG0+0x18(r1) ; Save a register
1742 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1743 stw r23,FM_ARG0+0x20(r1) ; Save a register
1744 stw r24,FM_ARG0+0x24(r1) ; Save a register
1745 stw r25,FM_ARG0+0x28(r1) ; Save a register
1746 li r6,0 ; Set no next address return
1747 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1748 stw r27,FM_ARG0+0x30(r1) ; Save a register
1749 stw r28,FM_ARG0+0x34(r1) ; Save a register
1750 stw r29,FM_ARG0+0x38(r1) ; Save a register
1751 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1752 stw r31,FM_ARG0+0x40(r1) ; Save a register
1753 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1754 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1756 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1758 bl mapPhysLock ; Lock the physent
1760 bt++ pf64Bitb,hppSF ; skip if 64-bit (only they take the hint)
1762 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
1763 li r0,ppFlags ; Set the bottom stuff to clear
1764 b hppJoin ; Join the common...
1766 hppSF: li r0,ppLFAmask
1767 ld r12,ppLink(r3) ; Get the pointer to the first mapping
1768 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
1770 hppJoin: andc. r12,r12,r0 ; Clean and test link
1771 beq-- hppNone ; There are no more mappings on physical page
1773 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1774 lhz r7,mpSpace(r12) ; Get the address space hash
1775 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1776 slwi r0,r7,2 ; Multiply space by 4
1777 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
1778 slwi r7,r7,3 ; Multiply space by 8
1779 lwz r5,mpVAddr+4(r12) ; and the bottom
1780 add r7,r7,r0 ; Get correct displacement into translate table
1781 lwz r28,0(r28) ; Get the actual translation map
1783 add r28,r28,r7 ; Point to the pmap translation
1785 bl mapPhysUnlock ; Time to unlock the physical entry
1787 bt++ pf64Bitb,hppSF2 ; skip if 64-bit (only they take the hint)
1789 lwz r28,pmapPAddr+4(r28) ; Get the physical address of the pmap
1790 b hrmJoin ; Go remove the mapping...
1792 hppSF2: ld r28,pmapPAddr(r28) ; Get the physical address of the pmap
1793 b hrmJoin ; Go remove the mapping...
1797 hppNone: bl mapPhysUnlock ; Time to unlock the physical entry
1799 bt++ pf64Bitb,hppSF3 ; skip if 64-bit (only they take the hint)...
1801 mtmsr r11 ; Restore enables/translation/etc.
1803 b hppRetnCmn ; Join the common return code...
1805 hppSF3: mtmsrd r11 ; Restore enables/translation/etc.
1809 ; NOTE: we have not used any registers other than the volatiles to this point
1812 hppRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
1814 li r3,mapRtEmpty ; Physent chain is empty
1815 mtlr r12 ; Restore the return
1816 lwz r1,0(r1) ; Pop the stack
1820 * mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
1822 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
1823 * a 64-bit quantity, it is a long long so it is in R4 and R5.
1825 * We return the virtual address of the removed mapping as a
1828 * Note that this is designed to be called from 32-bit mode with a stack.
1830 * We disable translation and all interruptions here. This keeps is
1831 * from having to worry about a deadlock due to having anything locked
1832 * and needing it to process a fault.
1834 * Note that this must be done with both interruptions off and VM off
1836 * Remove a mapping which can be reestablished by VM
1841 .globl EXT(hw_purge_map)
1844 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1845 mflr r0 ; Save the link register
1846 stw r15,FM_ARG0+0x00(r1) ; Save a register
1847 stw r16,FM_ARG0+0x04(r1) ; Save a register
1848 stw r17,FM_ARG0+0x08(r1) ; Save a register
1849 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1850 stw r19,FM_ARG0+0x10(r1) ; Save a register
1851 mfsprg r19,2 ; Get feature flags
1852 stw r20,FM_ARG0+0x14(r1) ; Save a register
1853 stw r21,FM_ARG0+0x18(r1) ; Save a register
1854 mtcrf 0x02,r19 ; move pf64Bit cr6
1855 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1856 stw r23,FM_ARG0+0x20(r1) ; Save a register
1857 stw r24,FM_ARG0+0x24(r1) ; Save a register
1858 stw r25,FM_ARG0+0x28(r1) ; Save a register
1859 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1860 stw r27,FM_ARG0+0x30(r1) ; Save a register
1861 stw r28,FM_ARG0+0x34(r1) ; Save a register
1862 stw r29,FM_ARG0+0x38(r1) ; Save a register
1863 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1864 stw r31,FM_ARG0+0x40(r1) ; Save a register
1865 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1866 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1869 lwz r11,pmapFlags(r3) ; Get pmaps flags
1870 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1871 bne hpmPanic ; Call not valid for guest shadow assist pmap
1874 bt++ pf64Bitb,hpmSF1 ; skip if 64-bit (only they take the hint)
1875 lwz r9,pmapvr+4(r3) ; Get conversion mask
1878 hpmSF1: ld r9,pmapvr(r3) ; Get conversion mask
1881 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1883 xor r28,r3,r9 ; Convert the pmap to physical addressing
1885 mr r17,r11 ; Save the MSR
1887 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1888 bl sxlkExclusive ; Go get an exclusive lock on the mapping lists
1889 mr. r3,r3 ; Did we get the lock?
1890 bne-- hrmBadLock ; Nope...
1892 ; Note that we do a full search (i.e., no shortcut level skips, etc.)
1893 ; here so that we will know the previous elements so we can dequeue them
1897 mr r3,r28 ; Pass in pmap to search
1898 mr r29,r4 ; Top half of vaddr
1899 mr r30,r5 ; Bottom half of vaddr
1900 bl EXT(mapSearchFull) ; Rescan the list
1901 mr. r31,r3 ; Did we? (And remember mapping address for later)
1902 or r0,r4,r5 ; Are we beyond the end?
1903 mr r15,r4 ; Save top of next vaddr
1904 cmplwi cr1,r0,0 ; See if there is another
1905 mr r16,r5 ; Save bottom of next vaddr
1906 bne-- hpmGotOne ; We found one, go check it out...
1908 hpmCNext: bne++ cr1,hpmSearch ; There is another to check...
1909 b hrmNotFound ; No more in pmap to check...
1911 hpmGotOne: lwz r20,mpFlags(r3) ; Get the flags
1912 andi. r0,r20,lo16(mpType|mpPerm) ; cr0_eq <- normal mapping && !permanent
1913 rlwinm r21,r20,8,24,31 ; Extract the busy count
1914 cmplwi cr2,r21,0 ; Is it busy?
1915 crand cr0_eq,cr2_eq,cr0_eq ; not busy and can be removed?
1916 beq++ hrmGotX ; Found, branch to remove the mapping...
1917 b hpmCNext ; Nope...
1919 hpmPanic: lis r0,hi16(Choke) ; System abend
1920 ori r0,r0,lo16(Choke) ; System abend
1921 li r3,failMapping ; Show that we failed some kind of mapping thing
1925 * mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
1927 * Upon entry, R3 contains a pointer to a pmap.
1928 * pa is a pointer to the physent
1930 * This function removes the first mapping for a specific pmap from a physical entry
1931 * alias list. It locks the list, extracts the vaddr and pmap from
1932 * the first apporpriate entry. It then jumps into the hw_rem_map function.
1933 * NOTE: since we jump into rem_map, we need to set up the stack
1934 * identically. Also, we set the next parm to 0 so we do not
1935 * try to save a next vaddr.
1937 * We return the virtual address of the removed mapping as a
1940 * Note that this is designed to be called from 32-bit mode with a stack.
1942 * We disable translation and all interruptions here. This keeps is
1943 * from having to worry about a deadlock due to having anything locked
1944 * and needing it to process a fault.
1946 * Note that this must be done with both interruptions off and VM off
1949 * Remove mapping via physical page (mapping_purge)
1952 * 2) extract vaddr and pmap
1954 * 4) do "remove mapping via pmap"
1960 .globl EXT(hw_purge_space)
1962 LEXT(hw_purge_space)
1963 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1964 mflr r0 ; Save the link register
1965 stw r15,FM_ARG0+0x00(r1) ; Save a register
1966 stw r16,FM_ARG0+0x04(r1) ; Save a register
1967 stw r17,FM_ARG0+0x08(r1) ; Save a register
1968 mfsprg r2,2 ; Get feature flags
1969 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1970 stw r19,FM_ARG0+0x10(r1) ; Save a register
1971 stw r20,FM_ARG0+0x14(r1) ; Save a register
1972 stw r21,FM_ARG0+0x18(r1) ; Save a register
1973 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1974 mtcrf 0x02,r2 ; move pf64Bit cr6
1975 stw r23,FM_ARG0+0x20(r1) ; Save a register
1976 stw r24,FM_ARG0+0x24(r1) ; Save a register
1977 stw r25,FM_ARG0+0x28(r1) ; Save a register
1978 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1979 stw r27,FM_ARG0+0x30(r1) ; Save a register
1980 li r6,0 ; Set no next address return
1981 stw r28,FM_ARG0+0x34(r1) ; Save a register
1982 stw r29,FM_ARG0+0x38(r1) ; Save a register
1983 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1984 stw r31,FM_ARG0+0x40(r1) ; Save a register
1985 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1986 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1989 lwz r11,pmapFlags(r4) ; Get pmaps flags
1990 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
1991 bne hpsPanic ; Call not valid for guest shadow assist pmap
1994 bt++ pf64Bitb,hpsSF1 ; skip if 64-bit (only they take the hint)
1996 lwz r9,pmapvr+4(r4) ; Get conversion mask for pmap
2000 hpsSF1: ld r9,pmapvr(r4) ; Get conversion mask for pmap
2002 hpsSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2004 xor r4,r4,r9 ; Convert the pmap to physical addressing
2006 bl mapPhysLock ; Lock the physent
2008 lwz r8,pmapSpace(r4) ; Get the space hash
2010 bt++ pf64Bitb,hpsSF ; skip if 64-bit (only they take the hint)
2012 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2014 hpsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2015 beq hpsNone ; Did not find one...
2017 lhz r10,mpSpace(r12) ; Get the space
2019 cmplw r10,r8 ; Is this one of ours?
2022 lwz r12,mpAlias+4(r12) ; Chain on to the next
2023 b hpsSrc32 ; Check it out...
2027 hpsSF: li r0,ppLFAmask
2028 ld r12,ppLink(r3) ; Get the pointer to the first mapping
2029 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
2031 hpsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2032 beq hpsNone ; Did not find one...
2034 lhz r10,mpSpace(r12) ; Get the space
2036 cmplw r10,r8 ; Is this one of ours?
2039 ld r12,mpAlias(r12) ; Chain on to the next
2040 b hpsSrc64 ; Check it out...
2044 hpsFnd: mr r28,r4 ; Set the pmap physical address
2045 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2046 lwz r5,mpVAddr+4(r12) ; and the bottom
2048 bl mapPhysUnlock ; Time to unlock the physical entry
2049 b hrmJoin ; Go remove the mapping...
2053 hpsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2055 bt++ pf64Bitb,hpsSF3 ; skip if 64-bit (only they take the hint)...
2057 mtmsr r11 ; Restore enables/translation/etc.
2059 b hpsRetnCmn ; Join the common return code...
2061 hpsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2065 ; NOTE: we have not used any registers other than the volatiles to this point
2068 hpsRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2070 li r3,mapRtEmpty ; No mappings for specified pmap on physent chain
2071 mtlr r12 ; Restore the return
2072 lwz r1,0(r1) ; Pop the stack
2075 hpsPanic: lis r0,hi16(Choke) ; System abend
2076 ori r0,r0,lo16(Choke) ; System abend
2077 li r3,failMapping ; Show that we failed some kind of mapping thing
2081 * mapping *hw_scrub_guest(physent, pmap) - remove first guest mapping associated with host
2082 * on this physent chain
2084 * Locates the first guest mapping on the physent chain that is associated with the
2085 * specified host pmap. If this succeeds, the mapping is removed by joining the general
2086 * remove path; otherwise, we return NULL. The caller is expected to invoke this entry
2087 * repeatedly until no additional guest mappings that match our criteria are removed.
2089 * Because this entry point exits through hw_rem_map, our prolog pushes its frame.
2092 * r3 : physent, 32-bit kernel virtual address
2093 * r4 : host pmap, 32-bit kernel virtual address
2095 * Volatile register usage (for linkage through hrmJoin):
2096 * r4 : high-order 32 bits of guest virtual address
2097 * r5 : low-order 32 bits of guest virtual address
2098 * r11: saved MSR image
2100 * Non-volatile register usage:
2101 * r26: VMM extension block's physical address
2102 * r27: host pmap's physical address
2103 * r28: guest pmap's physical address
2108 .globl EXT(hw_scrub_guest)
2110 LEXT(hw_scrub_guest)
2111 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
2112 mflr r0 ; Save the link register
2113 stw r15,FM_ARG0+0x00(r1) ; Save a register
2114 stw r16,FM_ARG0+0x04(r1) ; Save a register
2115 stw r17,FM_ARG0+0x08(r1) ; Save a register
2116 mfsprg r2,2 ; Get feature flags
2117 stw r18,FM_ARG0+0x0C(r1) ; Save a register
2118 stw r19,FM_ARG0+0x10(r1) ; Save a register
2119 stw r20,FM_ARG0+0x14(r1) ; Save a register
2120 stw r21,FM_ARG0+0x18(r1) ; Save a register
2121 stw r22,FM_ARG0+0x1C(r1) ; Save a register
2122 mtcrf 0x02,r2 ; move pf64Bit cr6
2123 stw r23,FM_ARG0+0x20(r1) ; Save a register
2124 stw r24,FM_ARG0+0x24(r1) ; Save a register
2125 stw r25,FM_ARG0+0x28(r1) ; Save a register
2126 stw r26,FM_ARG0+0x2C(r1) ; Save a register
2127 stw r27,FM_ARG0+0x30(r1) ; Save a register
2128 li r6,0 ; Set no next address return
2129 stw r28,FM_ARG0+0x34(r1) ; Save a register
2130 stw r29,FM_ARG0+0x38(r1) ; Save a register
2131 stw r30,FM_ARG0+0x3C(r1) ; Save a register
2132 stw r31,FM_ARG0+0x40(r1) ; Save a register
2133 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
2134 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2136 lwz r11,pmapVmmExt(r4) ; get VMM pmap extension block vaddr
2138 bt++ pf64Bitb,hsg64Salt ; Test for 64-bit machine
2139 lwz r26,pmapVmmExtPhys+4(r4) ; Get VMM pmap extension block paddr
2140 lwz r9,pmapvr+4(r4) ; Get 32-bit virt<->real conversion salt
2141 b hsgStart ; Get to work
2143 hsg64Salt: ld r26,pmapVmmExtPhys(r4) ; Get VMM pmap extension block paddr
2144 ld r9,pmapvr+4(r4) ; Get 64-bit virt<->real conversion salt
2146 hsgStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
2147 xor r27,r4,r9 ; Convert host pmap_t virt->real
2148 bl mapPhysLock ; Lock the physent
2150 bt++ pf64Bitb,hsg64Scan ; Test for 64-bit machine
2152 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2153 hsg32Loop: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2154 beq hsg32Miss ; Did not find one...
2155 lwz r8,mpFlags(r12) ; Get mapping's flags
2156 lhz r7,mpSpace(r12) ; Get mapping's space id
2157 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2158 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2159 xori r8,r8,mpGuest ; Is it a guest mapping?
2160 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2161 slwi r9,r7,2 ; Multiply space by 4
2162 lwz r28,0(r28) ; Get the actual translation map
2163 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2164 slwi r7,r7,3 ; Multiply space by 8
2165 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2166 add r7,r7,r9 ; Get correct displacement into translate table
2167 add r28,r28,r7 ; Point to the pmap translation
2168 lwz r28,pmapPAddr+4(r28) ; Get guest pmap paddr
2169 lwz r7,pmapVmmExtPhys+4(r28) ; Get VMM extension block paddr
2170 xor r7,r7,r26 ; Is guest associated with specified host?
2171 or. r7,r7,r8 ; Guest mapping && associated with host?
2172 lwz r12,mpAlias+4(r12) ; Chain on to the next
2173 bne hsg32Loop ; Try next mapping on alias chain
2175 hsg32Hit: bl mapPhysUnlock ; Unlock physent chain
2176 b hrmJoin ; Join common path for mapping removal
2179 hsg32Miss: bl mapPhysUnlock ; Unlock physent chain
2180 mtmsr r11 ; Restore 'rupts, translation
2181 isync ; Throw a small wrench into the pipeline
2182 li r3,mapRtEmpty ; No mappings found matching specified criteria
2183 b hrmRetnCmn ; Exit through common epilog
2186 hsg64Scan: li r6,ppLFAmask ; Get lock, flag, attribute mask seed
2187 ld r12,ppLink(r3) ; Grab the pointer to the first mapping
2188 rotrdi r6,r6,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
2189 hsg64Loop: andc. r12,r12,r6 ; Clean and test mapping address
2190 beq hsg64Miss ; Did not find one...
2191 lwz r8,mpFlags(r12) ; Get mapping's flags
2192 lhz r7,mpSpace(r12) ; Get mapping's space id
2193 rlwinm r8,r8,0,mpType ; Extract mapping's type code
2194 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2195 xori r8,r8,mpGuest ; Is it a guest mapping?
2196 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
2197 slwi r9,r7,2 ; Multiply space by 4
2198 lwz r28,0(r28) ; Get the actual translation map
2199 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
2200 slwi r7,r7,3 ; Multiply space by 8
2201 lwz r5,mpVAddr+4(r12) ; Get the bottom of the vaddr
2202 add r7,r7,r9 ; Get correct displacement into translate table
2203 add r28,r28,r7 ; Point to the pmap translation
2204 ld r28,pmapPAddr(r28) ; Get guest pmap paddr
2205 ld r7,pmapVmmExtPhys(r28) ; Get VMM extension block paddr
2206 xor r7,r7,r26 ; Is guest associated with specified host?
2207 or. r7,r7,r8 ; Guest mapping && associated with host?
2208 ld r12,mpAlias(r12) ; Chain on to the next
2209 bne hsg64Loop ; Try next mapping on alias chain
2211 hsg64Hit: bl mapPhysUnlock ; Unlock physent chain
2212 b hrmJoin ; Join common path for mapping removal
2215 hsg64Miss: bl mapPhysUnlock ; Unlock physent chain
2216 mtmsrd r11 ; Restore 'rupts, translation
2217 li r3,mapRtEmpty ; No mappings found matching specified criteria
2218 b hrmRetnCmn ; Exit through common epilog
2222 * mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
2224 * Upon entry, R3 contains a pointer to a physent.
2225 * space is the space ID from the pmap in question
2227 * We return the virtual address of the found mapping in
2228 * R3. Note that the mapping busy is bumped.
2230 * Note that this is designed to be called from 32-bit mode with a stack.
2232 * We disable translation and all interruptions here. This keeps is
2233 * from having to worry about a deadlock due to having anything locked
2234 * and needing it to process a fault.
2239 .globl EXT(hw_find_space)
2242 stwu r1,-(FM_SIZE)(r1) ; Make some space on the stack
2243 mflr r0 ; Save the link register
2244 mr r8,r4 ; Remember the space
2245 stw r0,(FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2247 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2249 bl mapPhysLock ; Lock the physent
2251 bt++ pf64Bitb,hfsSF ; skip if 64-bit (only they take the hint)
2253 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
2255 hfsSrc32: rlwinm. r12,r12,0,~ppFlags ; Clean and test mapping address
2256 beq hfsNone ; Did not find one...
2258 lhz r10,mpSpace(r12) ; Get the space
2260 cmplw r10,r8 ; Is this one of ours?
2263 lwz r12,mpAlias+4(r12) ; Chain on to the next
2264 b hfsSrc32 ; Check it out...
2268 hfsSF: li r0,ppLFAmask
2269 ld r12,ppLink(r3) ; Get the pointer to the first mapping
2270 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
2272 hfsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
2273 beq hfsNone ; Did not find one...
2275 lhz r10,mpSpace(r12) ; Get the space
2277 cmplw r10,r8 ; Is this one of ours?
2280 ld r12,mpAlias(r12) ; Chain on to the next
2281 b hfsSrc64 ; Check it out...
2285 hfsFnd: mr r8,r3 ; Save the physent
2286 mr r3,r12 ; Point to the mapping
2287 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disapear
2289 mr r3,r8 ; Get back the physical entry
2290 li r7,0xFFF ; Get a page size mask
2291 bl mapPhysUnlock ; Time to unlock the physical entry
2293 andc r3,r12,r7 ; Move the mapping back down to a page
2294 lwz r3,mbvrswap+4(r3) ; Get last half of virtual to real swap
2295 xor r12,r3,r12 ; Convert to virtual
2296 b hfsRet ; Time to return
2300 hfsNone: bl mapPhysUnlock ; Time to unlock the physical entry
2302 hfsRet: bt++ pf64Bitb,hfsSF3 ; skip if 64-bit (only they take the hint)...
2304 mtmsr r11 ; Restore enables/translation/etc.
2306 b hfsRetnCmn ; Join the common return code...
2308 hfsSF3: mtmsrd r11 ; Restore enables/translation/etc.
2312 ; NOTE: we have not used any registers other than the volatiles to this point
2315 hfsRetnCmn: mr r3,r12 ; Get the mapping or a 0 if we failed
2318 mr. r3,r3 ; Anything to return?
2319 beq hfsRetnNull ; Nope
2320 lwz r11,mpFlags(r3) ; Get mapping flags
2321 rlwinm r0,r11,0,mpType ; Isolate the mapping type
2322 cmplwi r0,mpGuest ; Shadow guest mapping?
2323 beq hfsPanic ; Yup, kick the bucket
2327 lwz r12,(FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2329 mtlr r12 ; Restore the return
2330 lwz r1,0(r1) ; Pop the stack
2333 hfsPanic: lis r0,hi16(Choke) ; System abend
2334 ori r0,r0,lo16(Choke) ; System abend
2335 li r3,failMapping ; Show that we failed some kind of mapping thing
2339 ; mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap
2340 ; Returns 0 if not found or the virtual address of the mapping if
2341 ; if is. Also, the mapping has the busy count bumped.
2344 .globl EXT(hw_find_map)
2347 stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2348 mflr r0 ; Save the link register
2349 stw r25,FM_ARG0+0x00(r1) ; Save a register
2350 stw r26,FM_ARG0+0x04(r1) ; Save a register
2351 mr r25,r6 ; Remember address of next va
2352 stw r27,FM_ARG0+0x08(r1) ; Save a register
2353 stw r28,FM_ARG0+0x0C(r1) ; Save a register
2354 stw r29,FM_ARG0+0x10(r1) ; Save a register
2355 stw r30,FM_ARG0+0x14(r1) ; Save a register
2356 stw r31,FM_ARG0+0x18(r1) ; Save a register
2357 stw r0,(FM_ALIGN((31-26+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2360 lwz r11,pmapFlags(r3) ; Get pmaps flags
2361 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
2362 bne hfmPanic ; Call not valid for guest shadow assist pmap
2365 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
2366 lwz r7,pmapvr+4(r3) ; Get the second part
2369 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2371 mr r27,r11 ; Remember the old MSR
2372 mr r26,r12 ; Remember the feature bits
2374 xor r28,r3,r7 ; Change the common 32- and 64-bit half
2376 bf-- pf64Bitb,hfmSF1 ; skip if 32-bit...
2378 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
2380 hfmSF1: mr r29,r4 ; Save top half of vaddr
2381 mr r30,r5 ; Save the bottom half
2383 la r3,pmapSXlk(r28) ; Point to the pmap search lock
2384 bl sxlkShared ; Go get a shared lock on the mapping lists
2385 mr. r3,r3 ; Did we get the lock?
2386 bne-- hfmBadLock ; Nope...
2388 mr r3,r28 ; get the pmap address
2389 mr r4,r29 ; Get bits 0:31 to look for
2390 mr r5,r30 ; Get bits 32:64
2392 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
2394 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Find remove in progress bit
2395 mr. r31,r3 ; Save the mapping if we found it
2396 cmplwi cr1,r0,0 ; Are we removing?
2397 mr r29,r4 ; Save next va high half
2398 crorc cr0_eq,cr0_eq,cr1_eq ; Not found or removing
2399 mr r30,r5 ; Save next va low half
2400 li r6,0 ; Assume we did not find it
2401 li r26,0xFFF ; Get a mask to relocate to start of mapping page
2403 bt-- cr0_eq,hfmNotFnd ; We did not find it...
2405 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disapear
2407 andc r4,r31,r26 ; Get back to the mapping page start
2409 ; Note: we can treat 32- and 64-bit the same here. Because we are going from
2410 ; physical to virtual and we only do 32-bit virtual, we only need the low order
2413 lwz r4,mbvrswap+4(r4) ; Get last half of virtual to real swap
2414 li r6,-1 ; Indicate we found it and it is not being removed
2415 xor r31,r31,r4 ; Flip to virtual
2417 hfmNotFnd: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2418 bl sxlkUnlock ; Unlock the search list
2420 rlwinm r3,r31,0,0,31 ; Move mapping to return register and clear top of register if 64-bit
2421 and r3,r3,r6 ; Clear if not found or removing
2423 hfmReturn: bt++ pf64Bitb,hfmR64 ; Yes...
2425 mtmsr r27 ; Restore enables/translation/etc.
2427 b hfmReturnC ; Join common...
2429 hfmR64: mtmsrd r27 ; Restore enables/translation/etc.
2432 hfmReturnC: stw r29,0(r25) ; Save the top of the next va
2433 stw r30,4(r25) ; Save the bottom of the next va
2434 lwz r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2435 lwz r25,FM_ARG0+0x00(r1) ; Restore a register
2436 lwz r26,FM_ARG0+0x04(r1) ; Restore a register
2437 and r3,r3,r6 ; Clear return if the mapping is being removed
2438 lwz r27,FM_ARG0+0x08(r1) ; Restore a register
2439 mtlr r0 ; Restore the return
2440 lwz r28,FM_ARG0+0x0C(r1) ; Restore a register
2441 lwz r29,FM_ARG0+0x10(r1) ; Restore a register
2442 lwz r30,FM_ARG0+0x14(r1) ; Restore a register
2443 lwz r31,FM_ARG0+0x18(r1) ; Restore a register
2444 lwz r1,0(r1) ; Pop the stack
2449 hfmBadLock: li r3,1 ; Set lock time out error code
2450 b hfmReturn ; Leave....
2452 hfmPanic: lis r0,hi16(Choke) ; System abend
2453 ori r0,r0,lo16(Choke) ; System abend
2454 li r3,failMapping ; Show that we failed some kind of mapping thing
2459 * void hw_clear_maps(void)
2461 * Remove all mappings for all phys entries.
2467 .globl EXT(hw_clear_maps)
2470 mflr r10 ; Save the link register
2471 mfcr r9 ; Save the condition register
2472 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2474 lis r5,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2475 ori r5,r5,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
2478 lwz r3,mrPhysTab(r5) ; Get the actual table address
2479 lwz r0,mrStart(r5) ; Get start of table entry
2480 lwz r4,mrEnd(r5) ; Get end of table entry
2481 addi r5,r5,mrSize ; Point to the next regions
2483 cmplwi r3,0 ; No more regions?
2484 beq-- hcmDone ; Leave...
2486 sub r4,r4,r0 ; Calculate physical entry count
2490 bt++ pf64Bitb,hcmNextPhys64 ; 64-bit version
2494 lwz r4,ppLink+4(r3) ; Grab the pointer to the first mapping
2495 addi r3,r3,physEntrySize ; Next phys_entry
2498 rlwinm. r4,r4,0,~ppFlags ; Clean and test mapping address
2499 beq hcmNoMap32 ; Did not find one...
2501 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2502 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2503 stw r0,mpPte(r4) ; Get the quick pointer again
2505 lwz r4,mpAlias+4(r4) ; Chain on to the next
2506 b hcmNextMap32 ; Check it out...
2514 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2515 ld r4,ppLink(r3) ; Get the pointer to the first mapping
2516 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
2517 addi r3,r3,physEntrySize ; Next phys_entry
2520 andc. r4,r4,r0 ; Clean and test mapping address
2521 beq hcmNoMap64 ; Did not find one...
2523 lwz r0,mpPte(r4) ; Grab the offset to the PTE
2524 rlwinm r0,r0,0,~mpHValid ; Clear out valid bit
2525 stw r0,mpPte(r4) ; Get the quick pointer again
2527 ld r4,mpAlias(r4) ; Chain on to the next
2528 li r0,ppLFAmask ; Get mask to clean up mapping pointer
2529 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
2530 b hcmNextMap64 ; Check it out...
2538 mtlr r10 ; Restore the return
2539 mtcr r9 ; Restore the condition register
2540 bt++ pf64Bitb,hcmDone64 ; 64-bit version
2542 mtmsr r11 ; Restore translation/mode/etc.
2547 mtmsrd r11 ; Restore translation/mode/etc.
2554 * unsigned int hw_walk_phys(pp, preop, op, postop, parm, opmod)
2555 * walks all mapping for a physical page and performs
2556 * specified operations on each.
2558 * pp is unlocked physent
2559 * preop is operation to perform on physent before walk. This would be
2560 * used to set cache attribute or protection
2561 * op is the operation to perform on each mapping during walk
2562 * postop is operation to perform in the phsyent after walk. this would be
2563 * used to set or reset the RC bits.
2564 * opmod modifies the action taken on any connected PTEs visited during
2567 * We return the RC bits from before postop is run.
2569 * Note that this is designed to be called from 32-bit mode with a stack.
2571 * We disable translation and all interruptions here. This keeps is
2572 * from having to worry about a deadlock due to having anything locked
2573 * and needing it to process a fault.
2575 * We lock the physent, execute preop, and then walk each mapping in turn.
2576 * If there is a PTE, it is invalidated and the RC merged into the physent.
2577 * Then we call the op function.
2578 * Then we revalidate the PTE.
2579 * Once all all mappings are finished, we save the physent RC and call the
2580 * postop routine. Then we unlock the physent and return the RC.
2586 .globl EXT(hw_walk_phys)
2589 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2590 mflr r0 ; Save the link register
2591 stw r24,FM_ARG0+0x00(r1) ; Save a register
2592 stw r25,FM_ARG0+0x04(r1) ; Save a register
2593 stw r26,FM_ARG0+0x08(r1) ; Save a register
2594 stw r27,FM_ARG0+0x0C(r1) ; Save a register
2595 mr r24,r8 ; Save the parm
2596 mr r25,r7 ; Save the parm
2597 stw r28,FM_ARG0+0x10(r1) ; Save a register
2598 stw r29,FM_ARG0+0x14(r1) ; Save a register
2599 stw r30,FM_ARG0+0x18(r1) ; Save a register
2600 stw r31,FM_ARG0+0x1C(r1) ; Save a register
2601 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2603 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2605 mfsprg r26,0 ; (INSTRUMENTATION)
2606 lwz r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2607 addi r27,r27,1 ; (INSTRUMENTATION)
2608 stw r27,hwWalkPhys(r26) ; (INSTRUMENTATION)
2609 la r26,hwWalkFull(r26) ; (INSTRUMENTATION)
2610 slwi r12,r24,2 ; (INSTRUMENTATION)
2611 lwzx r27,r26,r12 ; (INSTRUMENTATION)
2612 addi r27,r27,1 ; (INSTRUMENTATION)
2613 stwx r27,r26,r12 ; (INSTRUMENTATION)
2615 mr r26,r11 ; Save the old MSR
2616 lis r27,hi16(hwpOpBase) ; Get high order of op base
2617 slwi r4,r4,7 ; Convert preop to displacement
2618 ori r27,r27,lo16(hwpOpBase) ; Get low order of op base
2619 slwi r5,r5,7 ; Convert op to displacement
2620 add r12,r4,r27 ; Point to the preop routine
2621 slwi r28,r6,7 ; Convert postop to displacement
2622 mtctr r12 ; Set preop routine
2623 add r28,r28,r27 ; Get the address of the postop routine
2624 add r27,r5,r27 ; Get the address of the op routine
2626 bl mapPhysLock ; Lock the physent
2628 mr r29,r3 ; Save the physent address
2630 bt++ pf64Bitb,hwp64 ; skip if 64-bit (only they take the hint)
2632 bctrl ; Call preop routine
2633 bne- hwpEarly32 ; preop says to bail now...
2635 cmplwi r24,hwpMergePTE ; Classify operation modifier
2636 mtctr r27 ; Set up the op function address
2637 lwz r31,ppLink+4(r3) ; Grab the pointer to the first mapping
2638 blt hwpSrc32 ; Do TLB invalidate/purge/merge/reload for each mapping
2639 beq hwpMSrc32 ; Do TLB merge for each mapping
2641 hwpQSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2642 beq hwpNone32 ; Did not find one...
2644 bctrl ; Call the op function
2646 bne- hwpEarly32 ; op says to bail now...
2647 lwz r31,mpAlias+4(r31) ; Chain on to the next
2648 b hwpQSrc32 ; Check it out...
2651 hwpMSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2652 beq hwpNone32 ; Did not find one...
2654 bl mapMergeRC32 ; Merge reference and change into mapping and physent
2655 bctrl ; Call the op function
2657 bne- hwpEarly32 ; op says to bail now...
2658 lwz r31,mpAlias+4(r31) ; Chain on to the next
2659 b hwpMSrc32 ; Check it out...
2662 hwpSrc32: rlwinm. r31,r31,0,~ppFlags ; Clean and test mapping address
2663 beq hwpNone32 ; Did not find one...
2666 ; Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4,
2667 ; PTE low in R5. The PCA address is in R7. The PTEG come back locked.
2668 ; If there is no PTE, PTE low is obtained from mapping
2670 bl mapInvPte32 ; Invalidate and lock PTE, also merge into physent
2672 bctrl ; Call the op function
2674 crmove cr1_eq,cr0_eq ; Save the return code
2676 mr. r3,r3 ; Was there a previously valid PTE?
2677 beq- hwpNxt32 ; Nope...
2679 stw r5,4(r3) ; Store second half of PTE
2680 eieio ; Make sure we do not reorder
2681 stw r4,0(r3) ; Revalidate the PTE
2683 eieio ; Make sure all updates come first
2684 stw r6,0(r7) ; Unlock the PCA
2686 hwpNxt32: bne- cr1,hwpEarly32 ; op says to bail now...
2687 lwz r31,mpAlias+4(r31) ; Chain on to the next
2688 b hwpSrc32 ; Check it out...
2692 hwpNone32: mtctr r28 ; Get the post routine address
2694 lwz r30,ppLink+4(r29) ; Save the old RC
2695 mr r3,r29 ; Get the physent address
2696 bctrl ; Call post routine
2698 bl mapPhysUnlock ; Unlock the physent
2700 mtmsr r26 ; Restore translation/mode/etc.
2703 b hwpReturn ; Go restore registers and return...
2707 hwpEarly32: lwz r30,ppLink+4(r29) ; Save the old RC
2708 mr r3,r29 ; Get the physent address
2709 bl mapPhysUnlock ; Unlock the physent
2711 mtmsr r26 ; Restore translation/mode/etc.
2714 b hwpReturn ; Go restore registers and return...
2718 hwp64: bctrl ; Call preop routine
2719 bne-- hwpEarly64 ; preop says to bail now...
2721 cmplwi r24,hwpMergePTE ; Classify operation modifier
2722 mtctr r27 ; Set up the op function address
2725 ld r31,ppLink(r3) ; Get the pointer to the first mapping
2726 rotrdi r24,r24,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
2727 blt hwpSrc64 ; Do TLB invalidate/purge/merge/reload for each mapping
2728 beq hwpMSrc64 ; Do TLB merge for each mapping
2730 hwpQSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2731 beq hwpNone64 ; Did not find one...
2733 bctrl ; Call the op function
2735 bne-- hwpEarly64 ; op says to bail now...
2736 ld r31,mpAlias(r31) ; Chain on to the next
2737 b hwpQSrc64 ; Check it out...
2740 hwpMSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2741 beq hwpNone64 ; Did not find one...
2743 bl mapMergeRC64 ; Merge reference and change into mapping and physent
2744 bctrl ; Call the op function
2746 bne-- hwpEarly64 ; op says to bail now...
2747 ld r31,mpAlias(r31) ; Chain on to the next
2748 b hwpMSrc64 ; Check it out...
2751 hwpSrc64: andc. r31,r31,r24 ; Clean and test mapping address
2752 beq hwpNone64 ; Did not find one...
2754 ; Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4,
2755 ; PTE low in R5. PTEG comes back locked if there is one
2757 bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
2759 bctrl ; Call the op function
2761 crmove cr1_eq,cr0_eq ; Save the return code
2763 mr. r3,r3 ; Was there a previously valid PTE?
2764 beq-- hwpNxt64 ; Nope...
2766 std r5,8(r3) ; Save bottom of PTE
2767 eieio ; Make sure we do not reorder
2768 std r4,0(r3) ; Revalidate the PTE
2770 eieio ; Make sure all updates come first
2771 stw r6,0(r7) ; Unlock the PCA
2773 hwpNxt64: bne-- cr1,hwpEarly64 ; op says to bail now...
2774 ld r31,mpAlias(r31) ; Chain on to the next
2775 b hwpSrc64 ; Check it out...
2779 hwpNone64: mtctr r28 ; Get the post routine address
2781 lwz r30,ppLink+4(r29) ; Save the old RC
2782 mr r3,r29 ; Get the physent address
2783 bctrl ; Call post routine
2785 bl mapPhysUnlock ; Unlock the physent
2787 mtmsrd r26 ; Restore translation/mode/etc.
2789 b hwpReturn ; Go restore registers and return...
2793 hwpEarly64: lwz r30,ppLink+4(r29) ; Save the old RC
2794 mr r3,r29 ; Get the physent address
2795 bl mapPhysUnlock ; Unlock the physent
2797 mtmsrd r26 ; Restore translation/mode/etc.
2800 hwpReturn: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2801 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
2802 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
2803 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
2804 mr r3,r30 ; Pass back the RC
2805 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
2806 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
2807 mtlr r0 ; Restore the return
2808 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
2809 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
2810 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
2811 lwz r1,0(r1) ; Pop the stack
2816 ; The preop/op/postop function table.
2817 ; Each function must be 64-byte aligned and be no more than
2818 ; 16 instructions. If more than 16, we must fix address calculations
2819 ; at the start of hwpOpBase
2821 ; The routine must set CR0_EQ in order to continue scan.
2822 ; If CR0_EQ is not set, an early return from the function is made.
2829 ; Function 0 - No operation
2831 hwpNoop: cmplw r0,r0 ; Make sure CR0_EQ is set
2832 blr ; Just return...
2836 ; This is the continuation of function 4 - Set attributes in mapping
2838 ; We changed the attributes of a mapped page. Make sure there are no cache paradoxes.
2839 ; NOTE: Do we have to deal with i-cache here?
2841 hwpSAM: li r11,4096 ; Get page size
2843 hwpSAMinvd: sub. r11,r11,r9 ; Back off a line
2844 dcbf r11,r5 ; Flush the line in the data cache
2845 bgt++ hwpSAMinvd ; Go do the rest of it...
2847 sync ; Make sure it is done
2849 li r11,4096 ; Get page size
2851 hwpSAMinvi: sub. r11,r11,r9 ; Back off a line
2852 icbi r11,r5 ; Flush the line in the icache
2853 bgt++ hwpSAMinvi ; Go do the rest of it...
2855 sync ; Make sure it is done
2857 cmpw r0,r0 ; Make sure we return CR0_EQ
2861 ; Function 1 - Set protection in physent (obsolete)
2863 .set .,hwpOpBase+(1*128) ; Generate error if previous function too long
2865 hwpSPrtPhy: cmplw r0,r0 ; Make sure we return CR0_EQ
2869 ; Function 2 - Set protection in mapping
2871 .set .,hwpOpBase+(2*128) ; Generate error if previous function too long
2873 hwpSPrtMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2874 lwz r8,mpVAddr+4(r31) ; Get the protection part of mapping
2875 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2876 li r0,lo16(mpN|mpPP) ; Get no-execute and protection bits
2877 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2878 rlwinm r2,r25,0,mpNb-32,mpPPe-32 ; Isolate new no-execute and protection bits
2879 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2880 andc r5,r5,r0 ; Clear the old no-execute and prot bits
2881 or r5,r5,r2 ; Move in the new no-execute and prot bits
2882 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2883 cmpw r0,r0 ; Make sure we return CR0_EQ
2884 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2887 ; Function 3 - Set attributes in physent
2889 .set .,hwpOpBase+(3*128) ; Generate error if previous function too long
2891 hwpSAtrPhy: li r5,ppLink ; Get offset for flag part of physent
2893 hwpSAtrPhX: lwarx r4,r5,r29 ; Get the old flags
2894 rlwimi r4,r25,0,ppIb,ppGb ; Stick in the new attributes
2895 stwcx. r4,r5,r29 ; Try to stuff it
2896 bne-- hwpSAtrPhX ; Try again...
2897 ; Note: CR0_EQ is set because of stwcx.
2900 ; Function 4 - Set attributes in mapping
2902 .set .,hwpOpBase+(4*128) ; Generate error if previous function too long
2904 hwpSAtrMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2905 lwz r8,mpVAddr+4(r31) ; Get the attribute part of mapping
2906 li r2,mpM ; Force on coherent
2907 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2908 li r0,lo16(mpWIMG) ; Get wimg mask
2909 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2910 rlwimi r2,r25,32-(mpIb-32-ppIb),mpIb-32,mpIb-32
2911 ; Copy in the cache inhibited bit
2912 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2913 andc r5,r5,r0 ; Clear the old wimg
2914 rlwimi r2,r25,32-(mpGb-32-ppGb),mpGb-32,mpGb-32
2915 ; Copy in the guarded bit
2916 mfsprg r9,2 ; Feature flags
2917 or r5,r5,r2 ; Move in the new wimg
2918 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2919 lwz r2,mpPAddr(r31) ; Get the physical address
2920 li r0,0xFFF ; Start a mask
2921 andi. r9,r9,pf32Byte+pf128Byte ; Get cache line size
2922 rlwinm r5,r0,0,1,0 ; Copy to top half
2923 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2924 rlwinm r2,r2,12,1,0 ; Copy to top and rotate to make physical address with junk left
2925 and r5,r5,r2 ; Clean stuff in top 32 bits
2926 andc r2,r2,r0 ; Clean bottom too
2927 rlwimi r5,r2,0,0,31 ; Insert low 23 to make full physical address
2928 b hwpSAM ; Join common
2930 ; NOTE: we moved the remainder of the code out of here because it
2931 ; did not fit in the 128 bytes allotted. It got stuck into the free space
2932 ; at the end of the no-op function.
2937 ; Function 5 - Clear reference in physent
2939 .set .,hwpOpBase+(5*128) ; Generate error if previous function too long
2941 hwpCRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2943 hwpCRefPhX: lwarx r4,r5,r29 ; Get the old flags
2944 rlwinm r4,r4,0,ppRb+1-32,ppRb-1-32 ; Clear R
2945 stwcx. r4,r5,r29 ; Try to stuff it
2946 bne-- hwpCRefPhX ; Try again...
2947 ; Note: CR0_EQ is set because of stwcx.
2951 ; Function 6 - Clear reference in mapping
2953 .set .,hwpOpBase+(6*128) ; Generate error if previous function too long
2955 hwpCRefMap: li r0,lo16(mpR) ; Get reference bit
2956 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2957 andc r5,r5,r0 ; Clear in PTE copy
2958 andc r8,r8,r0 ; and in the mapping
2959 cmpw r0,r0 ; Make sure we return CR0_EQ
2960 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2964 ; Function 7 - Clear change in physent
2966 .set .,hwpOpBase+(7*128) ; Generate error if previous function too long
2968 hwpCCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2970 hwpCCngPhX: lwarx r4,r5,r29 ; Get the old flags
2971 rlwinm r4,r4,0,ppCb+1-32,ppCb-1-32 ; Clear C
2972 stwcx. r4,r5,r29 ; Try to stuff it
2973 bne-- hwpCCngPhX ; Try again...
2974 ; Note: CR0_EQ is set because of stwcx.
2978 ; Function 8 - Clear change in mapping
2980 .set .,hwpOpBase+(8*128) ; Generate error if previous function too long
2982 hwpCCngMap: li r0,lo16(mpC) ; Get change bit
2983 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2984 andc r5,r5,r0 ; Clear in PTE copy
2985 andc r8,r8,r0 ; and in the mapping
2986 cmpw r0,r0 ; Make sure we return CR0_EQ
2987 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2991 ; Function 9 - Set reference in physent
2993 .set .,hwpOpBase+(9*128) ; Generate error if previous function too long
2995 hwpSRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2997 hwpSRefPhX: lwarx r4,r5,r29 ; Get the old flags
2998 ori r4,r4,lo16(ppR) ; Set the reference
2999 stwcx. r4,r5,r29 ; Try to stuff it
3000 bne-- hwpSRefPhX ; Try again...
3001 ; Note: CR0_EQ is set because of stwcx.
3005 ; Function 10 - Set reference in mapping
3007 .set .,hwpOpBase+(10*128) ; Generate error if previous function too long
3009 hwpSRefMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3010 ori r8,r8,lo16(mpR) ; Set reference in mapping
3011 cmpw r0,r0 ; Make sure we return CR0_EQ
3012 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3015 ; Function 11 - Set change in physent
3017 .set .,hwpOpBase+(11*128) ; Generate error if previous function too long
3019 hwpSCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
3021 hwpSCngPhX: lwarx r4,r5,r29 ; Get the old flags
3022 ori r4,r4,lo16(ppC) ; Set the change bit
3023 stwcx. r4,r5,r29 ; Try to stuff it
3024 bne-- hwpSCngPhX ; Try again...
3025 ; Note: CR0_EQ is set because of stwcx.
3028 ; Function 12 - Set change in mapping
3030 .set .,hwpOpBase+(12*128) ; Generate error if previous function too long
3032 hwpSCngMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3033 ori r8,r8,lo16(mpC) ; Set chage in mapping
3034 cmpw r0,r0 ; Make sure we return CR0_EQ
3035 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3038 ; Function 13 - Test reference in physent
3040 .set .,hwpOpBase+(13*128) ; Generate error if previous function too long
3042 hwpTRefPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3043 rlwinm. r0,r0,0,ppRb-32,ppRb-32 ; Isolate reference bit and see if 0
3044 blr ; Return (CR0_EQ set to continue if reference is off)...
3047 ; Function 14 - Test reference in mapping
3049 .set .,hwpOpBase+(14*128) ; Generate error if previous function too long
3051 hwpTRefMap: rlwinm. r0,r5,0,mpRb-32,mpRb-32 ; Isolate reference bit and see if 0
3052 blr ; Return (CR0_EQ set to continue if reference is off)...
3055 ; Function 15 - Test change in physent
3057 .set .,hwpOpBase+(15*128) ; Generate error if previous function too long
3059 hwpTCngPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
3060 rlwinm. r0,r0,0,ppCb-32,ppCb-32 ; Isolate change bit and see if 0
3061 blr ; Return (CR0_EQ set to continue if change is off)...
3064 ; Function 16 - Test change in mapping
3066 .set .,hwpOpBase+(16*128) ; Generate error if previous function too long
3068 hwpTCngMap: rlwinm. r0,r5,0,mpCb-32,mpCb-32 ; Isolate change bit and see if 0
3069 blr ; Return (CR0_EQ set to continue if change is off)...
3072 ; Function 17 - Test reference and change in physent
3074 .set .,hwpOpBase+(17*128) ; Generate error if previous function too long
3077 lwz r0,ppLink+4(r29) ; Get the flags from physent
3078 rlwinm r0,r0,0,ppRb-32,ppCb-32 ; Isolate reference and change bits
3079 cmplwi r0,lo16(ppR|ppC) ; cr0_eq <- ((R == 1) && (C == 1))
3080 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3081 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3084 ; Function 18 - Test reference and change in mapping
3086 .set .,hwpOpBase+(18*128) ; Generate error if previous function too long
3088 rlwinm r0,r5,0,mpRb-32,mpCb-32 ; Isolate reference and change bits from mapping
3089 cmplwi r0,lo16(mpR|mpC) ; cr0_eq <- ((R == 1) && (C == 1))
3090 crnot cr0_eq,cr0_eq ; cr0_eq <- ((R == 0) || (C == 0))
3091 blr ; Return (CR0_EQ set to continue if either R or C is off)...
3094 ; Function 19 - Clear reference and change in physent
3096 .set .,hwpOpBase+(19*128) ; Generate error if previous function too long
3098 li r5,ppLink+4 ; Get offset for flag part of physent
3101 lwarx r4,r5,r29 ; Get the old flags
3102 andc r4,r4,r25 ; Clear R and C as specified by mask
3103 stwcx. r4,r5,r29 ; Try to stuff it
3104 bne-- hwpCRefCngPhX ; Try again...
3105 ; Note: CR0_EQ is set because of stwcx.
3109 ; Function 20 - Clear reference and change in mapping
3111 .set .,hwpOpBase+(20*128) ; Generate error if previous function too long
3113 srwi r0,r25,(ppRb - mpRb) ; Align reference/change clear mask (phys->map)
3114 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
3115 andc r5,r5,r0 ; Clear in PTE copy
3116 andc r8,r8,r0 ; and in the mapping
3117 cmpw r0,r0 ; Make sure we return CR0_EQ
3118 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
3122 .set .,hwpOpBase+(21*128) ; Generate error if previous function too long
3125 ; unsigned int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping.
3128 ; mapRtOK - if all is ok
3129 ; mapRtBadLk - if mapping lock fails
3130 ; mapRtPerm - if mapping is permanent
3131 ; mapRtNotFnd - if mapping is not found
3132 ; mapRtBlock - if mapping is a block
3135 .globl EXT(hw_protect)
3138 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3139 mflr r0 ; Save the link register
3140 stw r24,FM_ARG0+0x00(r1) ; Save a register
3141 stw r25,FM_ARG0+0x04(r1) ; Save a register
3142 mr r25,r7 ; Remember address of next va
3143 stw r26,FM_ARG0+0x08(r1) ; Save a register
3144 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3145 stw r28,FM_ARG0+0x10(r1) ; Save a register
3146 mr r24,r6 ; Save the new protection flags
3147 stw r29,FM_ARG0+0x14(r1) ; Save a register
3148 stw r30,FM_ARG0+0x18(r1) ; Save a register
3149 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3150 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
3153 lwz r11,pmapFlags(r3) ; Get pmaps flags
3154 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3155 bne hpPanic ; Call not valid for guest shadow assist pmap
3158 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3159 lwz r7,pmapvr+4(r3) ; Get the second part
3162 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
3164 mr r27,r11 ; Remember the old MSR
3165 mr r26,r12 ; Remember the feature bits
3167 xor r28,r3,r7 ; Change the common 32- and 64-bit half
3169 bf-- pf64Bitb,hpSF1 ; skip if 32-bit...
3171 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3173 hpSF1: mr r29,r4 ; Save top half of vaddr
3174 mr r30,r5 ; Save the bottom half
3176 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3177 bl sxlkShared ; Go get a shared lock on the mapping lists
3178 mr. r3,r3 ; Did we get the lock?
3179 bne-- hpBadLock ; Nope...
3181 mr r3,r28 ; get the pmap address
3182 mr r4,r29 ; Get bits 0:31 to look for
3183 mr r5,r30 ; Get bits 32:64
3185 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
3187 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3188 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3189 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3190 cror cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
3191 mr. r31,r3 ; Save the mapping if we found it
3192 mr r29,r4 ; Save next va high half
3193 mr r30,r5 ; Save next va low half
3195 beq-- hpNotFound ; Not found...
3197 bf-- cr1_eq,hpNotAllowed ; Something special is happening...
3199 bt++ pf64Bitb,hpDo64 ; Split for 64 bit
3201 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3203 rlwimi r5,r24,0,mpPPb-32,mpPPe-32 ; Stick in the new pp (note that we ignore no-execute for 32-bit)
3204 mr. r3,r3 ; Was there a previously valid PTE?
3206 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3208 beq-- hpNoOld32 ; Nope...
3210 stw r5,4(r3) ; Store second half of PTE
3211 eieio ; Make sure we do not reorder
3212 stw r4,0(r3) ; Revalidate the PTE
3214 eieio ; Make sure all updates come first
3215 stw r6,0(r7) ; Unlock PCA
3217 hpNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3218 bl sxlkUnlock ; Unlock the search list
3220 li r3,mapRtOK ; Set normal return
3221 b hpR32 ; Join common...
3226 hpDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3228 rldimi r5,r24,0,mpNb ; Stick in the new no-exectue and pp bits
3229 mr. r3,r3 ; Was there a previously valid PTE?
3231 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
3233 beq-- hpNoOld64 ; Nope...
3235 std r5,8(r3) ; Store second half of PTE
3236 eieio ; Make sure we do not reorder
3237 std r4,0(r3) ; Revalidate the PTE
3239 eieio ; Make sure all updates come first
3240 stw r6,0(r7) ; Unlock PCA
3242 hpNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3243 bl sxlkUnlock ; Unlock the search list
3245 li r3,mapRtOK ; Set normal return
3246 b hpR64 ; Join common...
3250 hpReturn: bt++ pf64Bitb,hpR64 ; Yes...
3252 hpR32: mtmsr r27 ; Restore enables/translation/etc.
3254 b hpReturnC ; Join common...
3256 hpR64: mtmsrd r27 ; Restore enables/translation/etc.
3259 hpReturnC: stw r29,0(r25) ; Save the top of the next va
3260 stw r30,4(r25) ; Save the bottom of the next va
3261 lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
3262 lwz r24,FM_ARG0+0x00(r1) ; Save a register
3263 lwz r25,FM_ARG0+0x04(r1) ; Save a register
3264 lwz r26,FM_ARG0+0x08(r1) ; Save a register
3265 mtlr r0 ; Restore the return
3266 lwz r27,FM_ARG0+0x0C(r1) ; Save a register
3267 lwz r28,FM_ARG0+0x10(r1) ; Save a register
3268 lwz r29,FM_ARG0+0x14(r1) ; Save a register
3269 lwz r30,FM_ARG0+0x18(r1) ; Save a register
3270 lwz r31,FM_ARG0+0x1C(r1) ; Save a register
3271 lwz r1,0(r1) ; Pop the stack
3276 hpBadLock: li r3,mapRtBadLk ; Set lock time out error code
3277 b hpReturn ; Leave....
3279 hpNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3280 bl sxlkUnlock ; Unlock the search list
3282 li r3,mapRtNotFnd ; Set that we did not find the requested page
3283 b hpReturn ; Leave....
3286 rlwinm. r0,r7,0,mpRIPb,mpRIPb ; Is it actually being removed?
3287 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3288 bne-- hpNotFound ; Yeah...
3289 bl sxlkUnlock ; Unlock the search list
3291 li r3,mapRtBlock ; Assume it was a block
3292 rlwinm r0,r7,0,mpType ; Isolate mapping type
3293 cmplwi r0,mpBlock ; Is this a block mapping?
3294 beq++ hpReturn ; Yes, leave...
3296 li r3,mapRtPerm ; Set that we hit a permanent page
3297 b hpReturn ; Leave....
3299 hpPanic: lis r0,hi16(Choke) ; System abend
3300 ori r0,r0,lo16(Choke) ; System abend
3301 li r3,failMapping ; Show that we failed some kind of mapping thing
3306 ; int hw_test_rc(pmap, va, reset) - tests RC on a specific va
3308 ; Returns following code ORed with RC from mapping
3309 ; mapRtOK - if all is ok
3310 ; mapRtBadLk - if mapping lock fails
3311 ; mapRtNotFnd - if mapping is not found
3314 .globl EXT(hw_test_rc)
3317 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
3318 mflr r0 ; Save the link register
3319 stw r24,FM_ARG0+0x00(r1) ; Save a register
3320 stw r25,FM_ARG0+0x04(r1) ; Save a register
3321 stw r26,FM_ARG0+0x08(r1) ; Save a register
3322 stw r27,FM_ARG0+0x0C(r1) ; Save a register
3323 stw r28,FM_ARG0+0x10(r1) ; Save a register
3324 mr r24,r6 ; Save the reset request
3325 stw r29,FM_ARG0+0x14(r1) ; Save a register
3326 stw r30,FM_ARG0+0x18(r1) ; Save a register
3327 stw r31,FM_ARG0+0x1C(r1) ; Save a register
3328 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
3331 lwz r11,pmapFlags(r3) ; Get pmaps flags
3332 rlwinm. r11,r11,0,pmapVMgsaa ; Is guest shadow assist active?
3333 bne htrPanic ; Call not valid for guest shadow assist pmap
3336 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
3337 lwz r7,pmapvr+4(r3) ; Get the second part
3340 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
3342 mr r27,r11 ; Remember the old MSR
3343 mr r26,r12 ; Remember the feature bits
3345 xor r28,r3,r7 ; Change the common 32- and 64-bit half
3347 bf-- pf64Bitb,htrSF1 ; skip if 32-bit...
3349 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
3351 htrSF1: mr r29,r4 ; Save top half of vaddr
3352 mr r30,r5 ; Save the bottom half
3354 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3355 bl sxlkShared ; Go get a shared lock on the mapping lists
3356 mr. r3,r3 ; Did we get the lock?
3358 bne-- htrBadLock ; Nope...
3360 mr r3,r28 ; get the pmap address
3361 mr r4,r29 ; Get bits 0:31 to look for
3362 mr r5,r30 ; Get bits 32:64
3364 bl EXT(mapSearch) ; Go see if we can find it (R7 comes back with mpFlags)
3366 rlwinm. r0,r7,0,mpType ; Is this a normal mapping?
3367 crmove cr1_eq,cr0_eq ; cr1_eq <- this is a normal mapping
3368 andi. r0,r7,mpPerm|mpRIP ; Is it permanent or being removed?
3369 crand cr1_eq,cr0_eq,cr1_eq ; cr1_eq <- normal mapping and not permanent and not being removed
3370 mr. r31,r3 ; Save the mapping if we found it
3371 crandc cr1_eq,cr1_eq,cr0_eq ; cr1_eq <- found & normal & not permanent & not being removed
3373 bf-- cr1_eq,htrNotFound ; Not found, something special, or being removed...
3375 bt++ pf64Bitb,htrDo64 ; Split for 64 bit
3377 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
3379 cmplwi cr1,r24,0 ; Do we want to clear RC?
3380 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3381 mr. r3,r3 ; Was there a previously valid PTE?
3382 li r0,lo16(mpR|mpC) ; Get bits to clear
3384 and r25,r5,r0 ; Save the RC bits
3385 beq++ cr1,htrNoClr32 ; Nope...
3387 andc r12,r12,r0 ; Clear mapping copy of RC
3388 andc r5,r5,r0 ; Clear PTE copy of RC
3389 sth r12,mpVAddr+6(r31) ; Set the new RC
3391 htrNoClr32: beq-- htrNoOld32 ; No previously valid PTE...
3393 sth r5,6(r3) ; Store updated RC
3394 eieio ; Make sure we do not reorder
3395 stw r4,0(r3) ; Revalidate the PTE
3397 eieio ; Make sure all updates come first
3398 stw r6,0(r7) ; Unlock PCA
3400 htrNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3401 bl sxlkUnlock ; Unlock the search list
3402 li r3,mapRtOK ; Set normal return
3403 b htrR32 ; Join common...
3408 htrDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
3410 cmplwi cr1,r24,0 ; Do we want to clear RC?
3411 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
3412 mr. r3,r3 ; Was there a previously valid PTE?
3413 li r0,lo16(mpR|mpC) ; Get bits to clear
3415 and r25,r5,r0 ; Save the RC bits
3416 beq++ cr1,htrNoClr64 ; Nope...
3418 andc r12,r12,r0 ; Clear mapping copy of RC
3419 andc r5,r5,r0 ; Clear PTE copy of RC
3420 sth r12,mpVAddr+6(r31) ; Set the new RC
3422 htrNoClr64: beq-- htrNoOld64 ; Nope, no pevious pte...
3424 sth r5,14(r3) ; Store updated RC
3425 eieio ; Make sure we do not reorder
3426 std r4,0(r3) ; Revalidate the PTE
3428 eieio ; Make sure all updates come first
3429 stw r6,0(r7) ; Unlock PCA
3431 htrNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3432 bl sxlkUnlock ; Unlock the search list
3433 li r3,mapRtOK ; Set normal return
3434 b htrR64 ; Join common...
3438 htrReturn: bt++ pf64Bitb,htrR64 ; Yes...
3440 htrR32: mtmsr r27 ; Restore enables/translation/etc.
3442 b htrReturnC ; Join common...
3444 htrR64: mtmsrd r27 ; Restore enables/translation/etc.
3447 htrReturnC: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
3448 or r3,r3,r25 ; Send the RC bits back
3449 lwz r24,FM_ARG0+0x00(r1) ; Save a register
3450 lwz r25,FM_ARG0+0x04(r1) ; Save a register
3451 lwz r26,FM_ARG0+0x08(r1) ; Save a register
3452 mtlr r0 ; Restore the return
3453 lwz r27,FM_ARG0+0x0C(r1) ; Save a register
3454 lwz r28,FM_ARG0+0x10(r1) ; Save a register
3455 lwz r29,FM_ARG0+0x14(r1) ; Save a register
3456 lwz r30,FM_ARG0+0x18(r1) ; Save a register
3457 lwz r31,FM_ARG0+0x1C(r1) ; Save a register
3458 lwz r1,0(r1) ; Pop the stack
3463 htrBadLock: li r3,mapRtBadLk ; Set lock time out error code
3464 b htrReturn ; Leave....
3467 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3468 bl sxlkUnlock ; Unlock the search list
3470 li r3,mapRtNotFnd ; Set that we did not find the requested page
3471 b htrReturn ; Leave....
3473 htrPanic: lis r0,hi16(Choke) ; System abend
3474 ori r0,r0,lo16(Choke) ; System abend
3475 li r3,failMapping ; Show that we failed some kind of mapping thing
3481 ; mapFindLockPN - find and lock physent for a given page number
3486 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3487 mr r2,r3 ; Save our target
3488 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
3490 mapFLPNitr: lwz r3,mrPhysTab(r9) ; Get the actual table address
3491 lwz r5,mrStart(r9) ; Get start of table entry
3492 lwz r0,mrEnd(r9) ; Get end of table entry
3493 addi r9,r9,mrSize ; Point to the next slot
3494 cmplwi cr7,r3,0 ; Are we at the end of the table?
3495 cmplw r2,r5 ; See if we are in this table
3496 cmplw cr1,r2,r0 ; Check end also
3497 sub r4,r2,r5 ; Calculate index to physical entry
3498 beq-- cr7,mapFLPNmiss ; Leave if we did not find an entry...
3499 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
3500 slwi r4,r4,3 ; Get offset to physical entry
3502 blt-- mapFLPNitr ; Did not find it...
3504 add r3,r3,r4 ; Point right to the slot
3505 b mapPhysLock ; Join common lock code
3508 li r3,0 ; Show that we did not find it
3513 ; mapPhysFindLock - find physent list and lock it
3514 ; R31 points to mapping
3519 lbz r4,mpFlags+1(r31) ; Get the index into the physent bank table
3520 lis r3,ha16(EXT(pmap_mem_regions)) ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part)
3521 rlwinm r4,r4,2,24,29 ; Mask index bits and convert to byte offset
3522 addi r4,r4,lo16(EXT(pmap_mem_regions)) ; Get low part of address of entry
3523 add r3,r3,r4 ; Point to table entry
3524 lwz r5,mpPAddr(r31) ; Get physical page number
3525 lwz r7,mrStart(r3) ; Get the start of range
3526 lwz r3,mrPhysTab(r3) ; Get the start of the entries for this bank
3527 sub r6,r5,r7 ; Get index to physent
3528 rlwinm r6,r6,3,0,28 ; Get offset to physent
3529 add r3,r3,r6 ; Point right to the physent
3530 b mapPhysLock ; Join in the lock...
3533 ; mapPhysLock - lock a physent list
3534 ; R3 contains list header
3539 li r2,lgKillResv ; Get a spot to kill reservation
3540 stwcx. r2,0,r2 ; Kill it...
3543 lwz r2,ppLink(r3) ; Get physent chain header
3544 rlwinm. r2,r2,0,0,0 ; Is lock clear?
3545 bne-- mapPhysLockT ; Nope, still locked...
3548 lwarx r2,0,r3 ; Get the lock
3549 rlwinm. r0,r2,0,0,0 ; Is it locked?
3550 oris r0,r2,0x8000 ; Set the lock bit
3551 bne-- mapPhysLockS ; It is locked, spin on it...
3552 stwcx. r0,0,r3 ; Try to stuff it back...
3553 bne-- mapPhysLock ; Collision, try again...
3554 isync ; Clear any speculations
3559 ; mapPhysUnlock - unlock a physent list
3560 ; R3 contains list header
3565 lwz r0,ppLink(r3) ; Get physent chain header
3566 rlwinm r0,r0,0,1,31 ; Clear the lock bit
3567 eieio ; Make sure unlock comes last
3568 stw r0,ppLink(r3) ; Unlock the list
3572 ; mapPhysMerge - merge the RC bits into the master copy
3573 ; R3 points to the physent
3574 ; R4 contains the RC bits
3576 ; Note: we just return if RC is 0
3581 rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits
3582 la r5,ppLink+4(r3) ; Point to the RC field
3583 beqlr-- ; Leave if RC is 0...
3586 lwarx r6,0,r5 ; Get the RC part
3587 or r6,r6,r4 ; Merge in the RC
3588 stwcx. r6,0,r5 ; Try to stuff it back...
3589 bne-- mapPhysMergeT ; Collision, try again...
3593 ; Sets the physent link pointer and preserves all flags
3594 ; The list is locked
3595 ; R3 points to physent
3596 ; R4 has link to set
3602 la r5,ppLink+4(r3) ; Point to the link word
3605 lwarx r2,0,r5 ; Get the link and flags
3606 rlwimi r4,r2,0,ppFlags ; Insert the flags
3607 stwcx. r4,0,r5 ; Stick them back
3608 bne-- mapPhyCSetR ; Someone else did something, try again...
3614 li r0,ppLFAmask ; Get mask to clean up mapping pointer
3615 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
3618 ldarx r2,0,r3 ; Get the link and flags
3619 and r5,r2,r0 ; Isolate the flags
3620 or r6,r4,r5 ; Add them to the link
3621 stdcx. r6,0,r3 ; Stick them back
3622 bne-- mapPhyCSet64x ; Someone else did something, try again...
3626 ; mapBumpBusy - increment the busy count on a mapping
3627 ; R3 points to mapping
3633 lwarx r4,0,r3 ; Get mpBusy
3634 addis r4,r4,0x0100 ; Bump the busy count
3635 stwcx. r4,0,r3 ; Save it back
3636 bne-- mapBumpBusy ; This did not work, try again...
3640 ; mapDropBusy - increment the busy count on a mapping
3641 ; R3 points to mapping
3644 .globl EXT(mapping_drop_busy)
3647 LEXT(mapping_drop_busy)
3649 lwarx r4,0,r3 ; Get mpBusy
3650 addis r4,r4,0xFF00 ; Drop the busy count
3651 stwcx. r4,0,r3 ; Save it back
3652 bne-- mapDropBusy ; This did not work, try again...
3656 ; mapDrainBusy - drain the busy count on a mapping
3657 ; R3 points to mapping
3658 ; Note: we already have a busy for ourselves. Only one
3659 ; busy per processor is allowed, so we just spin here
3660 ; waiting for the count to drop to 1.
3661 ; Also, the mapping can not be on any lists when we do this
3662 ; so all we are doing is waiting until it can be released.
3668 lwz r4,mpFlags(r3) ; Get mpBusy
3669 rlwinm r4,r4,8,24,31 ; Clean it up
3670 cmplwi r4,1 ; Is is just our busy?
3671 beqlr++ ; Yeah, it is clear...
3672 b mapDrainBusy ; Try again...
3677 ; handleDSeg - handle a data segment fault
3678 ; handleISeg - handle an instruction segment fault
3680 ; All that we do here is to map these to DSI or ISI and insure
3681 ; that the hash bit is not set. This forces the fault code
3682 ; to also handle the missing segment.
3684 ; At entry R2 contains per_proc, R13 contains savarea pointer,
3685 ; and R11 is the exception code.
3689 .globl EXT(handleDSeg)
3693 li r11,T_DATA_ACCESS ; Change fault to DSI
3694 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3695 b EXT(handlePF) ; Join common...
3698 .globl EXT(handleISeg)
3702 li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI
3703 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3704 b EXT(handlePF) ; Join common...
3708 * handlePF - handle a page fault interruption
3710 * At entry R2 contains per_proc, R13 contains savarea pointer,
3711 * and R11 is the exception code.
3713 * This first part does a quick check to see if we can handle the fault.
3714 * We canot handle any kind of protection exceptions here, so we pass
3715 * them up to the next level.
3717 * NOTE: In order for a page-fault redrive to work, the translation miss
3718 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
3719 * before we come here.
3723 .globl EXT(handlePF)
3727 mfsprg r12,2 ; Get feature flags
3728 cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction
3729 lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode
3730 mtcrf 0x02,r12 ; move pf64Bit to cr6
3731 lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here
3732 lwz r18,SAVflags(r13) ; Get the flags
3734 beq-- gotIfetch ; We have an IFETCH here...
3736 lwz r27,savedsisr(r13) ; Get the DSISR
3737 lwz r29,savedar(r13) ; Get the first half of the DAR
3738 lwz r30,savedar+4(r13) ; And second half
3740 b ckIfProt ; Go check if this is a protection fault...
3742 gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value
3743 lwz r29,savesrr0(r13) ; Get the first half of the instruction address
3744 lwz r30,savesrr0+4(r13) ; And second half
3745 stw r27,savedsisr(r13) ; Save the "constructed" DSISR
3747 ckIfProt: and. r4,r27,r0 ; Is this a non-handlable exception?
3748 li r20,64 ; Set a limit of 64 nests for sanity check
3749 bne-- hpfExit ; Yes... (probably not though)
3752 ; Note: if the RI is on, we are accessing user space from the kernel, therefore we
3753 ; should be loading the user pmap here.
3756 andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space?
3757 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
3758 mr r19,r2 ; Remember the per_proc
3759 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
3760 mr r23,r30 ; Save the low part of faulting address
3761 beq-- hpfInKern ; Skip if we are in the kernel
3762 la r8,ppUserPmap(r19) ; Point to the current user pmap
3764 hpfInKern: mr r22,r29 ; Save the high part of faulting address
3766 bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit...
3769 ; On 32-bit machines we emulate a segment exception by loading unused SRs with a
3770 ; predefined value that corresponds to no address space. When we see that value
3771 ; we turn off the PTE miss bit in the DSISR to drive the code later on that will
3772 ; cause the proper SR to be loaded.
3775 lwz r28,4(r8) ; Pick up the pmap
3776 rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive?
3777 mr r25,r28 ; Save the original pmap (in case we nest)
3778 lwz r0,pmapFlags(r28) ; Get pmap's flags
3779 bne hpfGVtest ; Segs are not ours if so...
3780 mfsrin r4,r30 ; Get the SR that was used for translation
3781 cmplwi r4,invalSpace ; Is this a simulated segment fault?
3782 bne++ hpfGVtest ; No...
3784 rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR
3785 b hpfGVtest ; Join on up...
3789 nop ; Push hpfNest to a 32-byte boundary
3790 nop ; Push hpfNest to a 32-byte boundary
3791 nop ; Push hpfNest to a 32-byte boundary
3793 hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit)
3794 mr r25,r28 ; Save the original pmap (in case we nest)
3795 lwz r0,pmapFlags(r28) ; Get pmap's flags
3797 hpfGVtest: rlwinm. r0,r0,0,pmapVMgsaa ; Using guest shadow mapping assist?
3798 bne hpfGVxlate ; Yup, do accelerated shadow stuff
3801 ; This is where we loop descending nested pmaps
3804 hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3805 addi r20,r20,-1 ; Count nest try
3806 bl sxlkShared ; Go get a shared lock on the mapping lists
3807 mr. r3,r3 ; Did we get the lock?
3808 bne-- hpfBadLock ; Nope...
3810 mr r3,r28 ; Get the pmap pointer
3811 mr r4,r22 ; Get top of faulting vaddr
3812 mr r5,r23 ; Get bottom of faulting vaddr
3813 bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags)
3815 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one?
3816 mr. r31,r3 ; Save the mapping if we found it
3817 cmplwi cr1,r0,0 ; Check for removal
3818 crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing
3820 bt-- cr0_eq,hpfNotFound ; Not found or removing...
3822 rlwinm r0,r7,0,mpType ; Isolate mapping type
3823 cmplwi r0,mpNest ; Are we again nested?
3824 cmplwi cr1,r0,mpLinkage ; Are we a linkage type?
3825 cror cr0_eq,cr1_eq,cr0_eq ; cr0_eq <- nested or linkage type?
3826 mr r26,r7 ; Get the flags for this mapping (passed back from search call)
3828 lhz r21,mpSpace(r31) ; Get the space
3830 bne++ hpfFoundIt ; No, we found our guy...
3833 #if pmapTransSize != 12
3834 #error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize
3836 cmplwi r0,mpLinkage ; Linkage mapping?
3837 cmplwi cr1,r20,0 ; Too many nestings?
3838 beq-- hpfSpclNest ; Do we need to do special handling?
3840 hpfCSrch: lhz r21,mpSpace(r31) ; Get the space
3841 lwz r8,mpNestReloc(r31) ; Get the vaddr relocation
3842 lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half
3843 la r3,pmapSXlk(r28) ; Point to the old pmap search lock
3844 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
3845 lis r10,hi16(EXT(pmapTrans)) ; Get the translate table
3846 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
3847 blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop...
3848 or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit
3849 slwi r11,r21,3 ; Multiply space by 8
3850 ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part
3851 addc r23,r23,r9 ; Relocate bottom half of vaddr
3852 lwz r10,0(r10) ; Get the actual translation map
3853 slwi r12,r21,2 ; Multiply space by 4
3854 add r10,r10,r11 ; Add in the higher part of the index
3855 rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit)
3856 adde r22,r22,r8 ; Relocate the top half of the vaddr
3857 add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry
3858 bl sxlkUnlock ; Unlock the search list
3860 bt++ pf64Bitb,hpfGetPmap64 ; Separate handling for 64-bit machines
3861 lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap
3862 cmplwi r28,0 ; Is the pmap paddr valid?
3863 bne+ hpfNest ; Nest into new pmap...
3864 b hpfBadPmap ; Handle bad pmap
3867 ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap
3868 cmpldi r28,0 ; Is the pmap paddr valid?
3869 bne++ hpfNest ; Nest into new pmap...
3870 b hpfBadPmap ; Handle bad pmap
3874 ; Error condition. We only allow 64 nestings. This keeps us from having to
3875 ; check for recusive nests when we install them.
3881 lwz r20,savedsisr(r13) ; Get the DSISR
3882 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3883 bl sxlkUnlock ; Unlock the search list (R3 good from above)
3884 ori r20,r20,1 ; Indicate that there was a nesting problem
3885 stw r20,savedsisr(r13) ; Stash it
3886 lwz r11,saveexception(r13) ; Restore the exception code
3887 b EXT(PFSExit) ; Yes... (probably not though)
3890 ; Error condition - lock failed - this is fatal
3896 lis r0,hi16(Choke) ; System abend
3897 ori r0,r0,lo16(Choke) ; System abend
3898 li r3,failMapping ; Show mapping failure
3902 ; Error condition - space id selected an invalid pmap - fatal
3908 lis r0,hi16(Choke) ; System abend
3909 ori r0,r0,lo16(Choke) ; System abend
3910 li r3,failPmap ; Show invalid pmap
3914 ; Did not find any kind of mapping
3920 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3921 bl sxlkUnlock ; Unlock it
3922 lwz r11,saveexception(r13) ; Restore the exception code
3924 hpfExit: ; We need this because we can not do a relative branch
3925 b EXT(PFSExit) ; Yes... (probably not though)
3929 ; Here is where we handle special mappings. So far, the only use is to load a
3930 ; processor specific segment register for copy in/out handling.
3932 ; The only (so far implemented) special map is used for copyin/copyout.
3933 ; We keep a mapping of a "linkage" mapping in the per_proc.
3934 ; The linkage mapping is basically a nested pmap that is switched in
3935 ; as part of context switch. It relocates the appropriate user address
3936 ; space slice into the right place in the kernel.
3942 la r31,ppUMWmp(r19) ; Just point to the mapping
3943 oris r27,r27,hi16(dsiLinkage) ; Show that we had a linkage mapping here
3944 b hpfCSrch ; Go continue search...
3948 ; We have now found a mapping for the address we faulted on.
3952 ; Here we go about calculating what the VSID should be. We concatanate
3953 ; the space ID (14 bits wide) 3 times. We then slide the vaddr over
3954 ; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID).
3955 ; Then we XOR and expanded space ID and the shifted vaddr. This gives us
3958 ; This is used both for segment handling and PTE handling
3963 #error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!!
3966 ; Important non-volatile registers at this point ('home' means the final pmap/mapping found
3967 ; when a multi-level mapping has been successfully searched):
3968 ; r21: home space id number
3969 ; r22: relocated high-order 32 bits of vaddr
3970 ; r23: relocated low-order 32 bits of vaddr
3971 ; r25: pmap physical address
3973 ; r28: home pmap physical address
3974 ; r29: high-order 32 bits of faulting vaddr
3975 ; r30: low-order 32 bits of faulting vaddr
3976 ; r31: mapping's physical address
3980 hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment
3981 hpfGVfound: rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment?
3982 rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID
3983 rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order
3984 rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over
3985 rlwinm r0,r27,0,dsiLinkageb,dsiLinkageb ; Isolate linkage mapping flag
3986 rlwimi r21,r21,14,4,17 ; Make a second copy of space above first
3987 cmplwi cr5,r0,0 ; Did we just do a special nesting?
3988 rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35
3989 crorc cr0_eq,cr0_eq,cr5_eq ; Force outselves through the seg load code if special nest
3990 rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register
3991 xor r14,r14,r20 ; Calculate the top half of VSID
3992 xor r15,r15,r21 ; Calculate the bottom half of the VSID
3993 rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing)
3994 rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry
3995 rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top
3996 rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position
3997 or r12,r12,r15 ; Add key into the bottom of VSID
3999 ; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12
4001 bne++ hpfPteMiss ; Nope, normal PTE miss...
4004 ; Here is the only place that we make an entry in the pmap segment cache.
4006 ; Note that we do not make an entry in the segment cache for special
4007 ; nested mappings. This makes the copy in/out segment get refreshed
4008 ; when switching threads.
4010 ; The first thing that we do is to look up the ESID we are going to load
4011 ; into a segment in the pmap cache. If it is already there, this is
4012 ; a segment that appeared since the last time we switched address spaces.
4013 ; If all is correct, then it was another processors that made the cache
4014 ; entry. If not, well, it is an error that we should die on, but I have
4015 ; not figured a good way to trap it yet.
4017 ; If we get a hit, we just bail, otherwise, lock the pmap cache, select
4018 ; an entry based on the generation number, update the cache entry, and
4019 ; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit
4020 ; entries that correspond to the last 4 bits (32:35 for 64-bit and
4021 ; 0:3 for 32-bit) of the ESID.
4023 ; Then we unlock and bail.
4025 ; First lock it. Then select a free slot or steal one based on the generation
4026 ; number. Then store it, update the allocation flags, and unlock.
4028 ; The cache entry contains an image of the ESID/VSID pair we would load for
4029 ; 64-bit architecture. For 32-bit, it is a simple transform to an SR image.
4031 ; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not
4032 ; the current one, which may have changed because we nested.
4034 ; Also remember that we do not store the valid bit in the ESID. If we
4035 ; od, this will break some other stuff.
4038 bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault....
4040 mr r3,r25 ; Point to the pmap
4041 mr r4,r29 ; ESID high half
4042 mr r5,r30 ; ESID low half
4043 bl pmapCacheLookup ; Go see if this is in the cache already
4045 mr. r3,r3 ; Did we find it?
4046 mr r4,r11 ; Copy this to a different register
4048 bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry...
4050 lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table
4051 lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table
4053 cntlzw r7,r4 ; Find a free slot
4055 subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one
4056 rlwinm r30,r30,0,0,3 ; Clean up the ESID
4057 srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not
4058 addi r5,r4,1 ; Bump the generation number
4059 and r7,r7,r6 ; Clear bit number if none empty
4060 andc r8,r4,r6 ; Clear generation count if we found an empty
4061 rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word
4062 or r7,r7,r8 ; Select a slot number
4064 andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using
4065 oris r8,r8,0x8000 ; Get the high bit on
4066 la r9,pmapSegCache(r25) ; Point to the segment cache
4067 slwi r6,r7,4 ; Get index into the segment cache
4068 slwi r2,r7,2 ; Get index into the segment cache sub-tag index
4069 srw r8,r8,r7 ; Get the mask
4070 cmplwi r2,32 ; See if we are in the first or second half of sub-tag
4072 rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out
4073 oris r0,r0,0xF000 ; Get the sub-tag mask
4074 add r9,r9,r6 ; Point to the cache slot
4075 srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift work for either half)
4076 srw r5,r30,r2 ; Slide sub-tag to right slot (shift work for either half)
4078 stw r29,sgcESID(r9) ; Save the top of the ESID
4079 andc r10,r10,r0 ; Clear sub-tag slot in case we are in top
4080 andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom
4081 stw r30,sgcESID+4(r9) ; Save the bottom of the ESID
4082 or r10,r10,r5 ; Stick in subtag in case top half
4083 or r11,r11,r5 ; Stick in subtag in case bottom half
4084 stw r14,sgcVSID(r9) ; Save the top of the VSID
4085 andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated
4086 stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key
4087 bge hpfSCSTbottom ; Go save the bottom part of sub-tag
4089 stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag
4090 b hpfNoCacheEnt ; Go finish up...
4093 stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag
4097 eieio ; Make sure cache is updated before lock
4098 stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number
4102 lwz r4,ppMapFlags(r19) ; Get the protection key modifier
4103 bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment...
4106 ; Make and enter 32-bit segment register
4109 lwz r16,validSegs(r19) ; Get the valid SR flags
4110 xor r12,r12,r4 ; Alter the storage key before loading segment register
4111 rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting
4112 rlwinm r6,r12,19,1,3 ; Insert the keys and N bit
4113 lis r0,0x8000 ; Set bit 0
4114 rlwimi r6,r12,20,12,31 ; Insert 4:23 the VSID
4115 srw r0,r0,r2 ; Get bit corresponding to SR
4116 rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents
4117 or r16,r16,r0 ; Show that SR is valid
4119 mtsrin r6,r30 ; Set the actual SR
4121 stw r16,validSegs(r19) ; Set the valid SR flags
4123 b hpfPteMiss ; SR loaded, go do a PTE...
4126 ; Make and enter 64-bit segment look-aside buffer entry.
4127 ; Note that the cache entry is the right format except for valid bit.
4128 ; We also need to convert from long long to 64-bit register values.
4135 ld r16,validSegs(r19) ; Get the valid SLB entry flags
4136 sldi r8,r29,32 ; Move high order address over
4137 sldi r10,r14,32 ; Move high part of VSID over
4139 not r3,r16 ; Make valids be 0s
4140 li r0,1 ; Prepare to set bit 0
4142 cntlzd r17,r3 ; Find a free SLB
4143 xor r12,r12,r4 ; Alter the storage key before loading segment table entry
4144 or r9,r8,r30 ; Form full 64-bit address
4145 cmplwi r17,63 ; Did we find a free SLB entry?
4146 sldi r0,r0,63 ; Get bit 0 set
4147 or r10,r10,r12 ; Move in low part and keys
4148 addi r17,r17,1 ; Skip SLB 0 always
4149 blt++ hpfFreeSeg ; Yes, go load it...
4152 ; No free SLB entries, select one that is in use and invalidate it
4154 lwz r4,ppSegSteal(r19) ; Get the next slot to steal
4155 addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
4156 addi r4,r4,1 ; Set next slot to steal
4157 slbmfee r7,r17 ; Get the entry that is in the selected spot
4158 subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap
4159 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
4160 srawi r2,r2,31 ; Get -1 if steal index still in range
4161 slbie r7 ; Invalidate the in-use SLB entry
4162 and r4,r4,r2 ; Reset steal index when it should wrap
4165 stw r4,ppSegSteal(r19) ; Set the next slot to steal
4167 ; We are now ready to stick the SLB entry in the SLB and mark it in use
4171 subi r4,r17,1 ; Adjust shift to account for skipping slb 0
4172 mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear
4173 srd r0,r0,r4 ; Set bit mask for allocation
4174 oris r9,r9,0x0800 ; Turn on the valid bit
4175 or r16,r16,r0 ; Turn on the allocation flag
4176 rldimi r9,r17,0,58 ; Copy in the SLB entry selector
4178 beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest...
4179 slbie r7 ; Blow away a potential duplicate
4181 hpfNoBlow: slbmte r10,r9 ; Make that SLB entry
4183 std r16,validSegs(r19) ; Mark as valid
4184 b hpfPteMiss ; STE loaded, go do a PTE...
4187 ; The segment has been set up and loaded if need be. Now we are ready to build the
4188 ; PTE and get it into the hash table.
4190 ; Note that there is actually a race here. If we start fault processing on
4191 ; a different pmap, i.e., we have descended into a nested pmap, it is possible
4192 ; that the nest could have been removed from the original pmap. We would
4193 ; succeed with this translation anyway. I do not think we need to worry
4194 ; about this (famous last words) because nobody should be unnesting anything
4195 ; if there are still people activily using them. It should be up to the
4196 ; higher level VM system to put the kibosh on this.
4198 ; There is also another race here: if we fault on the same mapping on more than
4199 ; one processor at the same time, we could end up with multiple PTEs for the same
4200 ; mapping. This is not a good thing.... We really only need one of the
4201 ; fault handlers to finish, so what we do is to set a "fault in progress" flag in
4202 ; the mapping. If we see that set, we just abandon the handler and hope that by
4203 ; the time we restore context and restart the interrupted code, the fault has
4204 ; been resolved by the other guy. If not, we will take another fault.
4208 ; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not.
4209 ; It is required to stay there until after we call mapSelSlot!!!!
4214 hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field
4215 lwz r12,mpPte(r31) ; Get the quick pointer to PTE
4216 li r3,mpHValid ; Get the PTE valid bit
4217 andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side?
4218 ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag
4219 crnot cr1_eq,cr0_eq ; Remember if FIP was on
4220 and. r12,r12,r3 ; Isolate the valid bit
4221 crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail...
4222 beq-- hpfAbandon ; Yes, other processor is or already has handled this...
4223 rlwinm r0,r2,0,mpType ; Isolate mapping type
4224 cmplwi r0,mpBlock ; Is this a block mapping?
4225 crnot cr7_eq,cr0_eq ; Remember if we have a block mapping
4226 stwcx. r2,0,r31 ; Store the flags
4227 bne-- hpfPteMiss ; Collision, try again...
4229 bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff...
4232 ; At this point we are about to do the 32-bit PTE generation.
4234 ; The following is the R14:R15 pair that contains the "shifted" VSID:
4238 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4239 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4240 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4242 ; The 24 bits of the 32-bit architecture VSID is in the following:
4246 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4247 ; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4248 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4253 lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion)
4254 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4256 mfsdr1 r27 ; Get the hash table base address
4258 rlwinm r0,r23,0,4,19 ; Isolate just the page index
4259 rlwinm r18,r23,10,26,31 ; Extract the API
4260 xor r19,r15,r0 ; Calculate hash << 12
4261 mr r2,r25 ; Save the flag part of the mapping
4262 rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image
4263 rlwinm r16,r27,16,7,15 ; Extract the hash table size
4264 rlwinm r25,r25,0,0,19 ; Clear out the flags
4265 slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing no supported)
4266 sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map)
4267 ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask
4268 rlwinm r27,r27,0,0,15 ; Extract the hash table base
4269 rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table
4270 add r24,r24,r25 ; Adjust to true physical address
4271 rlwimi r18,r15,27,5,24 ; Move bits 32:31 of the "shifted" VSID into the PTE image
4272 rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot
4273 and r19,r19,r16 ; Wrap hash table offset into the hash table
4274 ori r24,r24,lo16(mpR) ; Turn on the reference bit right now
4275 rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA
4276 add r19,r19,r27 ; Point to the PTEG
4277 subfic r20,r20,-4 ; Get negative offset to PCA
4278 oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4279 add r20,r20,r27 ; Point to the PCA slot
4282 ; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower.
4283 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4285 ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4286 ; that some other processor beat us and stuck in a PTE or that
4287 ; all we had was a simple segment exception and the PTE was there the whole time.
4288 ; If we find one a pointer, we are done.
4291 mr r7,r20 ; Copy the PCA pointer
4292 bl mapLockPteg ; Lock the PTEG
4294 lwz r12,mpPte(r31) ; Get the offset to the PTE
4295 mr r17,r6 ; Remember the PCA image
4296 mr r16,r6 ; Prime the post-select PCA image
4297 andi. r0,r12,mpHValid ; Is there a PTE here already?
4298 li r21,8 ; Get the number of slots
4300 bne- cr7,hpfNoPte32 ; Skip this for a block mapping...
4302 bne- hpfBailOut ; Someone already did this for us...
4305 ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R6 as a
4306 ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4307 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4308 ; R4 returns the slot index.
4310 ; REMEMBER: CR7 indicates that we are building a block mapping.
4313 hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots
4314 mr r6,r17 ; Get back the original PCA
4315 rlwimi r6,r16,0,8,15 ; Insert the updated steal slot
4316 blt- hpfBailOut ; Holy Cow, all slots are locked...
4318 bl mapSelSlot ; Go select a slot (note that the PCA image is already set up)
4320 cmplwi cr5,r3,1 ; Did we steal a slot?
4321 rlwimi r19,r4,3,26,28 ; Insert PTE index into PTEG address yielding PTE address
4322 mr r16,r6 ; Remember the PCA image after selection
4323 blt+ cr5,hpfInser32 ; Nope, no steal...
4325 lwz r6,0(r19) ; Get the old PTE
4326 lwz r7,4(r19) ; Get the real part of the stealee
4327 rlwinm r6,r6,0,1,31 ; Clear the valid bit
4328 bgt cr5,hpfNipBM ; Do not try to lock a non-existant physent for a block mapping...
4329 srwi r3,r7,12 ; Change phys address to a ppnum
4330 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4331 cmplwi cr1,r3,0 ; Check if this is in RAM
4332 bne- hpfNoPte32 ; Could not get it, try for another...
4334 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4336 hpfNipBM: stw r6,0(r19) ; Set the invalid PTE
4338 sync ; Make sure the invalid is stored
4339 li r9,tlbieLock ; Get the TLBIE lock
4340 rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part
4342 hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock
4343 mfsprg r4,0 ; Get the per_proc
4344 rlwinm r8,r6,25,18,31 ; Extract the space ID
4345 rlwinm r11,r6,25,18,31 ; Extract the space ID
4346 lwz r7,hwSteals(r4) ; Get the steal count
4347 srwi r2,r6,7 ; Align segment number with hash
4348 rlwimi r11,r11,14,4,17 ; Get copy above ourselves
4349 mr. r0,r0 ; Is it locked?
4350 srwi r0,r19,6 ; Align PTEG offset for back hash
4351 xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits)
4352 xor r11,r11,r0 ; Hash backwards to partial vaddr
4353 rlwinm r12,r2,14,0,3 ; Shift segment up
4354 mfsprg r2,2 ; Get feature flags
4355 li r0,1 ; Get our lock word
4356 rlwimi r12,r6,22,4,9 ; Move up the API
4357 bne- hpfTLBIE32 ; It is locked, go wait...
4358 rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr
4360 stwcx. r0,0,r9 ; Try to get it
4361 bne- hpfTLBIE32 ; We was beat...
4362 addi r7,r7,1 ; Bump the steal count
4364 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
4365 li r0,0 ; Lock clear value
4367 tlbie r12 ; Invalidate it everywhere
4370 beq- hpfNoTS32 ; Can not have MP on this machine...
4372 eieio ; Make sure that the tlbie happens first
4373 tlbsync ; Wait for everyone to catch up
4374 sync ; Make sure of it all
4376 hpfNoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock
4378 stw r7,hwSteals(r4) ; Save the steal count
4379 bgt cr5,hpfInser32 ; We just stole a block mapping...
4381 lwz r4,4(r19) ; Get the RC of the just invalidated PTE
4383 la r11,ppLink+4(r3) ; Point to the master RC copy
4384 lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping
4385 rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC
4387 hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC
4388 or r0,r0,r2 ; Merge in the new RC
4389 stwcx. r0,0,r11 ; Try to stick it back
4390 bne- hpfMrgRC32 ; Try again if we collided...
4393 hpfFPnch: rlwinm. r7,r7,0,~ppFlags ; Clean and test mapping address
4394 beq- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4396 lhz r10,mpSpace(r7) ; Get the space
4397 lwz r9,mpVAddr+4(r7) ; And the vaddr
4398 cmplw cr1,r10,r8 ; Is this one of ours?
4399 xor r9,r12,r9 ; Compare virtual address
4400 cmplwi r9,0x1000 ; See if we really match
4401 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4402 beq+ hpfFPnch2 ; Yes, found ours...
4404 lwz r7,mpAlias+4(r7) ; Chain on to the next
4405 b hpfFPnch ; Check it out...
4407 hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG
4408 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG)
4409 bl mapPhysUnlock ; Unlock the physent now
4411 hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
4413 stw r24,4(r19) ; Stuff in the real part of the PTE
4414 eieio ; Make sure this gets there first
4416 stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid
4417 mr r17,r16 ; Get the PCA image to save
4418 b hpfFinish ; Go join the common exit code...
4422 ; At this point we are about to do the 64-bit PTE generation.
4424 ; The following is the R14:R15 pair that contains the "shifted" VSID:
4428 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4429 ; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
4430 ; +--------+--------+--------+--------+--------+--------+--------+--------+
4437 ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping
4438 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
4440 mfsdr1 r27 ; Get the hash table base address
4442 sldi r11,r22,32 ; Slide top of adjusted EA over
4443 sldi r14,r14,32 ; Slide top of VSID over
4444 rlwinm r5,r27,0,27,31 ; Isolate the size
4445 eqv r16,r16,r16 ; Get all foxes here
4446 rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN
4447 mr r2,r10 ; Save the flag part of the mapping
4448 or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value
4449 rldicr r27,r27,0,45 ; Clean up the hash table base
4450 or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value
4451 rlwinm r0,r11,0,4,19 ; Clear out everything but the page
4452 subfic r5,r5,46 ; Get number of leading zeros
4453 xor r19,r0,r15 ; Calculate hash
4454 ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE
4455 srd r16,r16,r5 ; Shift over to get length of table
4456 srdi r19,r19,5 ; Convert page offset to hash table offset
4457 rldicr r16,r16,0,56 ; Clean up lower bits in hash table size
4458 rldicr r10,r10,0,51 ; Clear out flags
4459 sldi r24,r24,12 ; Change ppnum to physical address
4460 sub r11,r11,r10 ; Get the offset from the base mapping
4461 and r19,r19,r16 ; Wrap into hash table
4462 add r24,r24,r11 ; Get actual physical address of this page
4463 srdi r20,r19,5 ; Convert PTEG offset to PCA offset
4464 rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc.
4465 subfic r20,r20,-4 ; Get negative offset to PCA
4466 ori r24,r24,lo16(mpR) ; Force on the reference bit
4467 add r20,r20,r27 ; Point to the PCA slot
4468 add r19,r19,r27 ; Point to the PTEG
4471 ; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower.
4472 ; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
4474 ; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
4475 ; that some other processor beat us and stuck in a PTE or that
4476 ; all we had was a simple segment exception and the PTE was there the whole time.
4477 ; If we find one a pointer, we are done.
4480 mr r7,r20 ; Copy the PCA pointer
4481 bl mapLockPteg ; Lock the PTEG
4483 lwz r12,mpPte(r31) ; Get the offset to the PTE
4484 mr r17,r6 ; Remember the PCA image
4485 mr r18,r6 ; Prime post-selection PCA image
4486 andi. r0,r12,mpHValid ; See if we have a PTE now
4487 li r21,8 ; Get the number of slots
4489 bne-- cr7,hpfNoPte64 ; Skip this for a block mapping...
4491 bne-- hpfBailOut ; Someone already did this for us...
4494 ; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a
4495 ; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
4496 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
4497 ; R4 returns the slot index.
4499 ; REMEMBER: CR7 indicates that we are building a block mapping.
4502 hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots
4503 mr r6,r17 ; Restore original state of PCA
4504 rlwimi r6,r18,0,8,15 ; Insert the updated steal slot
4505 blt- hpfBailOut ; Holy Cow, all slots are locked...
4507 bl mapSelSlot ; Go select a slot
4509 cmplwi cr5,r3,1 ; Did we steal a slot?
4510 mr r18,r6 ; Remember the PCA image after selection
4511 insrdi r19,r4,3,57 ; Insert slot index into PTEG address bits 57:59, forming the PTE address
4512 lwz r10,hwSteals(r2) ; Get the steal count
4513 blt++ cr5,hpfInser64 ; Nope, no steal...
4515 ld r6,0(r19) ; Get the old PTE
4516 ld r7,8(r19) ; Get the real part of the stealee
4517 rldicr r6,r6,0,62 ; Clear the valid bit
4518 bgt cr5,hpfNipBMx ; Do not try to lock a non-existant physent for a block mapping...
4519 srdi r3,r7,12 ; Change page address to a page address
4520 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
4521 cmplwi cr1,r3,0 ; Check if this is in RAM
4522 bne-- hpfNoPte64 ; Could not get it, try for another...
4524 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
4526 hpfNipBMx: std r6,0(r19) ; Set the invalid PTE
4527 li r9,tlbieLock ; Get the TLBIE lock
4529 srdi r11,r6,5 ; Shift VSID over for back hash
4530 mfsprg r4,0 ; Get the per_proc
4531 xor r11,r11,r19 ; Hash backwards to get low bits of VPN
4532 sync ; Make sure the invalid is stored
4534 sldi r12,r6,16 ; Move AVPN to EA position
4535 sldi r11,r11,5 ; Move this to the page position
4537 hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock
4538 mr. r0,r0 ; Is it locked?
4539 li r0,1 ; Get our lock word
4540 bne-- hpfTLBIE65 ; It is locked, go wait...
4542 stwcx. r0,0,r9 ; Try to get it
4543 rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN
4544 rldicl r8,r6,52,50 ; Isolate the address space ID
4545 bne-- hpfTLBIE64 ; We was beat...
4546 addi r10,r10,1 ; Bump the steal count
4548 rldicl r11,r12,0,16 ; Clear cause the book says so
4549 li r0,0 ; Lock clear value
4551 tlbie r11 ; Invalidate it everywhere
4553 mr r7,r8 ; Get a copy of the space ID
4554 eieio ; Make sure that the tlbie happens first
4555 rldimi r7,r7,14,36 ; Copy address space to make hash value
4556 tlbsync ; Wait for everyone to catch up
4557 rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top
4558 srdi r2,r6,26 ; Shift original segment down to bottom
4560 ptesync ; Make sure of it all
4561 xor r7,r7,r2 ; Compute original segment
4562 stw r0,tlbieLock(0) ; Clear the tlbie lock
4564 stw r10,hwSteals(r4) ; Save the steal count
4565 bgt cr5,hpfInser64 ; We just stole a block mapping...
4567 rldimi r12,r7,28,0 ; Insert decoded segment
4568 rldicl r4,r12,0,13 ; Trim to max supported address
4570 ld r12,8(r19) ; Get the RC of the just invalidated PTE
4572 la r11,ppLink+4(r3) ; Point to the master RC copy
4573 ld r7,ppLink(r3) ; Grab the pointer to the first mapping
4574 rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC
4576 hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC
4577 li r12,ppLFAmask ; Get mask to clean up alias pointer
4578 or r0,r0,r2 ; Merge in the new RC
4579 rotrdi r12,r12,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
4580 stwcx. r0,0,r11 ; Try to stick it back
4581 bne-- hpfMrgRC64 ; Try again if we collided...
4583 hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address
4584 beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket...
4586 lhz r10,mpSpace(r7) ; Get the space
4587 ld r9,mpVAddr(r7) ; And the vaddr
4588 cmplw cr1,r10,r8 ; Is this one of ours?
4589 xor r9,r4,r9 ; Compare virtual address
4590 cmpldi r9,0x1000 ; See if we really match
4591 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
4592 beq++ hpfFPnch2x ; Yes, found ours...
4594 ld r7,mpAlias(r7) ; Chain on to the next
4595 b hpfFPnchx ; Check it out...
4599 hpfTLBIE65: li r7,lgKillResv ; Point to the reservatio kill area
4600 stwcx. r7,0,r7 ; Kill reservation
4602 hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock
4603 mr. r0,r0 ; Is it locked?
4604 beq++ hpfTLBIE64 ; Yup, wait for it...
4605 b hpfTLBIE63 ; Nope, try again..
4609 hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG
4610 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though)
4611 bl mapPhysUnlock ; Unlock the physent now
4614 hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE
4615 eieio ; Make sure this gets there first
4616 std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid
4617 mr r17,r18 ; Get the PCA image to set
4618 b hpfFinish ; Go join the common exit code...
4621 lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead
4622 ori r0,r0,lo16(Choke) ; System abend
4626 ; This is the common code we execute when we are finished setting up the PTE.
4631 hpfFinish: sub r4,r19,r27 ; Get offset of PTE
4632 ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset
4633 bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map
4634 stw r4,mpPte(r31) ; Remember our PTE
4636 hpfBailOut: eieio ; Make sure all updates come first
4637 stw r17,0(r20) ; Unlock and set the final PCA
4640 ; This is where we go if we have started processing the fault, but find that someone
4641 ; else has taken care of it.
4644 hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags
4645 rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag
4646 sth r2,mpFlags+2(r31) ; Set it
4648 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4649 bl sxlkUnlock ; Unlock the search list
4651 li r11,T_IN_VAIN ; Say that it was handled
4652 b EXT(PFSExit) ; Leave...
4655 ; This is where we go when we find that someone else
4656 ; is in the process of handling the fault.
4659 hpfAbandon: li r3,lgKillResv ; Kill off any reservation
4660 stwcx. r3,0,r3 ; Do it
4662 la r3,pmapSXlk(r28) ; Point to the pmap search lock
4663 bl sxlkUnlock ; Unlock the search list
4665 li r11,T_IN_VAIN ; Say that it was handled
4666 b EXT(PFSExit) ; Leave...
4669 ; Guest shadow assist -- page fault handler
4671 ; Here we handle a fault in a guest pmap that has the guest shadow mapping
4672 ; assist active. We locate the VMM pmap extension block, which contains an
4673 ; index over the discontiguous multi-page shadow hash table. The index
4674 ; corresponding to our vaddr is selected, and the selected group within
4675 ; that page is searched for a valid and active entry that contains
4676 ; our vaddr and space id. The search is pipelined, so that we may fetch
4677 ; the next slot while examining the current slot for a hit. The final
4678 ; search iteration is unrolled so that we don't fetch beyond the end of
4679 ; our group, which could have dire consequences depending upon where the
4680 ; physical hash page is located.
4682 ; The VMM pmap extension block occupies a page. Begining at offset 0, we
4683 ; have the pmap_vmm_ext proper. Aligned at the first 128-byte boundary
4684 ; after the pmap_vmm_ext is the hash table physical address index, a
4685 ; linear list of 64-bit physical addresses of the pages that comprise
4688 ; In the event that we succesfully locate a guest mapping, we re-join
4689 ; the page fault path at hpfGVfound with the mapping's address in r31;
4690 ; otherwise, we re-join at hpfNotFound. In either case, we re-join holding
4691 ; a share of the pmap search lock for the host pmap with the host pmap's
4692 ; address in r28, the guest pmap's space id in r21, and the guest pmap's
4698 bt pf64Bitb,hpfGV64 ; Take 64-bit path for 64-bit machine
4700 lwz r11,pmapVmmExtPhys+4(r28) ; r11 <- VMM pmap extension block paddr
4701 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4702 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4703 lwz r28,vmxHostPmapPhys+4(r11) ; r28 <- host pmap's paddr
4704 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4705 rlwinm r10,r30,0,0xFFFFF000 ; r10 <- page-aligned guest vaddr
4706 lwz r6,vxsGpf(r11) ; Get guest fault count
4708 srwi r3,r10,12 ; Form shadow hash:
4709 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4710 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4711 ; Form index offset from hash page number
4712 add r31,r31,r4 ; r31 <- hash page index entry
4713 lwz r31,4(r31) ; r31 <- hash page paddr
4714 rlwimi r31,r3,GV_HGRP_SHIFT,GV_HGRP_MASK
4715 ; r31 <- hash group paddr
4717 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4718 bl sxlkShared ; Go get a shared lock on the mapping lists
4719 mr. r3,r3 ; Did we get the lock?
4720 bne- hpfBadLock ; Nope...
4722 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4723 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4724 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
4725 addi r6,r6,1 ; Increment guest fault count
4726 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4727 mtctr r0 ; in this group
4728 stw r6,vxsGpf(r11) ; Update guest fault count
4733 mr r6,r3 ; r6 <- current mapping slot's flags
4734 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4735 mr r7,r4 ; r7 <- current mapping slot's space ID
4736 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4737 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4738 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
4739 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
4740 xor r7,r7,r21 ; Compare space ID
4741 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4742 xor r8,r8,r10 ; Compare virtual address
4743 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4744 beq hpfGVfound ; Join common patch on hit (r31 points to mapping)
4746 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4747 bdnz hpfGVlp32 ; Iterate
4749 clrrwi r5,r5,12 ; Remove flags from virtual address
4750 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4751 xor r4,r4,r21 ; Compare space ID
4752 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4753 xor r5,r5,r10 ; Compare virtual address
4754 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4755 beq hpfGVfound ; Join common patch on hit (r31 points to mapping)
4761 ld r11,pmapVmmExtPhys(r28) ; r11 <- VMM pmap extension block paddr
4762 lwz r12,pmapFlags(r28) ; r12 <- guest pmap's flags
4763 lwz r21,pmapSpace(r28) ; r21 <- guest space ID number
4764 ld r28,vmxHostPmapPhys(r11) ; r28 <- host pmap's paddr
4765 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
4766 rlwinm r10,r30,0,0xFFFFF000 ; Form 64-bit guest vaddr
4767 rldimi r10,r29,32,0 ; cleaning up low-order 12 bits
4768 lwz r6,vxsGpf(r11) ; Get guest fault count
4770 srwi r3,r10,12 ; Form shadow hash:
4771 xor r3,r3,r21 ; spaceID ^ (vaddr >> 12)
4772 rlwinm r4,r3,GV_HPAGE_SHIFT,GV_HPAGE_MASK
4773 ; Form index offset from hash page number
4774 add r31,r31,r4 ; r31 <- hash page index entry
4775 ld r31,0(r31) ; r31 <- hash page paddr
4776 insrdi r31,r3,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
4777 ; r31 <- hash group paddr
4779 la r3,pmapSXlk(r28) ; Point to the host pmap's search lock
4780 bl sxlkShared ; Go get a shared lock on the mapping lists
4781 mr. r3,r3 ; Did we get the lock?
4782 bne-- hpfBadLock ; Nope...
4784 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
4785 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
4786 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
4787 addi r6,r6,1 ; Increment guest fault count
4788 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
4789 mtctr r0 ; in this group
4790 stw r6,vxsGpf(r11) ; Update guest fault count
4795 mr r6,r3 ; r6 <- current mapping slot's flags
4796 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
4797 mr r7,r4 ; r7 <- current mapping slot's space ID
4798 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
4799 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
4800 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
4801 andi. r6,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4802 xor r7,r7,r21 ; Compare space ID
4803 or r0,r6,r7 ; r0 <- !(!free && !dormant && space match)
4804 xor r8,r8,r10 ; Compare virtual address
4805 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4806 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4808 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
4809 bdnz hpfGVlp64 ; Iterate
4811 clrrdi r5,r5,12 ; Remove flags from virtual address
4812 andi. r3,r3,mpgFree+mpgDormant ; Isolate guest free and dormant flag
4813 xor r4,r4,r21 ; Compare space ID
4814 or r0,r3,r4 ; r0 <- !(!free && !dormant && space match)
4815 xor r5,r5,r10 ; Compare virtual address
4816 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
4817 beq hpfGVfound ; Join common path on hit (r31 points to mapping)
4820 lwz r6,vxsGpfMiss(r11) ; Guest guest fault miss count
4821 addi r6,r6,1 ; Increment miss count
4822 stw r6,vxsGpfMiss(r11) ; Update guest fault miss count
4826 * hw_set_user_space(pmap)
4827 * hw_set_user_space_dis(pmap)
4829 * Indicate whether memory space needs to be switched.
4830 * We really need to turn off interrupts here, because we need to be non-preemptable
4832 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
4833 * register usage here. The VMM switch code in vmachmon.s that calls this
4834 * know what registers are in use. Check that if these change.
4840 .globl EXT(hw_set_user_space)
4842 LEXT(hw_set_user_space)
4844 lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable
4845 mfmsr r10 ; Get the current MSR
4846 ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP
4847 ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE
4848 andc r10,r10,r8 ; Turn off VEC, FP for good
4849 andc r9,r10,r9 ; Turn off EE also
4850 mtmsr r9 ; Disable them
4851 isync ; Make sure FP and vec are off
4852 mfsprg r6,1 ; Get the current activation
4853 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4854 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4855 mfsprg r4,2 ; The the feature flags
4856 lwz r7,pmapvr(r3) ; Get the v to r translation
4857 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4858 mtcrf 0x80,r4 ; Get the Altivec flag
4859 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4860 cmplw cr1,r3,r2 ; Same address space as before?
4861 stw r7,ppUserPmap(r6) ; Show our real pmap address
4862 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4863 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4864 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4865 mtmsr r10 ; Restore interruptions
4866 beqlr-- cr1 ; Leave if the same address space or not Altivec
4868 dssall ; Need to kill all data streams if adrsp changed
4873 .globl EXT(hw_set_user_space_dis)
4875 LEXT(hw_set_user_space_dis)
4877 lwz r7,pmapvr(r3) ; Get the v to r translation
4878 mfsprg r4,2 ; The the feature flags
4879 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4880 mfsprg r6,1 ; Get the current activation
4881 lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
4882 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4883 mtcrf 0x80,r4 ; Get the Altivec flag
4884 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4885 cmplw cr1,r3,r2 ; Same address space as before?
4886 stw r7,ppUserPmap(r6) ; Show our real pmap address
4887 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4888 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4889 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4890 beqlr-- cr1 ; Leave if the same
4892 dssall ; Need to kill all data streams if adrsp changed
4896 /* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry
4898 * Lock must already be held on mapping block list
4899 * returns 0 if all slots filled.
4900 * returns n if a slot is found and it is not the last
4901 * returns -n if a slot is found and it is the last
4902 * when n and -n are returned, the corresponding bit is cleared
4903 * the mapping is zeroed out before return
4911 lwz r4,mbfree(r3) ; Get the 1st mask
4912 lis r0,0x8000 ; Get the mask to clear the first free bit
4913 lwz r5,mbfree+4(r3) ; Get the 2nd mask
4914 mr r12,r3 ; Save the block ptr
4915 cntlzw r3,r4 ; Get first 1-bit in 1st word
4916 srw. r9,r0,r3 ; Get bit corresponding to first free one
4917 cntlzw r10,r5 ; Get first free field in second word
4918 andc r4,r4,r9 ; Turn 1-bit off in 1st word
4919 bne mapalc1f ; Found one in 1st word
4921 srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word
4922 li r3,0 ; assume failure return
4923 andc r5,r5,r9 ; Turn it off
4924 beqlr-- ; There are no 1 bits left...
4925 addi r3,r10,32 ; set the correct number
4928 or. r0,r4,r5 ; any more bits set?
4929 stw r4,mbfree(r12) ; update bitmasks
4930 stw r5,mbfree+4(r12)
4932 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4934 dcbz r6,r12 ; clear the 64-byte mapping
4937 bnelr++ ; return if another bit remains set
4939 neg r3,r3 ; indicate we just returned the last bit
4943 /* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zero's a free 2-bit mapping entry
4945 * Lock must already be held on mapping block list
4946 * returns 0 if all slots filled.
4947 * returns n if a slot is found and it is not the last
4948 * returns -n if a slot is found and it is the last
4949 * when n and -n are returned, the corresponding bits are cleared
4950 * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)).
4951 * the mapping is zero'd out before return
4957 lwz r4,mbfree(r3) ; Get the first mask
4958 lis r0,0x8000 ; Get the mask to clear the first free bit
4959 lwz r5,mbfree+4(r3) ; Get the second mask
4960 mr r12,r3 ; Save the block ptr
4961 slwi r6,r4,1 ; shift first word over
4962 and r6,r4,r6 ; lite start of double bit runs in 1st word
4963 slwi r7,r5,1 ; shift 2nd word over
4964 cntlzw r3,r6 ; Get first free 2-bit run in 1st word
4965 and r7,r5,r7 ; lite start of double bit runs in 2nd word
4966 srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word
4967 cntlzw r10,r7 ; Get first free field in second word
4968 srwi r11,r9,1 ; shift over for 2nd bit in 1st word
4969 andc r4,r4,r9 ; Turn off 1st bit in 1st word
4970 andc r4,r4,r11 ; turn off 2nd bit in 1st word
4971 bne mapalc2a ; Found two consecutive free bits in 1st word
4973 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
4974 li r3,0 ; assume failure
4975 srwi r11,r9,1 ; get mask for 2nd bit
4976 andc r5,r5,r9 ; Turn off 1st bit in 2nd word
4977 andc r5,r5,r11 ; turn off 2nd bit in 2nd word
4978 beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either
4979 addi r3,r10,32 ; set the correct number
4982 or. r0,r4,r5 ; any more bits set?
4983 stw r4,mbfree(r12) ; update bitmasks
4984 stw r5,mbfree+4(r12)
4985 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4989 dcbz r6,r12 ; zero out the 128-byte mapping
4990 dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines
4991 dcbz r8,r12 ; because the mapping may not be 128-byte aligned
4994 bnelr++ ; return if another bit remains set
4996 neg r3,r3 ; indicate we just returned the last bit
5000 rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31
5001 and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free?
5002 beqlr ; no, we failed
5003 rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word
5004 rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word
5005 li r3,31 ; get index of this field
5010 ; This routine initialzes the hash table and PCA.
5011 ; It is done here because we may need to be 64-bit to do it.
5015 .globl EXT(hw_hash_init)
5019 mfsprg r10,2 ; Get feature flags
5020 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5021 mtcrf 0x02,r10 ; move pf64Bit to cr6
5022 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5023 lis r4,0xFF01 ; Set all slots free and start steal at end
5024 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5025 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5027 lwz r12,0(r12) ; Get hash table size
5029 bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint)
5031 lwz r11,4(r11) ; Get hash table base
5033 hhiNext32: cmplw r3,r12 ; Have we reached the end?
5034 bge- hhiCPCA32 ; Yes...
5035 dcbz r3,r11 ; Clear the line
5036 addi r3,r3,32 ; Next one...
5037 b hhiNext32 ; Go on...
5039 hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4
5040 li r3,-4 ; Displacement to first PCA entry
5041 neg r12,r12 ; Get negative end of PCA
5043 hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry
5044 subi r3,r3,4 ; Next slot
5045 cmpw r3,r12 ; Have we finished?
5046 bge+ hhiNPCA32 ; Not yet...
5049 hhiSF: mfmsr r9 ; Save the MSR
5051 mr r0,r9 ; Get a copy of the MSR
5052 ld r11,0(r11) ; Get hash table base
5053 rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0)
5054 mtmsrd r0 ; Turn on SF
5058 hhiNext64: cmpld r3,r12 ; Have we reached the end?
5059 bge-- hhiCPCA64 ; Yes...
5060 dcbz128 r3,r11 ; Clear the line
5061 addi r3,r3,128 ; Next one...
5062 b hhiNext64 ; Go on...
5064 hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4
5065 li r3,-4 ; Displacement to first PCA entry
5066 neg r12,r12 ; Get negative end of PCA
5068 hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry
5069 subi r3,r3,4 ; Next slot
5070 cmpd r3,r12 ; Have we finished?
5071 bge++ hhiNPCA64 ; Not yet...
5073 mtmsrd r9 ; Turn off SF if it was off
5079 ; This routine sets up the hardware to start translation.
5080 ; Note that we do NOT start translation.
5084 .globl EXT(hw_setup_trans)
5086 LEXT(hw_setup_trans)
5088 mfsprg r11,0 ; Get the per_proc block
5089 mfsprg r12,2 ; Get feature flags
5092 mtcrf 0x02,r12 ; Move pf64Bit to cr6
5093 stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid
5094 stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux
5095 sth r2,ppInvSeg(r11) ; Force a reload of the SRs
5096 sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel
5098 bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint)
5100 li r9,0 ; Clear out a register
5103 mtdbatu 0,r9 ; Invalidate maps
5104 mtdbatl 0,r9 ; Invalidate maps
5105 mtdbatu 1,r9 ; Invalidate maps
5106 mtdbatl 1,r9 ; Invalidate maps
5107 mtdbatu 2,r9 ; Invalidate maps
5108 mtdbatl 2,r9 ; Invalidate maps
5109 mtdbatu 3,r9 ; Invalidate maps
5110 mtdbatl 3,r9 ; Invalidate maps
5112 mtibatu 0,r9 ; Invalidate maps
5113 mtibatl 0,r9 ; Invalidate maps
5114 mtibatu 1,r9 ; Invalidate maps
5115 mtibatl 1,r9 ; Invalidate maps
5116 mtibatu 2,r9 ; Invalidate maps
5117 mtibatl 2,r9 ; Invalidate maps
5118 mtibatu 3,r9 ; Invalidate maps
5119 mtibatl 3,r9 ; Invalidate maps
5121 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5122 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5123 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5124 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5125 lwz r11,4(r11) ; Get hash table base
5126 lwz r12,0(r12) ; Get hash table size
5127 subi r12,r12,1 ; Back off by 1
5128 rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image
5130 mtsdr1 r11 ; Ok, we now have the hash table set up
5133 li r12,invalSpace ; Get the invalid segment value
5134 li r10,0 ; Start low
5136 hstsetsr: mtsrin r12,r10 ; Set the SR
5137 addis r10,r10,0x1000 ; Bump the segment
5138 mr. r10,r10 ; Are we finished?
5139 bne+ hstsetsr ; Nope...
5147 hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
5148 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
5149 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
5150 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
5151 ld r11,0(r11) ; Get hash table base
5152 lwz r12,0(r12) ; Get hash table size
5153 cntlzw r10,r12 ; Get the number of bits
5154 subfic r10,r10,13 ; Get the extra bits we need
5155 or r11,r11,r10 ; Add the size field to SDR1
5157 mtsdr1 r11 ; Ok, we now have the hash table set up
5160 li r0,0 ; Set an SLB slot index of 0
5161 slbia ; Trash all SLB entries (except for entry 0 that is)
5162 slbmfee r7,r0 ; Get the entry that is in SLB index 0
5163 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
5164 slbie r7 ; Invalidate it
5170 ; This routine turns on translation for the first time on a processor
5174 .globl EXT(hw_start_trans)
5176 LEXT(hw_start_trans)
5179 mfmsr r10 ; Get the msr
5180 ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation
5182 mtmsr r10 ; Everything falls apart here
5190 ; This routine validates a segment register.
5191 ; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va)
5194 ; r4 = segment[0:31]
5195 ; r5 = segment[32:63]
5199 ; Note that we transform the addr64_t (long long) parameters into single 64-bit values.
5200 ; Note that there is no reason to apply the key modifier here because this is only
5201 ; used for kernel accesses.
5205 .globl EXT(hw_map_seg)
5209 lwz r0,pmapSpace(r3) ; Get the space, we will need it soon
5210 lwz r9,pmapFlags(r3) ; Get the flags for the keys now
5211 mfsprg r10,2 ; Get feature flags
5214 ; Note: the following code would problably be easier to follow if I split it,
5215 ; but I just wanted to see if I could write this to work on both 32- and 64-bit
5216 ; machines combined.
5220 ; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines)
5221 ; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines)
5223 rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0;31] - no-op for 32-bit
5224 rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID
5225 mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6
5226 srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest
5227 rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:25]
5228 rlwimi r0,r0,14,4,17 ; Dup address space ID above itself
5229 rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines)
5230 rlwinm r2,r0,28,0,31 ; Rotate rotate low nybble to top of low half
5231 rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32
5232 rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines)
5234 rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space
5235 ; concatenated together. There is garbage
5236 ; at the top for 64-bit but we will clean
5238 rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit
5242 ; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or
5243 ; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines
5247 ; What we have now is:
5250 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5251 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5252 ; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value
5253 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5254 ; 0 0 1 2 3 - for 32-bit machines
5258 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5259 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5260 ; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA
5261 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5262 ; 0 0 1 2 3 - for 32-bit machines
5266 ; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
5267 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5268 ; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment
5269 ; +--------+--------+--------+--------+--------+--------+--------+--------+
5270 ; 0 0 1 2 3 - for 32-bit machines
5274 xor r8,r8,r2 ; Calculate VSID
5276 bf-- pf64Bitb,hms32bit ; Skip out if 32-bit...
5277 mfsprg r12,0 ; Get the per_proc
5278 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5279 mfmsr r6 ; Get current MSR
5280 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5281 mtmsrd r0,1 ; Set only the EE bit to 0
5282 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5283 mfmsr r11 ; Get the MSR right now, after disabling EE
5284 andc r2,r11,r2 ; Turn off translation now
5285 rldimi r2,r0,63,0 ; Get bit 64-bit turned on
5286 or r11,r11,r6 ; Turn on the EE bit if it was on
5287 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5288 isync ; Hang out a bit
5290 ld r6,validSegs(r12) ; Get the valid SLB entry flags
5291 sldi r9,r9,9 ; Position the key and noex bit
5293 rldimi r5,r8,12,0 ; Form the VSID/key
5295 not r3,r6 ; Make valids be 0s
5297 cntlzd r7,r3 ; Find a free SLB
5298 cmplwi r7,63 ; Did we find a free SLB entry?
5300 slbie r4 ; Since this ESID may still be in an SLBE, kill it
5302 oris r4,r4,0x0800 ; Turn on the valid bit in ESID
5303 addi r7,r7,1 ; Make sure we skip slb 0
5304 blt++ hmsFreeSeg ; Yes, go load it...
5307 ; No free SLB entries, select one that is in use and invalidate it
5309 lwz r2,ppSegSteal(r12) ; Get the next slot to steal
5310 addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
5311 addi r2,r2,1 ; Set next slot to steal
5312 slbmfee r3,r7 ; Get the entry that is in the selected spot
5313 subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap
5314 rldicr r3,r3,0,35 ; Clear the valid bit and the rest
5315 srawi r8,r8,31 ; Get -1 if steal index still in range
5316 slbie r3 ; Invalidate the in-use SLB entry
5317 and r2,r2,r8 ; Reset steal index when it should wrap
5320 stw r2,ppSegSteal(r12) ; Set the next slot to steal
5322 ; We are now ready to stick the SLB entry in the SLB and mark it in use
5325 hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0
5326 rldimi r4,r7,0,58 ; Copy in the SLB entry selector
5327 srd r0,r0,r2 ; Set bit mask for allocation
5328 rldicl r5,r5,0,15 ; Clean out the unsupported bits
5329 or r6,r6,r0 ; Turn on the allocation flag
5331 slbmte r5,r4 ; Make that SLB entry
5333 std r6,validSegs(r12) ; Mark as valid
5334 mtmsrd r11 ; Restore the MSR
5341 mfsprg r12,1 ; Get the current activation
5342 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5343 rlwinm r8,r8,0,8,31 ; Clean up the VSID
5344 rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting
5345 lis r0,0x8000 ; Set bit 0
5346 rlwimi r8,r9,28,1,3 ; Insert the keys and N bit
5347 srw r0,r0,r2 ; Get bit corresponding to SR
5348 addi r7,r12,validSegs ; Point to the valid segment flags directly
5350 mtsrin r8,r4 ; Set the actual SR
5351 isync ; Need to make sure this is done
5353 hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags
5354 or r6,r6,r0 ; Show that SR is valid
5355 stwcx. r6,0,r7 ; Set the valid SR flags
5356 bne-- hmsrupt ; Had an interrupt, need to get flags again...
5362 ; This routine invalidates a segment register.
5366 .globl EXT(hw_blow_seg)
5370 mfsprg r10,2 ; Get feature flags
5371 mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5373 rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean
5375 bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit...
5377 li r0,1 ; Prepare to set bit 0 (also to clear EE)
5378 mfmsr r6 ; Get current MSR
5379 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
5380 mtmsrd r0,1 ; Set only the EE bit to 0
5381 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
5382 mfmsr r11 ; Get the MSR right now, after disabling EE
5383 andc r2,r11,r2 ; Turn off translation now
5384 rldimi r2,r0,63,0 ; Get bit 64-bit turned on
5385 or r11,r11,r6 ; Turn on the EE bit if it was on
5386 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
5387 isync ; Hang out a bit
5389 rldimi r9,r3,32,0 ; Insert the top part of the ESID
5391 slbie r9 ; Invalidate the associated SLB entry
5393 mtmsrd r11 ; Restore the MSR
5400 mfsprg r12,1 ; Get the current activation
5401 lwz r12,ACT_PER_PROC(r12) ; Get the per_proc block
5402 addi r7,r12,validSegs ; Point to the valid segment flags directly
5403 lwarx r4,0,r7 ; Get and reserve the valid segment flags
5404 rlwinm r6,r9,4,28,31 ; Convert segment to number
5405 lis r2,0x8000 ; Set up a mask
5406 srw r2,r2,r6 ; Make a mask
5407 and. r0,r4,r2 ; See if this is even valid
5408 li r5,invalSpace ; Set the invalid address space VSID
5409 beqlr ; Leave if already invalid...
5411 mtsrin r5,r9 ; Slam the segment register
5412 isync ; Need to make sure this is done
5414 hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment
5415 stwcx. r4,0,r7 ; Set the valid SR flags
5416 beqlr++ ; Stored ok, no interrupt, time to leave...
5418 lwarx r4,0,r7 ; Get and reserve the valid segment flags again
5419 b hbsrupt ; Try again...
5422 ; This routine invadates the entire pmap segment cache
5424 ; Translation is on, interrupts may or may not be enabled.
5428 .globl EXT(invalidateSegs)
5430 LEXT(invalidateSegs)
5432 la r10,pmapCCtl(r3) ; Point to the segment cache control
5433 eqv r2,r2,r2 ; Get all foxes
5435 isInv: lwarx r4,0,r10 ; Get the segment cache control value
5436 rlwimi r4,r2,0,0,15 ; Slam in all invalid bits
5437 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5438 bne-- isInv0 ; Yes, try again...
5440 stwcx. r4,0,r10 ; Try to invalidate it
5441 bne-- isInv ; Someone else just stuffed it...
5445 isInv0: li r4,lgKillResv ; Get reservation kill zone
5446 stwcx. r4,0,r4 ; Kill reservation
5448 isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control
5449 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
5450 bne-- isInv ; Nope...
5451 b isInv1 ; Still locked do it again...
5454 ; This routine switches segment registers between kernel and user.
5455 ; We have some assumptions and rules:
5456 ; We are in the exception vectors
5457 ; pf64Bitb is set up
5458 ; R3 contains the MSR we going to
5459 ; We can not use R4, R13, R20, R21, R29
5460 ; R13 is the savearea
5461 ; R29 has the per_proc
5463 ; We return R3 as 0 if we did not switch between kernel and user
5464 ; We also maintain and apply the user state key modifier used by VMM support;
5465 ; If we go to the kernel it is set to 0, otherwise it follows the bit
5470 .globl EXT(switchSegs)
5474 lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator)
5475 lwz r9,spcFlags(r29) ; Pick up the special user state flags
5476 rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit
5477 rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit
5478 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
5479 or r2,r2,r3 ; This will 1 if we will be using user segments
5480 li r3,0 ; Get a selection mask
5481 cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg
5482 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
5483 sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user
5484 la r19,ppUserPmap(r29) ; Point to the current user pmap
5486 ; The following line is an exercise of a generally unreadable but recompile-friendly programing practice
5487 rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key
5489 andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise
5490 and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise
5491 and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise
5492 or r8,r8,r19 ; Get the pointer to the pmap we are using
5494 beqlr ; We are staying in the same mode, do not touch segs...
5496 lwz r28,0(r8) ; Get top half of pmap address
5497 lwz r10,4(r8) ; Get bottom half
5499 stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg
5500 rlwinm r28,r28,0,1,0 ; Copy top to top
5501 stw r30,ppMapFlags(r29) ; Set the key modifier
5502 rlwimi r28,r10,0,0,31 ; Insert bottom
5504 la r10,pmapCCtl(r28) ; Point to the segment cache control
5505 la r9,pmapSegCache(r28) ; Point to the segment cache
5507 ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control
5508 rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5509 ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit
5510 bne-- ssgLock0 ; Yup, this is in use...
5512 stwcx. r16,0,r10 ; Try to set the lock
5513 bne-- ssgLock ; Did we get contention?
5515 not r11,r15 ; Invert the invalids to valids
5516 li r17,0 ; Set a mask for the SRs we are loading
5517 isync ; Make sure we are all caught up
5519 bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it...
5522 slbia ; Trash all SLB entries (except for entry 0 that is)
5523 li r17,1 ; Get SLB index to load (skip slb 0)
5524 oris r0,r0,0x8000 ; Get set for a mask
5525 b ssg64Enter ; Start on a cache line...
5529 ssgLock0: li r15,lgKillResv ; Killing field
5530 stwcx. r15,0,r15 ; Kill reservation
5532 ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls
5533 rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
5534 beq++ ssgLock ; Yup, this is in use...
5535 b ssgLock1 ; Nope, try again...
5537 ; This is the 32-bit address space switch code.
5538 ; We take a reservation on the segment cache and walk through.
5539 ; For each entry, we load the specified entries and remember which
5540 ; we did with a mask. Then, we figure out which segments should be
5541 ; invalid and then see which actually are. Then we load those with the
5542 ; defined invalid VSID.
5543 ; Afterwards, we unlock the segment cache.
5548 ssg32Enter: cntlzw r12,r11 ; Find the next slot in use
5549 cmplwi r12,pmapSegCacheUse ; See if we are done
5550 slwi r14,r12,4 ; Index to the cache slot
5551 lis r0,0x8000 ; Get set for a mask
5552 add r14,r14,r9 ; Point to the entry
5554 bge- ssg32Done ; All done...
5556 lwz r5,sgcESID+4(r14) ; Get the ESID part
5557 srw r2,r0,r12 ; Form a mask for the one we are loading
5558 lwz r7,sgcVSID+4(r14) ; And get the VSID bottom
5560 andc r11,r11,r2 ; Clear the bit
5561 lwz r6,sgcVSID(r14) ; And get the VSID top
5563 rlwinm r2,r5,4,28,31 ; Change the segment number to a number
5565 xor r7,r7,r30 ; Modify the key before we actually set it
5566 srw r0,r0,r2 ; Get a mask for the SR we are loading
5567 rlwinm r8,r7,19,1,3 ; Insert the keys and N bit
5568 or r17,r17,r0 ; Remember the segment
5569 rlwimi r8,r7,20,12,31 ; Insert 4:23 the VSID
5570 rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents
5572 mtsrin r8,r5 ; Load the segment
5573 b ssg32Enter ; Go enter the next...
5577 ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags
5578 stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5580 lis r0,0x8000 ; Get set for a mask
5581 li r2,invalSpace ; Set the invalid address space VSID
5585 andc r16,r16,r17 ; Get list of SRs that were valid before but not now
5588 ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate
5589 cmplwi r18,16 ; Have we finished?
5590 srw r22,r0,r18 ; Get the mask bit
5591 rlwinm r23,r18,28,0,3 ; Get the segment register we need
5592 andc r16,r16,r22 ; Get rid of the guy we just did
5593 bge ssg32Really ; Yes, we are really done now...
5595 mtsrin r2,r23 ; Invalidate the SR
5596 b ssg32Inval ; Do the next...
5601 stw r17,validSegs(r29) ; Set the valid SR flags
5602 li r3,1 ; Set kernel/user transition
5606 ; This is the 64-bit address space switch code.
5607 ; First we blow away all of the SLB entries.
5609 ; loading the SLB. Afterwards, we release the cache lock
5611 ; Note that because we have to treat SLBE 0 specially, we do not ever use it...
5612 ; Its a performance thing...
5617 ssg64Enter: cntlzw r12,r11 ; Find the next slot in use
5618 cmplwi r12,pmapSegCacheUse ; See if we are done
5619 slwi r14,r12,4 ; Index to the cache slot
5620 srw r16,r0,r12 ; Form a mask for the one we are loading
5621 add r14,r14,r9 ; Point to the entry
5622 andc r11,r11,r16 ; Clear the bit
5623 bge-- ssg64Done ; All done...
5625 ld r5,sgcESID(r14) ; Get the ESID part
5626 ld r6,sgcVSID(r14) ; And get the VSID part
5627 oris r5,r5,0x0800 ; Turn on the valid bit
5628 or r5,r5,r17 ; Insert the SLB slot
5629 xor r6,r6,r30 ; Modify the key before we actually set it
5630 addi r17,r17,1 ; Bump to the next slot
5631 slbmte r6,r5 ; Make that SLB entry
5632 b ssg64Enter ; Go enter the next...
5636 ssg64Done: stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
5638 eqv r16,r16,r16 ; Load up with all foxes
5639 subfic r17,r17,64 ; Get the number of 1 bits we need
5641 sld r16,r16,r17 ; Get a mask for the used SLB entries
5642 li r3,1 ; Set kernel/user transition
5643 std r16,validSegs(r29) ; Set the valid SR flags
5647 ; mapSetUp - this function sets initial state for all mapping functions.
5648 ; We turn off all translations (physical), disable interruptions, and
5649 ; enter 64-bit mode if applicable.
5651 ; We also return the original MSR in r11, the feature flags in R12,
5652 ; and CR6 set up so we can do easy branches for 64-bit
5653 ; hw_clear_maps assumes r10, r9 will not be trashed.
5657 .globl EXT(mapSetUp)
5661 lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask
5662 mfsprg r12,2 ; Get feature flags
5663 ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well
5664 mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5665 mfmsr r11 ; Save the MSR
5666 mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
5667 andc r11,r11,r0 ; Clear VEC and FP for good
5668 ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR
5669 li r2,1 ; Prepare for 64 bit
5670 andc r0,r11,r0 ; Clear the rest
5671 bt pfNoMSRirb,msuNoMSR ; No MSR...
5672 bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint)
5674 mtmsr r0 ; Translation and all off
5675 isync ; Toss prefetch
5680 msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
5681 mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR
5687 msuNoMSR: mr r2,r3 ; Save R3 across call
5688 mr r3,r0 ; Get the new MSR value
5689 li r0,loadMSR ; Get the MSR setter SC
5691 mr r3,r2 ; Restore R3
5692 blr ; Go back all set up...
5696 ; Guest shadow assist -- remove all guest mappings
5698 ; Remove all mappings for a guest pmap from the shadow hash table.
5701 ; r3 : address of pmap, 32-bit kernel virtual address
5703 ; Non-volatile register usage:
5704 ; r24 : host pmap's physical address
5705 ; r25 : VMM extension block's physical address
5706 ; r26 : physent address
5707 ; r27 : guest pmap's space ID number
5708 ; r28 : current hash table page index
5709 ; r29 : guest pmap's physical address
5710 ; r30 : saved msr image
5711 ; r31 : current mapping
5714 .globl EXT(hw_rem_all_gv)
5718 #define graStackSize ((31-24+1)*4)+4
5719 stwu r1,-(FM_ALIGN(graStackSize)+FM_SIZE)(r1)
5720 ; Mint a new stack frame
5721 mflr r0 ; Get caller's return address
5722 mfsprg r11,2 ; Get feature flags
5723 mtcrf 0x02,r11 ; Insert feature flags into cr6
5724 stw r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5725 ; Save caller's return address
5726 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5727 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5728 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5729 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5730 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5731 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5732 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5733 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5735 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5737 bt++ pf64Bitb,gra64Salt ; Test for 64-bit machine
5738 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5739 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5740 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5741 b graStart ; Get to it
5742 gra64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5743 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5744 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5745 graStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5746 xor r29,r3,r9 ; Convert pmap_t virt->real
5747 mr r30,r11 ; Save caller's msr image
5749 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5750 bl sxlkExclusive ; Get lock exclusive
5752 lwz r3,vxsGra(r25) ; Get remove all count
5753 addi r3,r3,1 ; Increment remove all count
5754 stw r3,vxsGra(r25) ; Update remove all count
5756 li r28,0 ; r28 <- first hash page table index to search
5757 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5759 la r31,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
5760 rlwinm r11,r28,GV_PGIDX_SZ_LG2,GV_HPAGE_MASK
5761 ; Convert page index into page physical index offset
5762 add r31,r31,r11 ; Calculate page physical index entry address
5763 bt++ pf64Bitb,gra64Page ; Separate handling for 64-bit
5764 lwz r31,4(r31) ; r31 <- first slot in hash table page to examine
5765 b graLoop ; Examine all slots in this page
5766 gra64Page: ld r31,0(r31) ; r31 <- first slot in hash table page to examine
5767 b graLoop ; Examine all slots in this page
5770 graLoop: lwz r3,mpFlags(r31) ; Get mapping's flags
5771 lhz r4,mpSpace(r31) ; Get mapping's space ID number
5772 rlwinm r6,r3,0,mpgFree ; Isolate guest free mapping flag
5773 xor r4,r4,r27 ; Compare space ID number
5774 or. r0,r6,r4 ; cr0_eq <- !free && space id match
5775 bne graMiss ; Not one of ours, skip it
5777 lwz r11,vxsGraHits(r25) ; Get remove hit count
5778 addi r11,r11,1 ; Increment remove hit count
5779 stw r11,vxsGraHits(r25) ; Update remove hit count
5781 rlwinm. r0,r3,0,mpgDormant ; Is this entry dormant?
5782 bne graRemPhys ; Yes, nothing to disconnect
5784 lwz r11,vxsGraActive(r25) ; Get remove active count
5785 addi r11,r11,1 ; Increment remove hit count
5786 stw r11,vxsGraActive(r25) ; Update remove hit count
5788 bt++ pf64Bitb,graDscon64 ; Handle 64-bit disconnect separately
5789 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
5790 ; r31 <- mapping's physical address
5791 ; r3 -> PTE slot physical address
5792 ; r4 -> High-order 32 bits of PTE
5793 ; r5 -> Low-order 32 bits of PTE
5795 ; r7 -> PCA physical address
5796 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
5797 b graFreePTE ; Join 64-bit path to release the PTE
5798 graDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
5799 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
5800 graFreePTE: mr. r3,r3 ; Was there a valid PTE?
5801 beq- graRemPhys ; No valid PTE, we're almost done
5802 lis r0,0x8000 ; Prepare free bit for this slot
5803 srw r0,r0,r2 ; Position free bit
5804 or r6,r6,r0 ; Set it in our PCA image
5805 lwz r8,mpPte(r31) ; Get PTE pointer
5806 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
5807 stw r8,mpPte(r31) ; Save invalidated PTE pointer
5808 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
5809 stw r6,0(r7) ; Update PCA and unlock the PTEG
5812 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
5813 bl mapFindLockPN ; Find 'n' lock this page's physent
5814 mr. r26,r3 ; Got lock on our physent?
5815 beq-- graBadPLock ; No, time to bail out
5817 crset cr1_eq ; cr1_eq <- previous link is the anchor
5818 bt++ pf64Bitb,graRemove64 ; Use 64-bit version on 64-bit machine
5819 la r11,ppLink+4(r26) ; Point to chain anchor
5820 lwz r9,ppLink+4(r26) ; Get chain anchor
5821 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
5823 graRemLoop: beq- graRemoveMiss ; End of chain, this is not good
5824 cmplw r9,r31 ; Is this the mapping to remove?
5825 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
5826 bne graRemNext ; No, chain onward
5827 bt cr1_eq,graRemRetry ; Mapping to remove is chained from anchor
5828 stw r8,0(r11) ; Unchain gpv->phys mapping
5829 b graRemoved ; Exit loop
5831 lwarx r0,0,r11 ; Get previous link
5832 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
5833 stwcx. r0,0,r11 ; Update previous link
5834 bne- graRemRetry ; Lost reservation, retry
5835 b graRemoved ; Good work, let's get outta here
5837 graRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
5838 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5839 mr. r9,r8 ; Does next entry exist?
5840 b graRemLoop ; Carry on
5843 li r7,ppLFAmask ; Get mask to clean up mapping pointer
5844 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
5845 la r11,ppLink(r26) ; Point to chain anchor
5846 ld r9,ppLink(r26) ; Get chain anchor
5847 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
5848 graRem64Lp: beq-- graRemoveMiss ; End of chain, this is not good
5849 cmpld r9,r31 ; Is this the mapping to remove?
5850 ld r8,mpAlias(r9) ; Get forward chain pinter
5851 bne graRem64Nxt ; Not mapping to remove, chain on, dude
5852 bt cr1_eq,graRem64Rt ; Mapping to remove is chained from anchor
5853 std r8,0(r11) ; Unchain gpv->phys mapping
5854 b graRemoved ; Exit loop
5855 graRem64Rt: ldarx r0,0,r11 ; Get previous link
5856 and r0,r0,r7 ; Get flags
5857 or r0,r0,r8 ; Insert new forward pointer
5858 stdcx. r0,0,r11 ; Slam it back in
5859 bne-- graRem64Rt ; Lost reservation, retry
5860 b graRemoved ; Good work, let's go home
5863 la r11,mpAlias(r9) ; Point to (soon to be) previous link
5864 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
5865 mr. r9,r8 ; Does next entry exist?
5866 b graRem64Lp ; Carry on
5869 mr r3,r26 ; r3 <- physent's address
5870 bl mapPhysUnlock ; Unlock the physent (and its chain of mappings)
5872 lwz r3,mpFlags(r31) ; Get mapping's flags
5873 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
5874 ori r3,r3,mpgFree ; Mark mapping free
5875 stw r3,mpFlags(r31) ; Update flags
5877 graMiss: addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping
5878 rlwinm. r0,r31,0,GV_PAGE_MASK ; End of hash table page?
5879 bne graLoop ; No, examine next slot
5880 addi r28,r28,1 ; Increment hash table page index
5881 cmplwi r28,GV_HPAGES ; End of hash table?
5882 bne graPgLoop ; Examine next hash table page
5884 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5885 bl sxlkUnlock ; Release host pmap's search lock
5887 bt++ pf64Bitb,graRtn64 ; Handle 64-bit separately
5888 mtmsr r30 ; Restore 'rupts, translation
5889 isync ; Throw a small wrench into the pipeline
5890 b graPopFrame ; Nothing to do now but pop a frame and return
5891 graRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
5893 lwz r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5894 ; Get caller's return address
5895 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
5896 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
5897 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
5898 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
5899 mtlr r0 ; Prepare return address
5900 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
5901 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
5902 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
5903 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
5904 lwz r1,0(r1) ; Pop stack frame
5905 blr ; Return to caller
5909 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
5910 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
5911 li r3,failMapping ; The BOMB, Dmitri.
5912 sc ; The hydrogen bomb.
5916 ; Guest shadow assist -- remove local guest mappings
5918 ; Remove local mappings for a guest pmap from the shadow hash table.
5921 ; r3 : address of guest pmap, 32-bit kernel virtual address
5923 ; Non-volatile register usage:
5924 ; r20 : current active map word's physical address
5925 ; r21 : current hash table page address
5926 ; r22 : updated active map word in process
5927 ; r23 : active map word in process
5928 ; r24 : host pmap's physical address
5929 ; r25 : VMM extension block's physical address
5930 ; r26 : physent address
5931 ; r27 : guest pmap's space ID number
5932 ; r28 : current active map index
5933 ; r29 : guest pmap's physical address
5934 ; r30 : saved msr image
5935 ; r31 : current mapping
5938 .globl EXT(hw_rem_local_gv)
5940 LEXT(hw_rem_local_gv)
5942 #define grlStackSize ((31-20+1)*4)+4
5943 stwu r1,-(FM_ALIGN(grlStackSize)+FM_SIZE)(r1)
5944 ; Mint a new stack frame
5945 mflr r0 ; Get caller's return address
5946 mfsprg r11,2 ; Get feature flags
5947 mtcrf 0x02,r11 ; Insert feature flags into cr6
5948 stw r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
5949 ; Save caller's return address
5950 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
5951 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
5952 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
5953 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
5954 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
5955 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
5956 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
5957 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
5958 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
5959 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
5960 stw r21,FM_ARG0+0x28(r1) ; Save non-volatile r21
5961 stw r20,FM_ARG0+0x2C(r1) ; Save non-volatile r20
5963 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
5965 bt++ pf64Bitb,grl64Salt ; Test for 64-bit machine
5966 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
5967 lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
5968 lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
5969 b grlStart ; Get to it
5970 grl64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
5971 ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
5972 ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
5974 grlStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
5975 xor r29,r3,r9 ; Convert pmap_t virt->real
5976 mr r30,r11 ; Save caller's msr image
5978 la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
5979 bl sxlkExclusive ; Get lock exclusive
5981 li r28,0 ; r28 <- index of first active map word to search
5982 lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
5983 b grlMap1st ; Examine first map word
5986 grlNextMap: stw r22,0(r21) ; Save updated map word
5987 addi r28,r28,1 ; Increment map word index
5988 cmplwi r28,GV_MAP_WORDS ; See if we're done
5989 beq grlDone ; Yup, let's get outta here
5991 grlMap1st: la r20,VMX_ACTMAP_OFFSET(r25) ; Get base of active map word array
5992 rlwinm r11,r28,GV_MAPWD_SZ_LG2,GV_MAP_MASK
5993 ; Convert map index into map index offset
5994 add r20,r20,r11 ; Calculate map array element address
5995 lwz r22,0(r20) ; Get active map word at index
5996 mr. r23,r22 ; Any active mappings indicated?
5997 beq grlNextMap ; Nope, check next word
5999 la r21,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
6000 rlwinm r11,r28,GV_MAP_SHIFT,GV_HPAGE_MASK
6001 ; Extract page index from map word index and convert
6002 ; into page physical index offset
6003 add r21,r21,r11 ; Calculate page physical index entry address
6004 bt++ pf64Bitb,grl64Page ; Separate handling for 64-bit
6005 lwz r21,4(r21) ; Get selected hash table page's address
6006 b grlLoop ; Examine all slots in this page
6007 grl64Page: ld r21,0(r21) ; Get selected hash table page's address
6008 b grlLoop ; Examine all slots in this page
6011 grlLoop: cntlzw r11,r23 ; Get next active bit lit in map word
6012 cmplwi r11,32 ; Any active mappings left in this word?
6013 lis r12,0x8000 ; Prepare mask to reset bit
6014 srw r12,r12,r11 ; Position mask bit
6015 andc r23,r23,r12 ; Reset lit bit
6016 beq grlNextMap ; No bits lit, examine next map word
6018 slwi r31,r11,GV_SLOT_SZ_LG2 ; Get slot offset in slot band from lit bit number
6019 rlwinm r31,r28,GV_BAND_SHIFT,GV_BAND_MASK
6020 ; Extract slot band number from index and insert
6021 add r31,r31,r21 ; Add hash page address yielding mapping slot address
6023 lwz r3,mpFlags(r31) ; Get mapping's flags
6024 lhz r4,mpSpace(r31) ; Get mapping's space ID number
6025 rlwinm r5,r3,0,mpgGlobal ; Extract global bit
6026 xor r4,r4,r27 ; Compare space ID number
6027 or. r4,r4,r5 ; (space id miss || global)
6028 bne grlLoop ; Not one of ours, skip it
6029 andc r22,r22,r12 ; Reset active bit corresponding to this mapping
6030 ori r3,r3,mpgDormant ; Mark entry dormant
6031 stw r3,mpFlags(r31) ; Update mapping's flags
6033 bt++ pf64Bitb,grlDscon64 ; Handle 64-bit disconnect separately
6034 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6035 ; r31 <- mapping's physical address
6036 ; r3 -> PTE slot physical address
6037 ; r4 -> High-order 32 bits of PTE
6038 ; r5 -> Low-order 32 bits of PTE
6040 ; r7 -> PCA physical address
6041 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6042 b grlFreePTE ; Join 64-bit path to release the PTE
6043 grlDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6044 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6045 grlFreePTE: mr. r3,r3 ; Was there a valid PTE?
6046 beq- grlLoop ; No valid PTE, we're done with this mapping
6047 lis r0,0x8000 ; Prepare free bit for this slot
6048 srw r0,r0,r2 ; Position free bit
6049 or r6,r6,r0 ; Set it in our PCA image
6050 lwz r8,mpPte(r31) ; Get PTE pointer
6051 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6052 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6053 eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
6054 stw r6,0(r7) ; Update PCA and unlock the PTEG
6055 b grlLoop ; On to next active mapping in this map word
6057 grlDone: la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
6058 bl sxlkUnlock ; Release host pmap's search lock
6060 bt++ pf64Bitb,grlRtn64 ; Handle 64-bit separately
6061 mtmsr r30 ; Restore 'rupts, translation
6062 isync ; Throw a small wrench into the pipeline
6063 b grlPopFrame ; Nothing to do now but pop a frame and return
6064 grlRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
6066 lwz r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6067 ; Get caller's return address
6068 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6069 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6070 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6071 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6072 mtlr r0 ; Prepare return address
6073 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6074 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6075 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6076 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6077 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6078 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6079 lwz r21,FM_ARG0+0x28(r1) ; Restore non-volatile r21
6080 lwz r20,FM_ARG0+0x2C(r1) ; Restore non-volatile r20
6081 lwz r1,0(r1) ; Pop stack frame
6082 blr ; Return to caller
6086 ; Guest shadow assist -- resume a guest mapping
6088 ; Locates the specified dormant mapping, and if it exists validates it and makes it
6092 ; r3 : address of host pmap, 32-bit kernel virtual address
6093 ; r4 : address of guest pmap, 32-bit kernel virtual address
6094 ; r5 : host virtual address, high-order 32 bits
6095 ; r6 : host virtual address, low-order 32 bits
6096 ; r7 : guest virtual address, high-order 32 bits
6097 ; r8 : guest virtual address, low-order 32 bits
6098 ; r9 : guest mapping protection code
6100 ; Non-volatile register usage:
6101 ; r23 : VMM extension block's physical address
6102 ; r24 : physent physical address
6103 ; r25 : caller's msr image from mapSetUp
6104 ; r26 : guest mapping protection code
6105 ; r27 : host pmap physical address
6106 ; r28 : guest pmap physical address
6107 ; r29 : host virtual address
6108 ; r30 : guest virtual address
6109 ; r31 : gva->phys mapping's physical address
6112 .globl EXT(hw_res_map_gv)
6116 #define grsStackSize ((31-23+1)*4)+4
6118 stwu r1,-(FM_ALIGN(grsStackSize)+FM_SIZE)(r1)
6119 ; Mint a new stack frame
6120 mflr r0 ; Get caller's return address
6121 mfsprg r11,2 ; Get feature flags
6122 mtcrf 0x02,r11 ; Insert feature flags into cr6
6123 stw r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6124 ; Save caller's return address
6125 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6126 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6127 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6128 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6129 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6130 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6131 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6132 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6133 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6135 rlwinm r29,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of host vaddr
6136 rlwinm r30,r8,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6137 mr r26,r9 ; Copy guest mapping protection code
6139 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6140 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6141 bt++ pf64Bitb,grs64Salt ; Handle 64-bit machine separately
6142 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6143 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6144 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6145 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6146 srwi r11,r30,12 ; Form shadow hash:
6147 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6148 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6149 ; Form index offset from hash page number
6150 add r31,r31,r10 ; r31 <- hash page index entry
6151 lwz r31,4(r31) ; r31 <- hash page paddr
6152 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6153 ; r31 <- hash group paddr
6154 b grsStart ; Get to it
6156 grs64Salt: rldimi r29,r5,32,0 ; Insert high-order 32 bits of 64-bit host vaddr
6157 rldimi r30,r7,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6158 ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6159 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6160 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6161 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6162 srwi r11,r30,12 ; Form shadow hash:
6163 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6164 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6165 ; Form index offset from hash page number
6166 add r31,r31,r10 ; r31 <- hash page index entry
6167 ld r31,0(r31) ; r31 <- hash page paddr
6168 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6169 ; r31 <- hash group paddr
6171 grsStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6172 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6173 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6174 mr r25,r11 ; Save caller's msr image
6176 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6177 bl sxlkExclusive ; Get lock exclusive
6179 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6180 mtctr r0 ; in this group
6181 bt++ pf64Bitb,grs64Search ; Test for 64-bit machine
6183 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6184 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6185 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6186 b grs32SrchLp ; Let the search begin!
6190 mr r6,r3 ; r6 <- current mapping slot's flags
6191 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6192 mr r7,r4 ; r7 <- current mapping slot's space ID
6193 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6194 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6195 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6196 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6197 xor r7,r7,r9 ; Compare space ID
6198 or r0,r11,r7 ; r0 <- !(!free && space match)
6199 xor r8,r8,r30 ; Compare virtual address
6200 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6201 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6203 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6204 bdnz grs32SrchLp ; Iterate
6206 mr r6,r3 ; r6 <- current mapping slot's flags
6207 clrrwi r5,r5,12 ; Remove flags from virtual address
6208 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6209 xor r4,r4,r9 ; Compare space ID
6210 or r0,r11,r4 ; r0 <- !(!free && space match)
6211 xor r5,r5,r30 ; Compare virtual address
6212 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6213 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6214 b grsSrchMiss ; No joy in our hash group
6217 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6218 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6219 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6220 b grs64SrchLp ; Let the search begin!
6224 mr r6,r3 ; r6 <- current mapping slot's flags
6225 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6226 mr r7,r4 ; r7 <- current mapping slot's space ID
6227 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6228 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6229 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6230 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6231 xor r7,r7,r9 ; Compare space ID
6232 or r0,r11,r7 ; r0 <- !(!free && space match)
6233 xor r8,r8,r30 ; Compare virtual address
6234 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6235 beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
6237 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6238 bdnz grs64SrchLp ; Iterate
6240 mr r6,r3 ; r6 <- current mapping slot's flags
6241 clrrdi r5,r5,12 ; Remove flags from virtual address
6242 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6243 xor r4,r4,r9 ; Compare space ID
6244 or r0,r11,r4 ; r0 <- !(!free && space match)
6245 xor r5,r5,r30 ; Compare virtual address
6246 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6247 bne grsSrchMiss ; No joy in our hash group
6250 rlwinm. r0,r6,0,mpgDormant ; Is the mapping dormant?
6251 bne grsFindHost ; Yes, nothing to disconnect
6253 bt++ pf64Bitb,grsDscon64 ; Handle 64-bit disconnect separately
6254 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6255 ; r31 <- mapping's physical address
6256 ; r3 -> PTE slot physical address
6257 ; r4 -> High-order 32 bits of PTE
6258 ; r5 -> Low-order 32 bits of PTE
6260 ; r7 -> PCA physical address
6261 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6262 b grsFreePTE ; Join 64-bit path to release the PTE
6263 grsDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6264 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6265 grsFreePTE: mr. r3,r3 ; Was there a valid PTE?
6266 beq- grsFindHost ; No valid PTE, we're almost done
6267 lis r0,0x8000 ; Prepare free bit for this slot
6268 srw r0,r0,r2 ; Position free bit
6269 or r6,r6,r0 ; Set it in our PCA image
6270 lwz r8,mpPte(r31) ; Get PTE pointer
6271 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6272 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6273 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6274 stw r6,0(r7) ; Update PCA and unlock the PTEG
6278 // We now have a dormant guest mapping that matches our space id and virtual address. Our next
6279 // step is to locate the host mapping that completes the guest mapping's connection to a physical
6280 // frame. The guest and host mappings must connect to the same physical frame, so they must both
6281 // be chained on the same physent. We search the physent chain for a host mapping matching our
6282 // host's space id and the host virtual address. If we succeed, we know that the entire chain
6283 // of mappings (guest virtual->host virtual->physical) is valid, so the dormant mapping can be
6284 // resumed. If we fail to find the specified host virtual->physical mapping, it is because the
6285 // host virtual or physical address has changed since the guest mapping was suspended, so it
6286 // is no longer valid and cannot be resumed -- we therefore delete the guest mappping and tell
6287 // our caller that it will have to take its long path, translating the host virtual address
6288 // through the host's skiplist and installing a new guest mapping.
6290 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6291 bl mapFindLockPN ; Find 'n' lock this page's physent
6292 mr. r24,r3 ; Got lock on our physent?
6293 beq-- grsBadPLock ; No, time to bail out
6295 bt++ pf64Bitb,grsPFnd64 ; 64-bit version of physent chain search
6297 lwz r9,ppLink+4(r24) ; Get first mapping on physent
6298 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
6299 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
6300 grsPELoop: mr. r12,r9 ; Got a mapping to look at?
6301 beq- grsPEMiss ; Nope, we've missed hva->phys mapping
6302 lwz r7,mpFlags(r12) ; Get mapping's flags
6303 lhz r4,mpSpace(r12) ; Get mapping's space id number
6304 lwz r5,mpVAddr+4(r12) ; Get mapping's virtual address
6305 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
6307 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6308 rlwinm r5,r5,0,~mpHWFlags ; Bye-bye unsightly flags
6309 xori r0,r0,mpNormal ; Normal mapping?
6310 xor r4,r4,r6 ; Compare w/ host space id number
6311 xor r5,r5,r29 ; Compare w/ host virtual address
6312 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6313 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6315 b grsPELoop ; Iterate
6317 grsPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
6318 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
6319 ld r9,ppLink(r24) ; Get first mapping on physent
6320 lwz r6,pmapSpace(r27) ; Get pmap's space id number
6321 andc r9,r9,r0 ; Cleanup mapping pointer
6322 grsPELp64: mr. r12,r9 ; Got a mapping to look at?
6323 beq-- grsPEMiss ; Nope, we've missed hva->phys mapping
6324 lwz r7,mpFlags(r12) ; Get mapping's flags
6325 lhz r4,mpSpace(r12) ; Get mapping's space id number
6326 ld r5,mpVAddr(r12) ; Get mapping's virtual address
6327 ld r9,mpAlias(r12) ; Next mapping physent alias chain
6328 rlwinm r0,r7,0,mpType ; Isolate mapping's type
6329 rldicr r5,r5,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
6330 xori r0,r0,mpNormal ; Normal mapping?
6331 xor r4,r4,r6 ; Compare w/ host space id number
6332 xor r5,r5,r29 ; Compare w/ host virtual address
6333 or r0,r0,r4 ; r0 <- (wrong type || !space id)
6334 or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
6336 b grsPELp64 ; Iterate
6338 grsPEHit: lwz r0,mpVAddr+4(r31) ; Get va byte containing protection bits
6339 rlwimi r0,r26,0,mpPP ; Insert new protection bits
6340 stw r0,mpVAddr+4(r31) ; Write 'em back
6342 eieio ; Ensure previous mapping updates are visible
6343 lwz r0,mpFlags(r31) ; Get flags
6344 rlwinm r0,r0,0,~mpgDormant ; Turn off dormant flag
6345 stw r0,mpFlags(r31) ; Set updated flags, entry is now valid
6347 li r31,mapRtOK ; Indicate success
6348 b grsRelPhy ; Exit through physent lock release
6350 grsPEMiss: crset cr1_eq ; cr1_eq <- previous link is the anchor
6351 bt++ pf64Bitb,grsRemove64 ; Use 64-bit version on 64-bit machine
6352 la r11,ppLink+4(r24) ; Point to chain anchor
6353 lwz r9,ppLink+4(r24) ; Get chain anchor
6354 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6355 grsRemLoop: beq- grsPEMissMiss ; End of chain, this is not good
6356 cmplw r9,r31 ; Is this the mapping to remove?
6357 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6358 bne grsRemNext ; No, chain onward
6359 bt cr1_eq,grsRemRetry ; Mapping to remove is chained from anchor
6360 stw r8,0(r11) ; Unchain gpv->phys mapping
6361 b grsDelete ; Finish deleting mapping
6363 lwarx r0,0,r11 ; Get previous link
6364 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6365 stwcx. r0,0,r11 ; Update previous link
6366 bne- grsRemRetry ; Lost reservation, retry
6367 b grsDelete ; Finish deleting mapping
6370 grsRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6371 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6372 mr. r9,r8 ; Does next entry exist?
6373 b grsRemLoop ; Carry on
6376 li r7,ppLFAmask ; Get mask to clean up mapping pointer
6377 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
6378 la r11,ppLink(r24) ; Point to chain anchor
6379 ld r9,ppLink(r24) ; Get chain anchor
6380 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6381 grsRem64Lp: beq-- grsPEMissMiss ; End of chain, this is not good
6382 cmpld r9,r31 ; Is this the mapping to remove?
6383 ld r8,mpAlias(r9) ; Get forward chain pinter
6384 bne grsRem64Nxt ; Not mapping to remove, chain on, dude
6385 bt cr1_eq,grsRem64Rt ; Mapping to remove is chained from anchor
6386 std r8,0(r11) ; Unchain gpv->phys mapping
6387 b grsDelete ; Finish deleting mapping
6388 grsRem64Rt: ldarx r0,0,r11 ; Get previous link
6389 and r0,r0,r7 ; Get flags
6390 or r0,r0,r8 ; Insert new forward pointer
6391 stdcx. r0,0,r11 ; Slam it back in
6392 bne-- grsRem64Rt ; Lost reservation, retry
6393 b grsDelete ; Finish deleting mapping
6397 la r11,mpAlias(r9) ; Point to (soon to be) previous link
6398 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6399 mr. r9,r8 ; Does next entry exist?
6400 b grsRem64Lp ; Carry on
6403 lwz r3,mpFlags(r31) ; Get mapping's flags
6404 rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
6405 ori r3,r3,mpgFree ; Mark mapping free
6406 stw r3,mpFlags(r31) ; Update flags
6408 li r31,mapRtNotFnd ; Didn't succeed
6410 grsRelPhy: mr r3,r24 ; r3 <- physent addr
6411 bl mapPhysUnlock ; Unlock physent chain
6413 grsRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6414 bl sxlkUnlock ; Release host pmap search lock
6416 grsRtn: mr r3,r31 ; r3 <- result code
6417 bt++ pf64Bitb,grsRtn64 ; Handle 64-bit separately
6418 mtmsr r25 ; Restore 'rupts, translation
6419 isync ; Throw a small wrench into the pipeline
6420 b grsPopFrame ; Nothing to do now but pop a frame and return
6421 grsRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6423 lwz r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6424 ; Get caller's return address
6425 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6426 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6427 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6428 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6429 mtlr r0 ; Prepare return address
6430 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6431 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6432 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6433 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6434 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6435 lwz r1,0(r1) ; Pop stack frame
6436 blr ; Return to caller
6440 li r31,mapRtNotFnd ; Could not locate requested mapping
6441 b grsRelPmap ; Exit through host pmap search lock release
6445 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6446 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6447 li r3,failMapping ; The BOMB, Dmitri.
6448 sc ; The hydrogen bomb.
6452 ; Guest shadow assist -- add a guest mapping
6454 ; Adds a guest mapping.
6457 ; r3 : address of host pmap, 32-bit kernel virtual address
6458 ; r4 : address of guest pmap, 32-bit kernel virtual address
6459 ; r5 : guest virtual address, high-order 32 bits
6460 ; r6 : guest virtual address, low-order 32 bits (with mpHWFlags)
6461 ; r7 : new mapping's flags
6462 ; r8 : physical address, 32-bit page number
6464 ; Non-volatile register usage:
6465 ; r22 : hash group's physical address
6466 ; r23 : VMM extension block's physical address
6467 ; r24 : mapping's flags
6468 ; r25 : caller's msr image from mapSetUp
6469 ; r26 : physent physical address
6470 ; r27 : host pmap physical address
6471 ; r28 : guest pmap physical address
6472 ; r29 : physical address, 32-bit 4k-page number
6473 ; r30 : guest virtual address
6474 ; r31 : gva->phys mapping's physical address
6478 .globl EXT(hw_add_map_gv)
6483 #define gadStackSize ((31-22+1)*4)+4
6485 stwu r1,-(FM_ALIGN(gadStackSize)+FM_SIZE)(r1)
6486 ; Mint a new stack frame
6487 mflr r0 ; Get caller's return address
6488 mfsprg r11,2 ; Get feature flags
6489 mtcrf 0x02,r11 ; Insert feature flags into cr6
6490 stw r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6491 ; Save caller's return address
6492 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6493 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6494 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6495 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6496 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6497 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6498 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
6499 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
6500 stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
6501 stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
6503 rlwinm r30,r5,0,1,0 ; Get high-order 32 bits of guest vaddr
6504 rlwimi r30,r6,0,0,31 ; Get low-order 32 bits of guest vaddr
6505 mr r24,r7 ; Copy guest mapping's flags
6506 mr r29,r8 ; Copy target frame's physical address
6508 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6509 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6510 bt++ pf64Bitb,gad64Salt ; Test for 64-bit machine
6511 lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
6512 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6513 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6514 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6515 srwi r11,r30,12 ; Form shadow hash:
6516 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6517 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6518 ; Form index offset from hash page number
6519 add r22,r22,r10 ; r22 <- hash page index entry
6520 lwz r22,4(r22) ; r22 <- hash page paddr
6521 rlwimi r22,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6522 ; r22 <- hash group paddr
6523 b gadStart ; Get to it
6525 gad64Salt: ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
6526 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6527 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6528 la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
6529 srwi r11,r30,12 ; Form shadow hash:
6530 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6531 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6532 ; Form index offset from hash page number
6533 add r22,r22,r10 ; r22 <- hash page index entry
6534 ld r22,0(r22) ; r22 <- hash page paddr
6535 insrdi r22,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6536 ; r22 <- hash group paddr
6538 gadStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6539 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6540 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6541 mr r25,r11 ; Save caller's msr image
6543 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6544 bl sxlkExclusive ; Get lock exlusive
6546 mr r31,r22 ; Prepare to search this group
6547 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6548 mtctr r0 ; in this group
6549 bt++ pf64Bitb,gad64Search ; Test for 64-bit machine
6551 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6552 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6553 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6554 clrrwi r12,r30,12 ; r12 <- virtual address we're searching for
6555 b gad32SrchLp ; Let the search begin!
6559 mr r6,r3 ; r6 <- current mapping slot's flags
6560 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6561 mr r7,r4 ; r7 <- current mapping slot's space ID
6562 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6563 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6564 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6565 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6566 xor r7,r7,r9 ; Compare space ID
6567 or r0,r11,r7 ; r0 <- !(!free && space match)
6568 xor r8,r8,r12 ; Compare virtual address
6569 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6570 beq gadRelPmap ; Join common path on hit (r31 points to guest mapping)
6572 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6573 bdnz gad32SrchLp ; Iterate
6575 mr r6,r3 ; r6 <- current mapping slot's flags
6576 clrrwi r5,r5,12 ; Remove flags from virtual address
6577 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6578 xor r4,r4,r9 ; Compare space ID
6579 or r0,r11,r4 ; r0 <- !(!free && && space match)
6580 xor r5,r5,r12 ; Compare virtual address
6581 or. r0,r0,r5 ; cr0_eq <- free && space match && virtual addr match
6582 beq gadRelPmap ; Join common path on hit (r31 points to guest mapping)
6583 b gadScan ; No joy in our hash group
6586 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6587 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6588 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6589 clrrdi r12,r30,12 ; r12 <- virtual address we're searching for
6590 b gad64SrchLp ; Let the search begin!
6594 mr r6,r3 ; r6 <- current mapping slot's flags
6595 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6596 mr r7,r4 ; r7 <- current mapping slot's space ID
6597 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6598 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6599 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6600 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6601 xor r7,r7,r9 ; Compare space ID
6602 or r0,r11,r7 ; r0 <- !(!free && space match)
6603 xor r8,r8,r12 ; Compare virtual address
6604 or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
6605 beq gadRelPmap ; Hit, let upper-level redrive sort it out
6607 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6608 bdnz gad64SrchLp ; Iterate
6610 mr r6,r3 ; r6 <- current mapping slot's flags
6611 clrrdi r5,r5,12 ; Remove flags from virtual address
6612 rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
6613 xor r4,r4,r9 ; Compare space ID
6614 or r0,r11,r4 ; r0 <- !(!free && && space match)
6615 xor r5,r5,r12 ; Compare virtual address
6616 or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
6617 bne gadScan ; No joy in our hash group
6618 b gadRelPmap ; Hit, let upper-level redrive sort it out
6620 gadScan: lbz r12,mpgCursor(r22) ; Get group's cursor
6621 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6622 ; Prepare to address slot at cursor
6623 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6624 mtctr r0 ; in this group
6625 or r2,r22,r12 ; r2 <- 1st mapping to search
6626 lwz r3,mpFlags(r2) ; r3 <- 1st mapping slot's flags
6627 li r11,0 ; No dormant entries found yet
6628 b gadScanLoop ; Let the search begin!
6632 addi r12,r12,GV_SLOT_SZ ; Calculate next slot number to search
6633 rlwinm r12,r12,0,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6634 ; Trim off any carry, wrapping into slot number range
6635 mr r31,r2 ; r31 <- current mapping's address
6636 or r2,r22,r12 ; r2 <- next mapping to search
6637 mr r6,r3 ; r6 <- current mapping slot's flags
6638 lwz r3,mpFlags(r2) ; r3 <- next mapping slot's flags
6639 rlwinm. r0,r6,0,mpgFree ; Test free flag
6640 bne gadFillMap ; Join common path on hit (r31 points to free mapping)
6641 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6642 xori r0,r0,mpgDormant ; Invert dormant flag
6643 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6644 bne gadNotDorm ; Not dormant or we've already seen one
6645 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6646 gadNotDorm: bdnz gadScanLoop ; Iterate
6648 mr r31,r2 ; r31 <- final mapping's address
6649 rlwinm. r0,r6,0,mpgFree ; Test free flag in final mapping
6650 bne gadFillMap ; Join common path on hit (r31 points to dormant mapping)
6651 rlwinm r0,r6,0,mpgDormant ; Dormant entry?
6652 xori r0,r0,mpgDormant ; Invert dormant flag
6653 or. r0,r0,r11 ; Skip all but the first dormant entry we see
6654 bne gadCkDormant ; Not dormant or we've already seen one
6655 mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
6658 mr. r31,r11 ; Get dormant mapping, if any, and test
6659 bne gadUpCursor ; Go update the cursor, we'll take the dormant entry
6662 lbz r12,mpgCursor(r22) ; Get group's cursor
6663 rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
6664 ; Prepare to address slot at cursor
6665 or r31,r22,r12 ; r31 <- address of mapping to steal
6667 bt++ pf64Bitb,gadDscon64 ; Handle 64-bit disconnect separately
6668 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6669 ; r31 <- mapping's physical address
6670 ; r3 -> PTE slot physical address
6671 ; r4 -> High-order 32 bits of PTE
6672 ; r5 -> Low-order 32 bits of PTE
6674 ; r7 -> PCA physical address
6675 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6676 b gadFreePTE ; Join 64-bit path to release the PTE
6677 gadDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6678 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6679 gadFreePTE: mr. r3,r3 ; Was there a valid PTE?
6680 beq- gadUpCursor ; No valid PTE, we're almost done
6681 lis r0,0x8000 ; Prepare free bit for this slot
6682 srw r0,r0,r2 ; Position free bit
6683 or r6,r6,r0 ; Set it in our PCA image
6684 lwz r8,mpPte(r31) ; Get PTE pointer
6685 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
6686 stw r8,mpPte(r31) ; Save invalidated PTE pointer
6687 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
6688 stw r6,0(r7) ; Update PCA and unlock the PTEG
6691 rlwinm r12,r31,(32-GV_SLOT_SZ_LG2),GV_SLOT_MASK
6692 ; Recover slot number from stolen mapping's address
6693 addi r12,r12,1 ; Increment slot number
6694 rlwinm r12,r12,0,GV_SLOT_MASK ; Clip to slot number range
6695 stb r12,mpgCursor(r22) ; Update group's cursor
6697 lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
6698 bl mapFindLockPN ; Find 'n' lock this page's physent
6699 mr. r26,r3 ; Got lock on our physent?
6700 beq-- gadBadPLock ; No, time to bail out
6702 crset cr1_eq ; cr1_eq <- previous link is the anchor
6703 bt++ pf64Bitb,gadRemove64 ; Use 64-bit version on 64-bit machine
6704 la r11,ppLink+4(r26) ; Point to chain anchor
6705 lwz r9,ppLink+4(r26) ; Get chain anchor
6706 rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
6707 gadRemLoop: beq- gadPEMissMiss ; End of chain, this is not good
6708 cmplw r9,r31 ; Is this the mapping to remove?
6709 lwz r8,mpAlias+4(r9) ; Get forward chain pointer
6710 bne gadRemNext ; No, chain onward
6711 bt cr1_eq,gadRemRetry ; Mapping to remove is chained from anchor
6712 stw r8,0(r11) ; Unchain gpv->phys mapping
6713 b gadDelDone ; Finish deleting mapping
6715 lwarx r0,0,r11 ; Get previous link
6716 rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
6717 stwcx. r0,0,r11 ; Update previous link
6718 bne- gadRemRetry ; Lost reservation, retry
6719 b gadDelDone ; Finish deleting mapping
6721 gadRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
6722 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6723 mr. r9,r8 ; Does next entry exist?
6724 b gadRemLoop ; Carry on
6727 li r7,ppLFAmask ; Get mask to clean up mapping pointer
6728 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
6729 la r11,ppLink(r26) ; Point to chain anchor
6730 ld r9,ppLink(r26) ; Get chain anchor
6731 andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
6732 gadRem64Lp: beq-- gadPEMissMiss ; End of chain, this is not good
6733 cmpld r9,r31 ; Is this the mapping to remove?
6734 ld r8,mpAlias(r9) ; Get forward chain pinter
6735 bne gadRem64Nxt ; Not mapping to remove, chain on, dude
6736 bt cr1_eq,gadRem64Rt ; Mapping to remove is chained from anchor
6737 std r8,0(r11) ; Unchain gpv->phys mapping
6738 b gadDelDone ; Finish deleting mapping
6739 gadRem64Rt: ldarx r0,0,r11 ; Get previous link
6740 and r0,r0,r7 ; Get flags
6741 or r0,r0,r8 ; Insert new forward pointer
6742 stdcx. r0,0,r11 ; Slam it back in
6743 bne-- gadRem64Rt ; Lost reservation, retry
6744 b gadDelDone ; Finish deleting mapping
6748 la r11,mpAlias(r9) ; Point to (soon to be) previous link
6749 crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
6750 mr. r9,r8 ; Does next entry exist?
6751 b gadRem64Lp ; Carry on
6754 mr r3,r26 ; Get physent address
6755 bl mapPhysUnlock ; Unlock physent chain
6758 lwz r12,pmapSpace(r28) ; Get guest space id number
6759 li r2,0 ; Get a zero
6760 stw r24,mpFlags(r31) ; Set mapping's flags
6761 sth r12,mpSpace(r31) ; Set mapping's space id number
6762 stw r2,mpPte(r31) ; Set mapping's pte pointer invalid
6763 stw r29,mpPAddr(r31) ; Set mapping's physical address
6764 bt++ pf64Bitb,gadVA64 ; Use 64-bit version on 64-bit machine
6765 stw r30,mpVAddr+4(r31) ; Set mapping's virtual address (w/flags)
6766 b gadChain ; Continue with chaining mapping to physent
6767 gadVA64: std r30,mpVAddr(r31) ; Set mapping's virtual address (w/flags)
6769 gadChain: mr r3,r29 ; r3 <- physical frame address
6770 bl mapFindLockPN ; Find 'n' lock this page's physent
6771 mr. r26,r3 ; Got lock on our physent?
6772 beq-- gadBadPLock ; No, time to bail out
6774 bt++ pf64Bitb,gadChain64 ; Use 64-bit version on 64-bit machine
6775 lwz r12,ppLink+4(r26) ; Get forward chain
6776 rlwinm r11,r12,0,~ppFlags ; Get physent's forward pointer sans flags
6777 rlwimi r12,r31,0,~ppFlags ; Insert new mapping, preserve physent flags
6778 stw r11,mpAlias+4(r31) ; New mapping will head chain
6779 stw r12,ppLink+4(r26) ; Point physent to new mapping
6780 b gadFinish ; All over now...
6782 gadChain64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
6783 rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
6784 ld r12,ppLink(r26) ; Get forward chain
6785 andc r11,r12,r7 ; Get physent's forward chain pointer sans flags
6786 and r12,r12,r7 ; Isolate pointer's flags
6787 or r12,r12,r31 ; Insert new mapping's address forming pointer
6788 std r11,mpAlias(r31) ; New mapping will head chain
6789 std r12,ppLink(r26) ; Point physent to new mapping
6791 gadFinish: eieio ; Ensure new mapping is completely visible
6793 gadRelPhy: mr r3,r26 ; r3 <- physent addr
6794 bl mapPhysUnlock ; Unlock physent chain
6796 gadRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
6797 bl sxlkUnlock ; Release host pmap search lock
6799 bt++ pf64Bitb,gadRtn64 ; Handle 64-bit separately
6800 mtmsr r25 ; Restore 'rupts, translation
6801 isync ; Throw a small wrench into the pipeline
6802 b gadPopFrame ; Nothing to do now but pop a frame and return
6803 gadRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
6805 lwz r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6806 ; Get caller's return address
6807 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
6808 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
6809 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
6810 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
6811 mtlr r0 ; Prepare return address
6812 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
6813 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
6814 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
6815 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
6816 lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
6817 lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
6818 lwz r1,0(r1) ; Pop stack frame
6819 blr ; Return to caller
6823 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
6824 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
6825 li r3,failMapping ; The BOMB, Dmitri.
6826 sc ; The hydrogen bomb.
6830 ; Guest shadow assist -- supend a guest mapping
6832 ; Suspends a guest mapping.
6835 ; r3 : address of host pmap, 32-bit kernel virtual address
6836 ; r4 : address of guest pmap, 32-bit kernel virtual address
6837 ; r5 : guest virtual address, high-order 32 bits
6838 ; r6 : guest virtual address, low-order 32 bits
6840 ; Non-volatile register usage:
6841 ; r26 : VMM extension block's physical address
6842 ; r27 : host pmap physical address
6843 ; r28 : guest pmap physical address
6844 ; r29 : caller's msr image from mapSetUp
6845 ; r30 : guest virtual address
6846 ; r31 : gva->phys mapping's physical address
6850 .globl EXT(hw_susp_map_gv)
6852 LEXT(hw_susp_map_gv)
6854 #define gsuStackSize ((31-26+1)*4)+4
6856 stwu r1,-(FM_ALIGN(gsuStackSize)+FM_SIZE)(r1)
6857 ; Mint a new stack frame
6858 mflr r0 ; Get caller's return address
6859 mfsprg r11,2 ; Get feature flags
6860 mtcrf 0x02,r11 ; Insert feature flags into cr6
6861 stw r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
6862 ; Save caller's return address
6863 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
6864 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
6865 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
6866 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
6867 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
6868 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
6870 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
6872 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
6873 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
6874 bt++ pf64Bitb,gsu64Salt ; Test for 64-bit machine
6876 lwz r26,pmapVmmExtPhys+4(r3) ; r26 <- VMM pmap extension block paddr
6877 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
6878 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
6879 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6880 srwi r11,r30,12 ; Form shadow hash:
6881 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6882 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6883 ; Form index offset from hash page number
6884 add r31,r31,r10 ; r31 <- hash page index entry
6885 lwz r31,4(r31) ; r31 <- hash page paddr
6886 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
6887 ; r31 <- hash group paddr
6888 b gsuStart ; Get to it
6889 gsu64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
6890 ld r26,pmapVmmExtPhys(r3) ; r26 <- VMM pmap extension block paddr
6891 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
6892 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
6893 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
6894 srwi r11,r30,12 ; Form shadow hash:
6895 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
6896 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
6897 ; Form index offset from hash page number
6898 add r31,r31,r10 ; r31 <- hash page index entry
6899 ld r31,0(r31) ; r31 <- hash page paddr
6900 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
6901 ; r31 <- hash group paddr
6903 gsuStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
6904 xor r28,r4,r28 ; Convert guest pmap_t virt->real
6905 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
6906 mr r29,r11 ; Save caller's msr image
6908 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
6909 bl sxlkExclusive ; Get lock exclusive
6911 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
6912 mtctr r0 ; in this group
6913 bt++ pf64Bitb,gsu64Search ; Test for 64-bit machine
6915 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6916 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6917 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
6918 b gsu32SrchLp ; Let the search begin!
6922 mr r6,r3 ; r6 <- current mapping slot's flags
6923 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6924 mr r7,r4 ; r7 <- current mapping slot's space ID
6925 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6926 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6927 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
6928 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6929 xor r7,r7,r9 ; Compare space ID
6930 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6931 xor r8,r8,r30 ; Compare virtual address
6932 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6933 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6935 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6936 bdnz gsu32SrchLp ; Iterate
6938 mr r6,r3 ; r6 <- current mapping slot's flags
6939 clrrwi r5,r5,12 ; Remove flags from virtual address
6940 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6941 xor r4,r4,r9 ; Compare space ID
6942 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6943 xor r5,r5,r30 ; Compare virtual address
6944 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6945 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6946 b gsuSrchMiss ; No joy in our hash group
6949 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
6950 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
6951 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
6952 b gsu64SrchLp ; Let the search begin!
6956 mr r6,r3 ; r6 <- current mapping slot's flags
6957 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
6958 mr r7,r4 ; r7 <- current mapping slot's space ID
6959 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
6960 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
6961 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
6962 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6963 xor r7,r7,r9 ; Compare space ID
6964 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
6965 xor r8,r8,r30 ; Compare virtual address
6966 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6967 beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
6969 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
6970 bdnz gsu64SrchLp ; Iterate
6972 mr r6,r3 ; r6 <- current mapping slot's flags
6973 clrrdi r5,r5,12 ; Remove flags from virtual address
6974 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
6975 xor r4,r4,r9 ; Compare space ID
6976 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
6977 xor r5,r5,r30 ; Compare virtual address
6978 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
6979 bne gsuSrchMiss ; No joy in our hash group
6982 bt++ pf64Bitb,gsuDscon64 ; Handle 64-bit disconnect separately
6983 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
6984 ; r31 <- mapping's physical address
6985 ; r3 -> PTE slot physical address
6986 ; r4 -> High-order 32 bits of PTE
6987 ; r5 -> Low-order 32 bits of PTE
6989 ; r7 -> PCA physical address
6990 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
6991 b gsuFreePTE ; Join 64-bit path to release the PTE
6992 gsuDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
6993 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
6994 gsuFreePTE: mr. r3,r3 ; Was there a valid PTE?
6995 beq- gsuNoPTE ; No valid PTE, we're almost done
6996 lis r0,0x8000 ; Prepare free bit for this slot
6997 srw r0,r0,r2 ; Position free bit
6998 or r6,r6,r0 ; Set it in our PCA image
6999 lwz r8,mpPte(r31) ; Get PTE pointer
7000 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
7001 stw r8,mpPte(r31) ; Save invalidated PTE pointer
7002 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
7003 stw r6,0(r7) ; Update PCA and unlock the PTEG
7005 gsuNoPTE: lwz r3,mpFlags(r31) ; Get mapping's flags
7006 ori r3,r3,mpgDormant ; Mark entry dormant
7007 stw r3,mpFlags(r31) ; Save updated flags
7008 eieio ; Ensure update is visible when we unlock
7011 la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
7012 bl sxlkUnlock ; Release host pmap search lock
7014 bt++ pf64Bitb,gsuRtn64 ; Handle 64-bit separately
7015 mtmsr r29 ; Restore 'rupts, translation
7016 isync ; Throw a small wrench into the pipeline
7017 b gsuPopFrame ; Nothing to do now but pop a frame and return
7018 gsuRtn64: mtmsrd r29 ; Restore 'rupts, translation, 32-bit mode
7020 lwz r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7021 ; Get caller's return address
7022 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7023 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7024 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7025 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7026 mtlr r0 ; Prepare return address
7027 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7028 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7029 lwz r1,0(r1) ; Pop stack frame
7030 blr ; Return to caller
7033 ; Guest shadow assist -- test guest mapping reference and change bits
7035 ; Locates the specified guest mapping, and if it exists gathers its reference
7036 ; and change bit, optionallyÊresetting them.
7039 ; r3 : address of host pmap, 32-bit kernel virtual address
7040 ; r4 : address of guest pmap, 32-bit kernel virtual address
7041 ; r5 : guest virtual address, high-order 32 bits
7042 ; r6 : guest virtual address, low-order 32 bits
7043 ; r7 : reset boolean
7045 ; Non-volatile register usage:
7046 ; r24 : VMM extension block's physical address
7047 ; r25 : return code (w/reference and change bits)
7048 ; r26 : reset boolean
7049 ; r27 : host pmap physical address
7050 ; r28 : guest pmap physical address
7051 ; r29 : caller's msr image from mapSetUp
7052 ; r30 : guest virtual address
7053 ; r31 : gva->phys mapping's physical address
7057 .globl EXT(hw_test_rc_gv)
7061 #define gtdStackSize ((31-24+1)*4)+4
7063 stwu r1,-(FM_ALIGN(gtdStackSize)+FM_SIZE)(r1)
7064 ; Mint a new stack frame
7065 mflr r0 ; Get caller's return address
7066 mfsprg r11,2 ; Get feature flags
7067 mtcrf 0x02,r11 ; Insert feature flags into cr6
7068 stw r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7069 ; Save caller's return address
7070 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7071 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7072 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7073 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7074 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7075 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7076 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7077 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
7079 rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 20 bits of guest vaddr
7081 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7082 lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
7084 bt++ pf64Bitb,gtd64Salt ; Test for 64-bit machine
7086 lwz r24,pmapVmmExtPhys+4(r3) ; r24 <- VMM pmap extension block paddr
7087 lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
7088 lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
7089 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7090 srwi r11,r30,12 ; Form shadow hash:
7091 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7092 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7093 ; Form index offset from hash page number
7094 add r31,r31,r10 ; r31 <- hash page index entry
7095 lwz r31,4(r31) ; r31 <- hash page paddr
7096 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7097 ; r31 <- hash group paddr
7098 b gtdStart ; Get to it
7100 gtd64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7101 ld r24,pmapVmmExtPhys(r3) ; r24 <- VMM pmap extension block paddr
7102 ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
7103 ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
7104 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7105 srwi r11,r30,12 ; Form shadow hash:
7106 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7107 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7108 ; Form index offset from hash page number
7109 add r31,r31,r10 ; r31 <- hash page index entry
7110 ld r31,0(r31) ; r31 <- hash page paddr
7111 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7112 ; r31 <- hash group paddr
7114 gtdStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
7115 xor r28,r4,r28 ; Convert guest pmap_t virt->real
7116 mr r26,r7 ; Save reset boolean
7117 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7118 mr r29,r11 ; Save caller's msr image
7120 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7121 bl sxlkExclusive ; Get lock exclusive
7123 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7124 mtctr r0 ; in this group
7125 bt++ pf64Bitb,gtd64Search ; Test for 64-bit machine
7127 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7128 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7129 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7130 b gtd32SrchLp ; Let the search begin!
7134 mr r6,r3 ; r6 <- current mapping slot's flags
7135 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7136 mr r7,r4 ; r7 <- current mapping slot's space ID
7137 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7138 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7139 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7140 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7141 xor r7,r7,r9 ; Compare space ID
7142 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7143 xor r8,r8,r30 ; Compare virtual address
7144 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7145 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7147 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7148 bdnz gtd32SrchLp ; Iterate
7150 mr r6,r3 ; r6 <- current mapping slot's flags
7151 clrrwi r5,r5,12 ; Remove flags from virtual address
7152 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7153 xor r4,r4,r9 ; Compare space ID
7154 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7155 xor r5,r5,r30 ; Compare virtual address
7156 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7157 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7158 b gtdSrchMiss ; No joy in our hash group
7161 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7162 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7163 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7164 b gtd64SrchLp ; Let the search begin!
7168 mr r6,r3 ; r6 <- current mapping slot's flags
7169 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7170 mr r7,r4 ; r7 <- current mapping slot's space ID
7171 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7172 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7173 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7174 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7175 xor r7,r7,r9 ; Compare space ID
7176 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7177 xor r8,r8,r30 ; Compare virtual address
7178 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7179 beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
7181 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7182 bdnz gtd64SrchLp ; Iterate
7184 mr r6,r3 ; r6 <- current mapping slot's flags
7185 clrrdi r5,r5,12 ; Remove flags from virtual address
7186 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7187 xor r4,r4,r9 ; Compare space ID
7188 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7189 xor r5,r5,r30 ; Compare virtual address
7190 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7191 bne gtdSrchMiss ; No joy in our hash group
7194 bt++ pf64Bitb,gtdDo64 ; Split for 64 bit
7196 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
7198 cmplwi cr1,r26,0 ; Do we want to clear RC?
7199 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7200 mr. r3,r3 ; Was there a previously valid PTE?
7201 li r0,lo16(mpR|mpC) ; Get bits to clear
7203 and r25,r5,r0 ; Copy RC bits into result
7204 beq++ cr1,gtdNoClr32 ; Nope...
7206 andc r12,r12,r0 ; Clear mapping copy of RC
7207 andc r5,r5,r0 ; Clear PTE copy of RC
7208 sth r12,mpVAddr+6(r31) ; Set the new RC in mapping
7210 gtdNoClr32: beq-- gtdNoOld32 ; No previously valid PTE...
7212 sth r5,6(r3) ; Store updated RC in PTE
7213 eieio ; Make sure we do not reorder
7214 stw r4,0(r3) ; Revalidate the PTE
7216 eieio ; Make sure all updates come first
7217 stw r6,0(r7) ; Unlock PCA
7219 gtdNoOld32: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7220 bl sxlkUnlock ; Unlock the search list
7221 b gtdR32 ; Join common...
7226 gtdDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
7228 cmplwi cr1,r26,0 ; Do we want to clear RC?
7229 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
7230 mr. r3,r3 ; Was there a previously valid PTE?
7231 li r0,lo16(mpR|mpC) ; Get bits to clear
7233 and r25,r5,r0 ; Copy RC bits into result
7234 beq++ cr1,gtdNoClr64 ; Nope...
7236 andc r12,r12,r0 ; Clear mapping copy of RC
7237 andc r5,r5,r0 ; Clear PTE copy of RC
7238 sth r12,mpVAddr+6(r31) ; Set the new RC
7240 gtdNoClr64: beq-- gtdNoOld64 ; Nope, no pevious pte...
7242 sth r5,14(r3) ; Store updated RC
7243 eieio ; Make sure we do not reorder
7244 std r4,0(r3) ; Revalidate the PTE
7246 eieio ; Make sure all updates come first
7247 stw r6,0(r7) ; Unlock PCA
7249 gtdNoOld64: la r3,pmapSXlk(r27) ; Point to the pmap search lock
7250 bl sxlkUnlock ; Unlock the search list
7251 b gtdR64 ; Join common...
7254 la r3,pmapSXlk(r27) ; Point to the pmap search lock
7255 bl sxlkUnlock ; Unlock the search list
7256 li r25,mapRtNotFnd ; Get ready to return not found
7257 bt++ pf64Bitb,gtdR64 ; Test for 64-bit machine
7259 gtdR32: mtmsr r29 ; Restore caller's msr image
7263 gtdR64: mtmsrd r29 ; Restore caller's msr image
7265 gtdEpilog: lwz r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7266 ; Get caller's return address
7267 mr r3,r25 ; Get return code
7268 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7269 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7270 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7271 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7272 mtlr r0 ; Prepare return address
7273 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7274 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7275 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7276 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
7277 lwz r1,0(r1) ; Pop stack frame
7278 blr ; Return to caller
7281 ; Guest shadow assist -- convert guest to host virtual address
7283 ; Locates the specified guest mapping, and if it exists locates the
7284 ; first mapping belonging to its host on the physical chain and returns
7285 ; its virtual address.
7287 ; Note that if there are multiple mappings belonging to this host
7288 ; chained to the physent to which the guest mapping is chained, then
7289 ; host virtual aliases exist for this physical address. If host aliases
7290 ; exist, then we select the first on the physent chain, making it
7291 ; unpredictable which of the two or more possible host virtual addresses
7295 ; r3 : address of guest pmap, 32-bit kernel virtual address
7296 ; r4 : guest virtual address, high-order 32 bits
7297 ; r5 : guest virtual address, low-order 32 bits
7299 ; Non-volatile register usage:
7300 ; r24 : physent physical address
7301 ; r25 : VMM extension block's physical address
7302 ; r26 : host virtual address
7303 ; r27 : host pmap physical address
7304 ; r28 : guest pmap physical address
7305 ; r29 : caller's msr image from mapSetUp
7306 ; r30 : guest virtual address
7307 ; r31 : gva->phys mapping's physical address
7311 .globl EXT(hw_gva_to_hva)
7315 #define gthStackSize ((31-24+1)*4)+4
7317 stwu r1,-(FM_ALIGN(gtdStackSize)+FM_SIZE)(r1)
7318 ; Mint a new stack frame
7319 mflr r0 ; Get caller's return address
7320 mfsprg r11,2 ; Get feature flags
7321 mtcrf 0x02,r11 ; Insert feature flags into cr6
7322 stw r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7323 ; Save caller's return address
7324 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7325 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7326 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7327 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7328 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7329 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7330 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7331 stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
7333 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7335 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7336 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7338 bt++ pf64Bitb,gth64Salt ; Test for 64-bit machine
7340 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
7341 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7342 lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
7343 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7344 srwi r11,r30,12 ; Form shadow hash:
7345 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7346 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7347 ; Form index offset from hash page number
7348 add r31,r31,r10 ; r31 <- hash page index entry
7349 lwz r31,4(r31) ; r31 <- hash page paddr
7350 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7351 ; r31 <- hash group paddr
7352 b gthStart ; Get to it
7354 gth64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7355 ld r25,pmapVmmExtPhys(r3) ; r24 <- VMM pmap extension block paddr
7356 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7357 ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
7358 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7359 srwi r11,r30,12 ; Form shadow hash:
7360 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7361 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7362 ; Form index offset from hash page number
7363 add r31,r31,r10 ; r31 <- hash page index entry
7364 ld r31,0(r31) ; r31 <- hash page paddr
7365 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7366 ; r31 <- hash group paddr
7368 gthStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7369 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7370 mr r29,r11 ; Save caller's msr image
7372 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7373 bl sxlkExclusive ; Get lock exclusive
7375 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7376 mtctr r0 ; in this group
7377 bt++ pf64Bitb,gth64Search ; Test for 64-bit machine
7379 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7380 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7381 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7382 b gth32SrchLp ; Let the search begin!
7386 mr r6,r3 ; r6 <- current mapping slot's flags
7387 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7388 mr r7,r4 ; r7 <- current mapping slot's space ID
7389 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7390 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7391 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7392 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7393 xor r7,r7,r9 ; Compare space ID
7394 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7395 xor r8,r8,r30 ; Compare virtual address
7396 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7397 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7399 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7400 bdnz gth32SrchLp ; Iterate
7402 mr r6,r3 ; r6 <- current mapping slot's flags
7403 clrrwi r5,r5,12 ; Remove flags from virtual address
7404 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7405 xor r4,r4,r9 ; Compare space ID
7406 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7407 xor r5,r5,r30 ; Compare virtual address
7408 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7409 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7410 b gthSrchMiss ; No joy in our hash group
7413 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7414 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7415 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7416 b gth64SrchLp ; Let the search begin!
7420 mr r6,r3 ; r6 <- current mapping slot's flags
7421 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7422 mr r7,r4 ; r7 <- current mapping slot's space ID
7423 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7424 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7425 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7426 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7427 xor r7,r7,r9 ; Compare space ID
7428 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7429 xor r8,r8,r30 ; Compare virtual address
7430 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7431 beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
7433 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7434 bdnz gth64SrchLp ; Iterate
7436 mr r6,r3 ; r6 <- current mapping slot's flags
7437 clrrdi r5,r5,12 ; Remove flags from virtual address
7438 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7439 xor r4,r4,r9 ; Compare space ID
7440 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7441 xor r5,r5,r30 ; Compare virtual address
7442 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7443 bne gthSrchMiss ; No joy in our hash group
7445 gthSrchHit: lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
7446 bl mapFindLockPN ; Find 'n' lock this page's physent
7447 mr. r24,r3 ; Got lock on our physent?
7448 beq-- gthBadPLock ; No, time to bail out
7450 bt++ pf64Bitb,gthPFnd64 ; 64-bit version of physent chain search
7452 lwz r9,ppLink+4(r24) ; Get first mapping on physent
7453 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
7454 rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
7455 gthPELoop: mr. r12,r9 ; Got a mapping to look at?
7456 beq- gthPEMiss ; Nope, we've missed hva->phys mapping
7457 lwz r7,mpFlags(r12) ; Get mapping's flags
7458 lhz r4,mpSpace(r12) ; Get mapping's space id number
7459 lwz r26,mpVAddr+4(r12) ; Get mapping's virtual address
7460 lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
7462 rlwinm r0,r7,0,mpType ; Isolate mapping's type
7463 rlwinm r26,r26,0,~mpHWFlags ; Bye-bye unsightly flags
7464 xori r0,r0,mpNormal ; Normal mapping?
7465 xor r4,r4,r6 ; Compare w/ host space id number
7466 or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
7468 b gthPELoop ; Iterate
7470 gthPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
7471 rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
7472 ld r9,ppLink(r24) ; Get first mapping on physent
7473 lwz r6,pmapSpace(r27) ; Get host pmap's space id number
7474 andc r9,r9,r0 ; Cleanup mapping pointer
7475 gthPELp64: mr. r12,r9 ; Got a mapping to look at?
7476 beq-- gthPEMiss ; Nope, we've missed hva->phys mapping
7477 lwz r7,mpFlags(r12) ; Get mapping's flags
7478 lhz r4,mpSpace(r12) ; Get mapping's space id number
7479 ld r26,mpVAddr(r12) ; Get mapping's virtual address
7480 ld r9,mpAlias(r12) ; Next mapping physent alias chain
7481 rlwinm r0,r7,0,mpType ; Isolate mapping's type
7482 rldicr r26,r26,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
7483 xori r0,r0,mpNormal ; Normal mapping?
7484 xor r4,r4,r6 ; Compare w/ host space id number
7485 or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
7487 b gthPELp64 ; Iterate
7490 gthPEMiss: mr r3,r24 ; Get physent's address
7491 bl mapPhysUnlock ; Unlock physent chain
7493 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7494 bl sxlkUnlock ; Release host pmap search lock
7495 li r3,-1 ; Return 64-bit -1
7497 bt++ pf64Bitb,gthEpi64 ; Take 64-bit exit
7498 b gthEpi32 ; Take 32-bit exit
7501 gthPEHit: mr r3,r24 ; Get physent's address
7502 bl mapPhysUnlock ; Unlock physent chain
7503 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7504 bl sxlkUnlock ; Release host pmap search lock
7506 bt++ pf64Bitb,gthR64 ; Test for 64-bit machine
7508 gthR32: li r3,0 ; High-order 32 bits host virtual address
7509 mr r4,r26 ; Low-order 32 bits host virtual address
7510 gthEpi32: mtmsr r29 ; Restore caller's msr image
7515 gthR64: srdi r3,r26,32 ; High-order 32 bits host virtual address
7516 clrldi r4,r26,32 ; Low-order 32 bits host virtual address
7517 gthEpi64: mtmsrd r29 ; Restore caller's msr image
7519 gthEpilog: lwz r0,(FM_ALIGN(gthStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7520 ; Get caller's return address
7521 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7522 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7523 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7524 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7525 mtlr r0 ; Prepare return address
7526 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7527 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7528 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7529 lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
7530 lwz r1,0(r1) ; Pop stack frame
7531 blr ; Return to caller
7534 lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
7535 ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
7536 li r3,failMapping ; The BOMB, Dmitri.
7537 sc ; The hydrogen bomb.
7541 ; Guest shadow assist -- find a guest mapping
7543 ; Locates the specified guest mapping, and if it exists returns a copy
7547 ; r3 : address of guest pmap, 32-bit kernel virtual address
7548 ; r4 : guest virtual address, high-order 32 bits
7549 ; r5 : guest virtual address, low-order 32 bits
7550 ; r6 : 32 byte copy area, 32-bit kernel virtual address
7552 ; Non-volatile register usage:
7553 ; r25 : VMM extension block's physical address
7554 ; r26 : copy area virtual address
7555 ; r27 : host pmap physical address
7556 ; r28 : guest pmap physical address
7557 ; r29 : caller's msr image from mapSetUp
7558 ; r30 : guest virtual address
7559 ; r31 : gva->phys mapping's physical address
7563 .globl EXT(hw_find_map_gv)
7565 LEXT(hw_find_map_gv)
7567 #define gfmStackSize ((31-25+1)*4)+4
7569 stwu r1,-(FM_ALIGN(gfmStackSize)+FM_SIZE)(r1)
7570 ; Mint a new stack frame
7571 mflr r0 ; Get caller's return address
7572 mfsprg r11,2 ; Get feature flags
7573 mtcrf 0x02,r11 ; Insert feature flags into cr6
7574 stw r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7575 ; Save caller's return address
7576 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7577 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7578 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7579 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7580 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7581 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7582 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7584 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7585 mr r26,r6 ; Copy copy buffer vaddr
7587 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7588 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7590 bt++ pf64Bitb,gfm64Salt ; Test for 64-bit machine
7592 lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
7593 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7594 lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
7595 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7596 srwi r11,r30,12 ; Form shadow hash:
7597 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7598 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7599 ; Form index offset from hash page number
7600 add r31,r31,r10 ; r31 <- hash page index entry
7601 lwz r31,4(r31) ; r31 <- hash page paddr
7602 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7603 ; r31 <- hash group paddr
7604 b gfmStart ; Get to it
7606 gfm64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7607 ld r25,pmapVmmExtPhys(r3) ; r24 <- VMM pmap extension block paddr
7608 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7609 ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
7610 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7611 srwi r11,r30,12 ; Form shadow hash:
7612 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7613 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7614 ; Form index offset from hash page number
7615 add r31,r31,r10 ; r31 <- hash page index entry
7616 ld r31,0(r31) ; r31 <- hash page paddr
7617 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7618 ; r31 <- hash group paddr
7620 gfmStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
7621 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7622 mr r29,r11 ; Save caller's msr image
7624 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7625 bl sxlkExclusive ; Get lock exclusive
7627 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7628 mtctr r0 ; in this group
7629 bt++ pf64Bitb,gfm64Search ; Test for 64-bit machine
7631 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7632 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7633 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7634 b gfm32SrchLp ; Let the search begin!
7638 mr r6,r3 ; r6 <- current mapping slot's flags
7639 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7640 mr r7,r4 ; r7 <- current mapping slot's space ID
7641 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7642 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7643 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7644 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7645 xor r7,r7,r9 ; Compare space ID
7646 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7647 xor r8,r8,r30 ; Compare virtual address
7648 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7649 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7651 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7652 bdnz gfm32SrchLp ; Iterate
7654 mr r6,r3 ; r6 <- current mapping slot's flags
7655 clrrwi r5,r5,12 ; Remove flags from virtual address
7656 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7657 xor r4,r4,r9 ; Compare space ID
7658 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7659 xor r5,r5,r30 ; Compare virtual address
7660 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7661 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7662 b gfmSrchMiss ; No joy in our hash group
7665 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7666 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7667 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7668 b gfm64SrchLp ; Let the search begin!
7672 mr r6,r3 ; r6 <- current mapping slot's flags
7673 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7674 mr r7,r4 ; r7 <- current mapping slot's space ID
7675 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7676 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7677 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7678 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7679 xor r7,r7,r9 ; Compare space ID
7680 or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
7681 xor r8,r8,r30 ; Compare virtual address
7682 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7683 beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
7685 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7686 bdnz gfm64SrchLp ; Iterate
7688 mr r6,r3 ; r6 <- current mapping slot's flags
7689 clrrdi r5,r5,12 ; Remove flags from virtual address
7690 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
7691 xor r4,r4,r9 ; Compare space ID
7692 or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
7693 xor r5,r5,r30 ; Compare virtual address
7694 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7695 bne gfmSrchMiss ; No joy in our hash group
7697 gfmSrchHit: lwz r5,0(r31) ; Fetch 32 bytes of mapping from physical
7700 lwz r8,12(r31) ; +12
7701 lwz r9,16(r31) ; +16
7702 lwz r10,20(r31) ; +20
7703 lwz r11,24(r31) ; +24
7704 lwz r12,28(r31) ; +28
7706 li r31,mapRtOK ; Return found mapping
7708 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7709 bl sxlkUnlock ; Release host pmap search lock
7711 bt++ pf64Bitb,gfmEpi64 ; Test for 64-bit machine
7713 gfmEpi32: mtmsr r29 ; Restore caller's msr image
7714 isync ; A small wrench
7715 b gfmEpilog ; and a larger bubble
7718 gfmEpi64: mtmsrd r29 ; Restore caller's msr image
7720 gfmEpilog: mr. r3,r31 ; Copy/test mapping address
7721 beq gfmNotFound ; Skip copy if no mapping found
7723 stw r5,0(r26) ; Store 32 bytes of mapping into virtual
7726 stw r8,12(r26) ; +12
7727 stw r9,16(r26) ; +16
7728 stw r10,20(r26) ; +20
7729 stw r11,24(r26) ; +24
7730 stw r12,28(r26) ; +28
7733 lwz r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7734 ; Get caller's return address
7735 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7736 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7737 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7738 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7739 mtlr r0 ; Prepare return address
7740 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7741 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7742 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7743 lwz r1,0(r1) ; Pop stack frame
7744 blr ; Return to caller
7748 li r31,mapRtNotFnd ; Indicate mapping not found
7749 la r3,pmapSXlk(r27) ; Get host pmap search lock address
7750 bl sxlkUnlock ; Release host pmap search lock
7751 bt++ pf64Bitb,gfmEpi64 ; Take 64-bit exit
7752 b gfmEpi32 ; Take 32-bit exit
7756 ; Guest shadow assist -- change guest page protection
7758 ; Locates the specified dormant mapping, and if it is active, changes its
7762 ; r3 : address of guest pmap, 32-bit kernel virtual address
7763 ; r4 : guest virtual address, high-order 32 bits
7764 ; r5 : guest virtual address, low-order 32 bits
7765 ; r6 : guest mapping protection code
7767 ; Non-volatile register usage:
7768 ; r25 : caller's msr image from mapSetUp
7769 ; r26 : guest mapping protection code
7770 ; r27 : host pmap physical address
7771 ; r28 : guest pmap physical address
7772 ; r29 : VMM extension block's physical address
7773 ; r30 : guest virtual address
7774 ; r31 : gva->phys mapping's physical address
7777 .globl EXT(hw_protect_gv)
7781 #define gcpStackSize ((31-24+1)*4)+4
7783 stwu r1,-(FM_ALIGN(gcpStackSize)+FM_SIZE)(r1)
7784 ; Mint a new stack frame
7785 mflr r0 ; Get caller's return address
7786 mfsprg r11,2 ; Get feature flags
7787 mtcrf 0x02,r11 ; Insert feature flags into cr6
7788 stw r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7789 ; Save caller's return address
7790 stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
7791 stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
7792 stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
7793 stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
7794 stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
7795 stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
7796 stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
7798 rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
7799 mr r26,r6 ; Copy guest mapping protection code
7801 lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
7802 lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
7803 bt++ pf64Bitb,gcp64Salt ; Handle 64-bit machine separately
7804 lwz r29,pmapVmmExtPhys+4(r3) ; r29 <- VMM pmap extension block paddr
7805 lwz r27,vmxHostPmapPhys+4(r11) ; r27 <- host pmap paddr
7806 lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
7807 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7808 srwi r11,r30,12 ; Form shadow hash:
7809 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7810 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7811 ; Form index offset from hash page number
7812 add r31,r31,r10 ; r31 <- hash page index entry
7813 lwz r31,4(r31) ; r31 <- hash page paddr
7814 rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
7815 ; r31 <- hash group paddr
7816 b gcpStart ; Get to it
7818 gcp64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
7819 ld r29,pmapVmmExtPhys(r3) ; r29 <- VMM pmap extension block paddr
7820 ld r27,vmxHostPmapPhys(r11) ; r27 <- host pmap paddr
7821 ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
7822 la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
7823 srwi r11,r30,12 ; Form shadow hash:
7824 xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
7825 rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
7826 ; Form index offset from hash page number
7827 add r31,r31,r10 ; r31 <- hash page index entry
7828 ld r31,0(r31) ; r31 <- hash page paddr
7829 insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
7830 ; r31 <- hash group paddr
7832 gcpStart: xor r28,r4,r28 ; Convert guest pmap_t virt->real
7833 bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
7834 mr r25,r11 ; Save caller's msr image
7836 la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
7837 bl sxlkExclusive ; Get lock exclusive
7839 li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
7840 mtctr r0 ; in this group
7841 bt++ pf64Bitb,gcp64Search ; Test for 64-bit machine
7843 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7844 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7845 lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
7846 b gcp32SrchLp ; Let the search begin!
7850 mr r6,r3 ; r6 <- current mapping slot's flags
7851 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7852 mr r7,r4 ; r7 <- current mapping slot's space ID
7853 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7854 clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7855 lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
7856 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free flag
7857 xor r7,r7,r9 ; Compare space ID
7858 or r0,r11,r7 ; r0 <- free || dormant || !space match
7859 xor r8,r8,r30 ; Compare virtual address
7860 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7861 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7863 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7864 bdnz gcp32SrchLp ; Iterate
7866 mr r6,r3 ; r6 <- current mapping slot's flags
7867 clrrwi r5,r5,12 ; Remove flags from virtual address
7868 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free flag
7869 xor r4,r4,r9 ; Compare space ID
7870 or r0,r11,r4 ; r0 <- free || dormant || !space match
7871 xor r5,r5,r30 ; Compare virtual address
7872 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7873 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7874 b gcpSrchMiss ; No joy in our hash group
7877 lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
7878 lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
7879 ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
7880 b gcp64SrchLp ; Let the search begin!
7884 mr r6,r3 ; r6 <- current mapping slot's flags
7885 lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
7886 mr r7,r4 ; r7 <- current mapping slot's space ID
7887 lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
7888 clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
7889 ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
7890 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free flag
7891 xor r7,r7,r9 ; Compare space ID
7892 or r0,r11,r7 ; r0 <- free || dormant || !space match
7893 xor r8,r8,r30 ; Compare virtual address
7894 or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7895 beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
7897 addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
7898 bdnz gcp64SrchLp ; Iterate
7900 mr r6,r3 ; r6 <- current mapping slot's flags
7901 clrrdi r5,r5,12 ; Remove flags from virtual address
7902 andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free flag
7903 xor r4,r4,r9 ; Compare space ID
7904 or r0,r11,r4 ; r0 <- free || dormant || !space match
7905 xor r5,r5,r30 ; Compare virtual address
7906 or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
7907 bne gcpSrchMiss ; No joy in our hash group
7910 bt++ pf64Bitb,gcpDscon64 ; Handle 64-bit disconnect separately
7911 bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
7912 ; r31 <- mapping's physical address
7913 ; r3 -> PTE slot physical address
7914 ; r4 -> High-order 32 bits of PTE
7915 ; r5 -> Low-order 32 bits of PTE
7917 ; r7 -> PCA physical address
7918 rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
7919 b gcpFreePTE ; Join 64-bit path to release the PTE
7920 gcpDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
7921 rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
7922 gcpFreePTE: mr. r3,r3 ; Was there a valid PTE?
7923 beq- gcpSetKey ; No valid PTE, we're almost done
7924 lis r0,0x8000 ; Prepare free bit for this slot
7925 srw r0,r0,r2 ; Position free bit
7926 or r6,r6,r0 ; Set it in our PCA image
7927 lwz r8,mpPte(r31) ; Get PTE pointer
7928 rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
7929 stw r8,mpPte(r31) ; Save invalidated PTE pointer
7930 eieio ; Synchronize all previous updates (mapInvPtexx didn't)
7931 stw r6,0(r7) ; Update PCA and unlock the PTEG
7933 gcpSetKey: lwz r0,mpVAddr+4(r31) ; Get va word containing protection bits
7934 rlwimi r0,r26,0,mpPP ; Insert new protection bits
7935 stw r0,mpVAddr+4(r31) ; Write 'em back
7936 eieio ; Ensure previous mapping updates are visible
7937 li r31,mapRtOK ; I'm a success
7939 gcpRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
7940 bl sxlkUnlock ; Release host pmap search lock
7942 mr r3,r31 ; r3 <- result code
7943 bt++ pf64Bitb,gcpRtn64 ; Handle 64-bit separately
7944 mtmsr r25 ; Restore 'rupts, translation
7945 isync ; Throw a small wrench into the pipeline
7946 b gcpPopFrame ; Nothing to do now but pop a frame and return
7947 gcpRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
7949 lwz r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
7950 ; Get caller's return address
7951 lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
7952 lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
7953 lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
7954 lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
7955 mtlr r0 ; Prepare return address
7956 lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
7957 lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
7958 lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
7959 lwz r1,0(r1) ; Pop stack frame
7960 blr ; Return to caller
7964 li r31,mapRtNotFnd ; Could not locate requested mapping
7965 b gcpRelPmap ; Exit through host pmap search lock release
7969 ; Find the physent based on a physical page and try to lock it (but not too hard)
7970 ; Note that this table always has an entry that with a 0 table pointer at the end
7972 ; R3 contains ppnum on entry
7973 ; R3 is 0 if no entry was found
7974 ; R3 is physent if found
7975 ; cr0_eq is true if lock was obtained or there was no entry to lock
7976 ; cr0_eq is false of there was an entry and it was locked
7982 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
7983 mr r2,r3 ; Save our target
7984 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
7986 mapFindPhz: lwz r3,mrPhysTab(r9) ; Get the actual table address
7987 lwz r5,mrStart(r9) ; Get start of table entry
7988 lwz r0,mrEnd(r9) ; Get end of table entry
7989 addi r9,r9,mrSize ; Point to the next slot
7990 cmplwi cr2,r3,0 ; Are we at the end of the table?
7991 cmplw r2,r5 ; See if we are in this table
7992 cmplw cr1,r2,r0 ; Check end also
7993 sub r4,r2,r5 ; Calculate index to physical entry
7994 beq-- cr2,mapFindNo ; Leave if we did not find an entry...
7995 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
7996 slwi r4,r4,3 ; Get offset to physical entry
7998 blt-- mapFindPhz ; Did not find it...
8000 add r3,r3,r4 ; Point right to the slot
8002 mapFindOv: lwz r2,0(r3) ; Get the lock contents right now
8003 rlwinm. r0,r2,0,0,0 ; Is it locked?
8004 bnelr-- ; Yes it is...
8006 lwarx r2,0,r3 ; Get the lock
8007 rlwinm. r0,r2,0,0,0 ; Is it locked?
8008 oris r0,r2,0x8000 ; Set the lock bit
8009 bne-- mapFindKl ; It is locked, go get rid of reservation and leave...
8010 stwcx. r0,0,r3 ; Try to stuff it back...
8011 bne-- mapFindOv ; Collision, try again...
8012 isync ; Clear any speculations
8015 mapFindKl: li r2,lgKillResv ; Killing field
8016 stwcx. r2,0,r2 ; Trash reservation...
8017 crclr cr0_eq ; Make sure we do not think we got the lock
8020 mapFindNo: crset cr0_eq ; Make sure that we set this
8021 li r3,0 ; Show that we did not find it
8024 ; pmapCacheLookup - This function will look up an entry in the pmap segment cache.
8026 ; How the pmap cache lookup works:
8028 ; We use a combination of three things: a mask of valid entries, a sub-tag, and the
8029 ; ESID (aka the "tag"). The mask indicates which of the cache slots actually contain
8030 ; an entry. The sub-tag is a 16 entry 4 bit array that contains the low order 4 bits
8031 ; of the ESID, bits 32:36 of the effective for 64-bit and 0:3 for 32-bit. The cache
8032 ; entry contains the full 36 bit ESID.
8034 ; The purpose of the sub-tag is to limit the number of searches necessary when looking
8035 ; for an existing cache entry. Because there are 16 slots in the cache, we could end up
8036 ; searching all 16 if an match is not found.
8038 ; Essentially, we will search only the slots that have a valid entry and whose sub-tag
8039 ; matches. More than likely, we will eliminate almost all of the searches.
8043 ; R4 = ESID high half
8044 ; R5 = ESID low half
8047 ; R3 = pmap cache slot if found, 0 if not
8048 ; R10 = pmapCCtl address
8049 ; R11 = pmapCCtl image
8050 ; pmapCCtl locked on exit
8056 la r10,pmapCCtl(r3) ; Point to the segment cache control
8059 lwarx r11,0,r10 ; Get the segment cache control value
8060 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
8061 ori r0,r11,lo16(pmapCCtlLck) ; Turn on the lock bit
8062 bne-- pmapCacheLookur ; Nope...
8063 stwcx. r0,0,r10 ; Try to take the lock
8064 bne-- pmapCacheLookuq ; Someone else just stuffed it, try again...
8066 isync ; Make sure we get reservation first
8067 lwz r9,pmapSCSubTag(r3) ; Get the high part of the sub-tag
8068 rlwimi r5,r5,28,4,7 ; Copy sub-tag just to right of itself (XX------)
8069 lwz r10,pmapSCSubTag+4(r3) ; And the bottom half
8070 rlwimi r5,r5,24,8,15 ; Copy doubled sub-tag to right of itself (XXXX----)
8071 lis r8,0x8888 ; Get some eights
8072 rlwimi r5,r5,16,16,31 ; Copy quadrupled sub-tags to the right
8073 ori r8,r8,0x8888 ; Fill the rest with eights
8075 eqv r10,r10,r5 ; Get 0xF where we hit in bottom half
8076 eqv r9,r9,r5 ; Get 0xF where we hit in top half
8078 rlwinm r2,r10,1,0,30 ; Shift over 1
8079 rlwinm r0,r9,1,0,30 ; Shift over 1
8080 and r2,r2,r10 ; AND the even/odd pair into the even
8081 and r0,r0,r9 ; AND the even/odd pair into the even
8082 rlwinm r10,r2,2,0,28 ; Shift over 2
8083 rlwinm r9,r0,2,0,28 ; Shift over 2
8084 and r10,r2,r10 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
8085 and r9,r0,r9 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
8087 and r10,r10,r8 ; Clear out extras
8088 and r9,r9,r8 ; Clear out extras
8090 rlwinm r0,r10,3,1,28 ; Slide adjacent next to each other
8091 rlwinm r2,r9,3,1,28 ; Slide adjacent next to each other
8092 or r10,r0,r10 ; Merge them
8093 or r9,r2,r9 ; Merge them
8094 rlwinm r0,r10,6,2,26 ; Slide adjacent pairs next to each other
8095 rlwinm r2,r9,6,2,26 ; Slide adjacent pairs next to each other
8096 or r10,r0,r10 ; Merge them
8097 or r9,r2,r9 ; Merge them
8098 rlwimi r10,r10,12,4,7 ; Stick in the low-order adjacent quad
8099 rlwimi r9,r9,12,4,7 ; Stick in the low-order adjacent quad
8100 not r6,r11 ; Turn invalid into valid
8101 rlwimi r9,r10,24,8,15 ; Merge in the adjacent octs giving a hit mask
8103 la r10,pmapSegCache(r3) ; Point at the cache slots
8104 and. r6,r9,r6 ; Get mask of valid and hit
8106 li r3,0 ; Assume not found
8107 oris r0,r0,0x8000 ; Start a mask
8108 beqlr++ ; Leave, should usually be no hits...
8110 pclNextEnt: cntlzw r5,r6 ; Find an in use one
8111 cmplwi cr1,r5,pmapSegCacheUse ; Did we find one?
8112 rlwinm r7,r5,4,0,27 ; Index to the cache entry
8113 srw r2,r0,r5 ; Get validity mask bit
8114 add r7,r7,r10 ; Point to the cache slot
8115 andc r6,r6,r2 ; Clear the validity bit we just tried
8116 bgelr-- cr1 ; Leave if there are no more to check...
8118 lwz r5,sgcESID(r7) ; Get the top half
8120 cmplw r5,r4 ; Only need to check top because sub-tag is the entire other half
8122 bne++ pclNextEnt ; Nope, try again...
8124 mr r3,r7 ; Point to the slot
8130 li r11,lgKillResv ; The killing spot
8131 stwcx. r11,0,r11 ; Kill the reservation
8134 lwz r11,pmapCCtl(r3) ; Get the segment cache control
8135 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
8136 beq++ pmapCacheLookup ; Nope...
8137 b pmapCacheLookus ; Yup, keep waiting...
8141 ; mapMergeRC -- Given a physical mapping address in R31, locate its
8142 ; connected PTE (if any) and merge the PTE referenced and changed bits
8143 ; into the mapping and physent.
8149 lwz r0,mpPte(r31) ; Grab the PTE offset
8150 mfsdr1 r7 ; Get the pointer to the hash table
8151 lwz r5,mpVAddr+4(r31) ; Grab the virtual address
8152 rlwinm r10,r7,0,0,15 ; Clean up the hash table base
8153 andi. r3,r0,mpHValid ; Is there a possible PTE?
8154 srwi r7,r0,4 ; Convert to PCA units
8155 rlwinm r7,r7,0,0,29 ; Clean up PCA offset
8156 mflr r2 ; Save the return
8157 subfic r7,r7,-4 ; Convert to -4 based negative index
8158 add r7,r10,r7 ; Point to the PCA directly
8159 beqlr-- ; There was no PTE to start with...
8161 bl mapLockPteg ; Lock the PTEG
8163 lwz r0,mpPte(r31) ; Grab the PTE offset
8164 mtlr r2 ; Restore the LR
8165 andi. r3,r0,mpHValid ; Is there a possible PTE?
8166 beq- mMPUnlock ; There is no PTE, someone took it so just unlock and leave...
8168 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8169 add r3,r3,r10 ; Point to actual PTE
8170 lwz r5,4(r3) ; Get the real part of the PTE
8171 srwi r10,r5,12 ; Change physical address to a ppnum
8173 mMNmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table
8174 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
8175 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
8176 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
8177 rlwinm r11,r11,2,24,29 ; Mask index bits and convert to byte offset
8178 add r11,r11,r8 ; Point to the bank table
8179 lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer
8180 lwz r11,mrStart(r11) ; Get the start of bank
8181 rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC
8182 addi r2,r2,4 ; Offset to last half of field
8183 stw r0,mpVAddr+4(r31) ; Set the new RC into the field
8184 sub r11,r10,r11 ; Get the index into the table
8185 rlwinm r11,r11,3,0,28 ; Get offset to the physent
8187 mMmrgRC: lwarx r10,r11,r2 ; Get the master RC
8188 rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC
8189 or r0,r0,r10 ; Merge in the new RC
8190 stwcx. r0,r11,r2 ; Try to stick it back
8191 bne-- mMmrgRC ; Try again if we collided...
8192 eieio ; Commit all updates
8195 stw r6,0(r7) ; Unlock PTEG
8199 ; 64-bit version of mapMergeRC
8204 lwz r0,mpPte(r31) ; Grab the PTE offset
8205 ld r5,mpVAddr(r31) ; Grab the virtual address
8206 mfsdr1 r7 ; Get the pointer to the hash table
8207 rldicr r10,r7,0,45 ; Clean up the hash table base
8208 andi. r3,r0,mpHValid ; Is there a possible PTE?
8209 srdi r7,r0,5 ; Convert to PCA units
8210 rldicr r7,r7,0,61 ; Clean up PCA
8211 subfic r7,r7,-4 ; Convert to -4 based negative index
8212 mflr r2 ; Save the return
8213 add r7,r10,r7 ; Point to the PCA directly
8214 beqlr-- ; There was no PTE to start with...
8216 bl mapLockPteg ; Lock the PTEG
8218 lwz r0,mpPte(r31) ; Grab the PTE offset again
8219 mtlr r2 ; Restore the LR
8220 andi. r3,r0,mpHValid ; Is there a possible PTE?
8221 beq-- mMPUnlock ; There is no PTE, someone took it so just unlock and leave...
8223 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8224 add r3,r3,r10 ; Point to the actual PTE
8225 ld r5,8(r3) ; Get the real part
8226 srdi r10,r5,12 ; Change physical address to a ppnum
8227 b mMNmerge ; Join the common 32-64-bit code...
8231 ; This routine, given a mapping, will find and lock the PTEG
8232 ; If mpPte does not point to a PTE (checked before and after lock), it will unlock the
8233 ; PTEG and return. In this case we will have undefined in R4
8234 ; and the low 12 bits of mpVAddr valid in R5. R3 will contain 0.
8236 ; If the mapping is still valid, we will invalidate the PTE and merge
8237 ; the RC bits into the physent and also save them into the mapping.
8239 ; We then return with R3 pointing to the PTE slot, R4 is the
8240 ; top of the PTE and R5 is the bottom. R6 contains the PCA.
8241 ; R7 points to the PCA entry.
8243 ; Note that we should NEVER be called on a block or special mapping.
8244 ; We could do many bad things.
8250 lwz r0,mpPte(r31) ; Grab the PTE offset
8251 mfsdr1 r7 ; Get the pointer to the hash table
8252 lwz r5,mpVAddr+4(r31) ; Grab the virtual address
8253 rlwinm r10,r7,0,0,15 ; Clean up the hash table base
8254 andi. r3,r0,mpHValid ; Is there a possible PTE?
8255 srwi r7,r0,4 ; Convert to PCA units
8256 rlwinm r7,r7,0,0,29 ; Clean up PCA offset
8257 mflr r2 ; Save the return
8258 subfic r7,r7,-4 ; Convert to -4 based negative index
8259 add r7,r10,r7 ; Point to the PCA directly
8260 beqlr-- ; There was no PTE to start with...
8262 bl mapLockPteg ; Lock the PTEG
8264 lwz r0,mpPte(r31) ; Grab the PTE offset
8265 mtlr r2 ; Restore the LR
8266 andi. r3,r0,mpHValid ; Is there a possible PTE?
8267 beq- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
8269 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8270 add r3,r3,r10 ; Point to actual PTE
8271 lwz r4,0(r3) ; Get the top of the PTE
8273 li r8,tlbieLock ; Get the TLBIE lock
8274 rlwinm r0,r4,0,1,31 ; Clear the valid bit
8275 stw r0,0(r3) ; Invalidate the PTE
8277 sync ; Make sure everyone sees the invalidate
8279 mITLBIE32: lwarx r0,0,r8 ; Get the TLBIE lock
8280 mfsprg r2,2 ; Get feature flags
8281 mr. r0,r0 ; Is it locked?
8282 li r0,1 ; Get our lock word
8283 bne- mITLBIE32 ; It is locked, go wait...
8285 stwcx. r0,0,r8 ; Try to get it
8286 bne- mITLBIE32 ; We was beat...
8288 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
8289 li r0,0 ; Lock clear value
8291 tlbie r5 ; Invalidate it everywhere
8293 beq- mINoTS32 ; Can not have MP on this machine...
8295 eieio ; Make sure that the tlbie happens first
8296 tlbsync ; Wait for everyone to catch up
8297 sync ; Make sure of it all
8299 mINoTS32: stw r0,tlbieLock(0) ; Clear the tlbie lock
8300 lwz r5,4(r3) ; Get the real part
8301 srwi r10,r5,12 ; Change physical address to a ppnum
8303 mINmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table
8304 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
8305 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
8306 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
8307 rlwinm r11,r11,2,24,29 ; Mask index bits and convert to byte offset
8308 add r11,r11,r8 ; Point to the bank table
8309 lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer
8310 lwz r11,mrStart(r11) ; Get the start of bank
8311 rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC
8312 addi r2,r2,4 ; Offset to last half of field
8313 stw r0,mpVAddr+4(r31) ; Set the new RC into the field
8314 sub r11,r10,r11 ; Get the index into the table
8315 rlwinm r11,r11,3,0,28 ; Get offset to the physent
8318 mImrgRC: lwarx r10,r11,r2 ; Get the master RC
8319 rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC
8320 or r0,r0,r10 ; Merge in the new RC
8321 stwcx. r0,r11,r2 ; Try to stick it back
8322 bne-- mImrgRC ; Try again if we collided...
8324 blr ; Leave with the PCA still locked up...
8326 mIPUnlock: eieio ; Make sure all updates come first
8328 stw r6,0(r7) ; Unlock
8337 lwz r0,mpPte(r31) ; Grab the PTE offset
8338 ld r5,mpVAddr(r31) ; Grab the virtual address
8339 mfsdr1 r7 ; Get the pointer to the hash table
8340 rldicr r10,r7,0,45 ; Clean up the hash table base
8341 andi. r3,r0,mpHValid ; Is there a possible PTE?
8342 srdi r7,r0,5 ; Convert to PCA units
8343 rldicr r7,r7,0,61 ; Clean up PCA
8344 subfic r7,r7,-4 ; Convert to -4 based negative index
8345 mflr r2 ; Save the return
8346 add r7,r10,r7 ; Point to the PCA directly
8347 beqlr-- ; There was no PTE to start with...
8349 bl mapLockPteg ; Lock the PTEG
8351 lwz r0,mpPte(r31) ; Grab the PTE offset again
8352 mtlr r2 ; Restore the LR
8353 andi. r3,r0,mpHValid ; Is there a possible PTE?
8354 beq-- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
8356 rlwinm r3,r0,0,0,30 ; Clear the valid bit
8357 add r3,r3,r10 ; Point to the actual PTE
8358 ld r4,0(r3) ; Get the top of the PTE
8360 li r8,tlbieLock ; Get the TLBIE lock
8361 rldicr r0,r4,0,62 ; Clear the valid bit
8362 std r0,0(r3) ; Invalidate the PTE
8364 rldicr r2,r4,16,35 ; Shift the AVPN over to match VPN
8365 sync ; Make sure everyone sees the invalidate
8366 rldimi r2,r5,0,36 ; Cram in the page portion of the EA
8368 mITLBIE64: lwarx r0,0,r8 ; Get the TLBIE lock
8369 mr. r0,r0 ; Is it locked?
8370 li r0,1 ; Get our lock word
8371 bne-- mITLBIE64a ; It is locked, toss reservation and wait...
8373 stwcx. r0,0,r8 ; Try to get it
8374 bne-- mITLBIE64 ; We was beat...
8376 rldicl r2,r2,0,16 ; Clear bits 0:15 because we are under orders
8378 li r0,0 ; Lock clear value
8380 tlbie r2 ; Invalidate it everywhere
8382 eieio ; Make sure that the tlbie happens first
8383 tlbsync ; Wait for everyone to catch up
8384 ptesync ; Wait for quiet again
8386 stw r0,tlbieLock(0) ; Clear the tlbie lock
8388 ld r5,8(r3) ; Get the real part
8389 srdi r10,r5,12 ; Change physical address to a ppnum
8390 b mINmerge ; Join the common 32-64-bit code...
8392 mITLBIE64a: li r5,lgKillResv ; Killing field
8393 stwcx. r5,0,r5 ; Kill reservation
8395 mITLBIE64b: lwz r0,0(r8) ; Get the TLBIE lock
8396 mr. r0,r0 ; Is it locked?
8397 beq++ mITLBIE64 ; Nope, try again...
8398 b mITLBIE64b ; Yup, wait for it...
8401 ; mapLockPteg - Locks a PTEG
8402 ; R7 points to PCA entry
8403 ; R6 contains PCA on return
8410 lwarx r6,0,r7 ; Pick up the PCA
8411 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
8412 ori r0,r6,PCAlock ; Set the lock bit
8413 bne-- mLSkill ; It is locked...
8415 stwcx. r0,0,r7 ; Try to lock the PTEG
8416 bne-- mapLockPteg ; We collided...
8418 isync ; Nostradamus lied
8421 mLSkill: li r6,lgKillResv ; Get killing field
8422 stwcx. r6,0,r6 ; Kill it
8425 lwz r6,0(r7) ; Pick up the PCA
8426 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
8427 beq++ mapLockPteg ; Nope, try again...
8428 b mapLockPteh ; Yes, wait for it...
8432 ; The mapSelSlot function selects a PTEG slot to use. As input, it expects R6
8433 ; to contain the PCA. When it returns, R3 contains 0 if an unoccupied slot was
8434 ; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
8435 ; R4 returns the slot index.
8437 ; CR7 also indicates that we have a block mapping
8439 ; The PTEG allocation controls are a bit map of the state of the PTEG.
8440 ; PCAfree indicates that the PTE slot is empty.
8441 ; PCAauto means that it comes from an autogen area. These
8442 ; guys do not keep track of reference and change and are actually "wired".
8443 ; They are easy to maintain. PCAsteal
8444 ; is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these
8445 ; fields fit in a single word and are loaded and stored under control of the
8446 ; PTEG control area lock (PCAlock).
8448 ; Note that PCAauto does not contribute to the steal calculations at all. Originally
8449 ; it did, autogens were second in priority. This can result in a pathalogical
8450 ; case where an instruction can not make forward progress, or one PTE slot
8453 ; Note that the PCA must be locked when we get here.
8455 ; Physically, the fields are arranged:
8462 ; At entry, R6 contains new unlocked PCA image (real PCA is locked and untouched)
8467 ; R3 = 1 - steal regular
8468 ; R3 = 2 - steal autogen
8469 ; R4 contains slot number
8470 ; R6 contains updated PCA image
8475 mapSelSlot: lis r10,0 ; Clear autogen mask
8476 li r9,0 ; Start a mask
8477 beq cr7,mSSnotblk ; Skip if this is not a block mapping
8478 ori r10,r10,lo16(0xFFFF) ; Make sure we mark a block mapping (autogen)
8480 mSSnotblk: rlwinm r11,r6,16,24,31 ; Isolate just the steal mask
8481 oris r9,r9,0x8000 ; Get a mask
8482 cntlzw r4,r6 ; Find a slot or steal one
8483 ori r9,r9,lo16(0x8000) ; Insure that we have 0x80008000
8484 rlwinm r4,r4,0,29,31 ; Isolate bit position
8485 rlwimi r11,r11,8,16,23 ; Get set to march a 1 back into top of 8 bit rotate
8486 srw r2,r9,r4 ; Get mask to isolate selected inuse and autogen flags
8487 srwi r11,r11,1 ; Slide steal mask right
8488 and r8,r6,r2 ; Isolate the old in use and autogen bits
8489 andc r6,r6,r2 ; Allocate the slot and also clear autogen flag
8490 addi r0,r8,0x7F00 ; Push autogen flag to bit 16
8491 and r2,r2,r10 ; Keep the autogen part if autogen
8492 addis r8,r8,0xFF00 ; Push in use to bit 0 and invert
8493 or r6,r6,r2 ; Add in the new autogen bit
8494 rlwinm r0,r0,17,31,31 ; Get a 1 if the old was autogenned (always 0 if not in use)
8495 rlwinm r8,r8,1,31,31 ; Isolate old in use
8496 rlwimi r6,r11,16,8,15 ; Stick the new steal slot in
8498 add r3,r0,r8 ; Get 0 if no steal, 1 if steal normal, 2 if steal autogen
8502 ; Shared/Exclusive locks
8504 ; A shared/exclusive lock allows multiple shares of a lock to be taken
8505 ; but only one exclusive. A shared lock can be "promoted" to exclusive
8506 ; when it is the only share. If there are multiple sharers, the lock
8507 ; must be "converted". A promotion drops the share and gains exclusive as
8508 ; an atomic operation. If anyone else has a share, the operation fails.
8509 ; A conversion first drops the share and then takes an exclusive lock.
8511 ; We will want to add a timeout to this eventually.
8513 ; R3 is set to 0 for success, non-zero for failure
8517 ; Convert a share into an exclusive
8524 lis r0,0x8000 ; Get the locked lock image
8526 mflr r0 ; (TEST/DEBUG)
8527 oris r0,r0,0x8000 ; (TEST/DEBUG)
8530 sxlkCTry: lwarx r2,0,r3 ; Get the lock word
8531 cmplwi r2,1 ; Does it just have our share?
8532 subi r2,r2,1 ; Drop our share in case we do not get it
8533 bne-- sxlkCnotfree ; No, we need to unlock...
8534 stwcx. r0,0,r3 ; Try to take it exclusively
8535 bne-- sxlkCTry ; Collision, try again...
8542 stwcx. r2,0,r3 ; Try to drop our share...
8543 bne-- sxlkCTry ; Try again if we collided...
8544 b sxlkExclusive ; Go take it exclusively...
8547 ; Promote shared to exclusive
8553 lis r0,0x8000 ; Get the locked lock image
8555 mflr r0 ; (TEST/DEBUG)
8556 oris r0,r0,0x8000 ; (TEST/DEBUG)
8559 sxlkPTry: lwarx r2,0,r3 ; Get the lock word
8560 cmplwi r2,1 ; Does it just have our share?
8561 bne-- sxlkPkill ; No, just fail (R3 is non-zero)...
8562 stwcx. r0,0,r3 ; Try to take it exclusively
8563 bne-- sxlkPTry ; Collision, try again...
8569 sxlkPkill: li r2,lgKillResv ; Point to killing field
8570 stwcx. r2,0,r2 ; Kill reservation
8576 ; Take lock exclusivily
8582 lis r0,0x8000 ; Get the locked lock image
8584 mflr r0 ; (TEST/DEBUG)
8585 oris r0,r0,0x8000 ; (TEST/DEBUG)
8588 sxlkXTry: lwarx r2,0,r3 ; Get the lock word
8589 mr. r2,r2 ; Is it locked?
8590 bne-- sxlkXWait ; Yes...
8591 stwcx. r0,0,r3 ; Try to take it
8592 bne-- sxlkXTry ; Collision, try again...
8594 isync ; Toss anything younger than us
8600 sxlkXWait: li r2,lgKillResv ; Point to killing field
8601 stwcx. r2,0,r2 ; Kill reservation
8603 sxlkXWaiu: lwz r2,0(r3) ; Get the lock again
8604 mr. r2,r2 ; Is it free yet?
8605 beq++ sxlkXTry ; Yup...
8606 b sxlkXWaiu ; Hang around a bit more...
8609 ; Take a share of the lock
8614 sxlkShared: lwarx r2,0,r3 ; Get the lock word
8615 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
8616 addi r2,r2,1 ; Up the share count
8617 bne-- sxlkSWait ; Yes...
8618 stwcx. r2,0,r3 ; Try to take it
8619 bne-- sxlkShared ; Collision, try again...
8621 isync ; Toss anything younger than us
8627 sxlkSWait: li r2,lgKillResv ; Point to killing field
8628 stwcx. r2,0,r2 ; Kill reservation
8630 sxlkSWaiu: lwz r2,0(r3) ; Get the lock again
8631 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
8632 beq++ sxlkShared ; Nope...
8633 b sxlkSWaiu ; Hang around a bit more...
8636 ; Unlock either exclusive or shared.
8641 sxlkUnlock: eieio ; Make sure we order our stores out
8643 sxlkUnTry: lwarx r2,0,r3 ; Get the lock
8644 rlwinm. r0,r2,0,0,0 ; Do we hold it exclusively?
8645 subi r2,r2,1 ; Remove our share if we have one
8646 li r0,0 ; Clear this
8647 bne-- sxlkUExclu ; We hold exclusive...
8649 stwcx. r2,0,r3 ; Try to lose our share
8650 bne-- sxlkUnTry ; Collision...
8653 sxlkUExclu: stwcx. r0,0,r3 ; Unlock and release reservation
8654 beqlr++ ; Leave if ok...
8655 b sxlkUnTry ; Could not store, try over...
8659 .globl EXT(fillPage)
8663 mfsprg r0,2 ; Get feature flags
8664 mtcrf 0x02,r0 ; move pf64Bit to cr
8666 rlwinm r4,r4,0,1,0 ; Copy fill to top of 64-bit register
8667 lis r2,0x0200 ; Get vec
8669 ori r2,r2,0x2000 ; Get FP
8673 andc r5,r5,r2 ; Clear out permanent turn-offs
8675 ori r2,r2,0x8030 ; Clear IR, DR and EE
8677 andc r0,r5,r2 ; Kill them
8680 bt++ pf64Bitb,fpSF1 ; skip if 64-bit (only they take the hint)
8682 slwi r3,r3,12 ; Make into a physical address
8683 mtmsr r2 ; Interrupts and translation off
8686 li r2,4096/32 ; Get number of cache lines
8688 fp32again: dcbz 0,r3 ; Clear
8689 addic. r2,r2,-1 ; Count down
8693 stw r8,12(r3) ; Fill
8694 stw r9,16(r3) ; Fill
8695 stw r10,20(r3) ; Fill
8696 stw r11,24(r3) ; Fill
8697 stw r12,28(r3) ; Fill
8698 addi r3,r3,32 ; Point next
8699 bgt+ fp32again ; Keep going
8701 mtmsr r5 ; Restore all
8708 sldi r2,r2,63 ; Get 64-bit bit
8709 or r0,r0,r2 ; Turn on 64-bit
8710 sldi r3,r3,12 ; Make into a physical address
8712 mtmsrd r0 ; Interrupts and translation off
8715 li r2,4096/128 ; Get number of cache lines
8717 fp64again: dcbz128 0,r3 ; Clear
8718 addic. r2,r2,-1 ; Count down
8721 std r7,16(r3) ; Fill
8722 std r8,24(r3) ; Fill
8723 std r9,32(r3) ; Fill
8724 std r10,40(r3) ; Fill
8725 std r11,48(r3) ; Fill
8726 std r12,56(r3) ; Fill
8727 std r4,64+0(r3) ; Fill
8728 std r6,64+8(r3) ; Fill
8729 std r7,64+16(r3) ; Fill
8730 std r8,64+24(r3) ; Fill
8731 std r9,64+32(r3) ; Fill
8732 std r10,64+40(r3) ; Fill
8733 std r11,64+48(r3) ; Fill
8734 std r12,64+56(r3) ; Fill
8735 addi r3,r3,128 ; Point next
8736 bgt+ fp64again ; Keep going
8738 mtmsrd r5 ; Restore all
8748 lis r11,hi16(EXT(mapdebug))
8749 ori r11,r11,lo16(EXT(mapdebug))
8754 mLxx: rlwinm r0,r12,0,MSR_DR_BIT+1,MSR_DR_BIT-1
8769 .globl EXT(checkBogus)
8774 blr ; No-op normally