+;
+; Guest shadow assist -- remove all guest mappings
+;
+; Remove all mappings for a guest pmap from the shadow hash table.
+;
+; Parameters:
+; r3 : address of pmap, 32-bit kernel virtual address
+;
+; Non-volatile register usage:
+; r24 : host pmap's physical address
+; r25 : VMM extension block's physical address
+; r26 : physent address
+; r27 : guest pmap's space ID number
+; r28 : current hash table page index
+; r29 : guest pmap's physical address
+; r30 : saved msr image
+; r31 : current mapping
+;
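+; Overview -- a non-normative C-style sketch of the loop structure implemented
+; below (names such as GV_HPAGES and the slot layout are taken from this code,
+; not from a separate specification):
+;
+;   for (page = 0; page < GV_HPAGES; page++)
+;       for (each mapping slot in the hash table page)
+;           if (!free && slot->mpSpace == guest space id) {
+;               if (!dormant) disconnect and invalidate the slot's PTE;
+;               unchain the mapping from its physent;
+;               mark the mapping free;
+;           }
+;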
+ .align 5
+ .globl EXT(hw_rem_all_gv)
+
+LEXT(hw_rem_all_gv)
+
+#define graStackSize ((31-24+1)*4)+4
+ stwu r1,-(FM_ALIGN(graStackSize)+FM_SIZE)(r1)
+ ; Mint a new stack frame
+ mflr r0 ; Get caller's return address
+ mfsprg r11,2 ; Get feature flags
+ mtcrf 0x02,r11 ; Insert feature flags into cr6
+ stw r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Save caller's return address
+ stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
+ stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
+ stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
+ stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
+ stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
+ stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
+ stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
+ stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
+
+ lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
+
+ bt++ pf64Bitb,gra64Salt ; Test for 64-bit machine
+ lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
+ lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
+ lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
+ b graStart ; Get to it
+gra64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
+ ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
+ ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
+graStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
+ xor r29,r3,r9 ; Convert pmap_t virt->real
+ mr r30,r11 ; Save caller's msr image
+
+ la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
+ bl sxlkExclusive ; Get lock exclusive
+
+ lwz r3,vxsGra(r25) ; Get remove all count
+ addi r3,r3,1 ; Increment remove all count
+ stw r3,vxsGra(r25) ; Update remove all count
+
+ li r28,0 ; r28 <- first hash page table index to search
+ lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
+graPgLoop:
+ la r31,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
+ rlwinm r11,r28,GV_PGIDX_SZ_LG2,GV_HPAGE_MASK
+ ; Convert page index into page physical index offset
+ add r31,r31,r11 ; Calculate page physical index entry address
+ bt++ pf64Bitb,gra64Page ; Separate handling for 64-bit
+ lwz r31,4(r31) ; r31 <- first slot in hash table page to examine
+ b graLoop ; Examine all slots in this page
+gra64Page: ld r31,0(r31) ; r31 <- first slot in hash table page to examine
+ b graLoop ; Examine all slots in this page
+
+ .align 5
+graLoop: lwz r3,mpFlags(r31) ; Get mapping's flags
+ lhz r4,mpSpace(r31) ; Get mapping's space ID number
+ rlwinm r6,r3,0,mpgFree ; Isolate guest free mapping flag
+ xor r4,r4,r27 ; Compare space ID number
+ or. r0,r6,r4 ; cr0_eq <- !free && space id match
+ bne graMiss ; Not one of ours, skip it
+
+ lwz r11,vxsGraHits(r25) ; Get remove hit count
+ addi r11,r11,1 ; Increment remove hit count
+ stw r11,vxsGraHits(r25) ; Update remove hit count
+
+ rlwinm. r0,r3,0,mpgDormant ; Is this entry dormant?
+ bne graRemPhys ; Yes, nothing to disconnect
+
+ lwz r11,vxsGraActive(r25) ; Get remove active count
+ addi r11,r11,1 ; Increment remove active count
+ stw r11,vxsGraActive(r25) ; Update remove active count
+
+ bt++ pf64Bitb,graDscon64 ; Handle 64-bit disconnect separately
+ bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
+ ; r31 <- mapping's physical address
+ ; r3 -> PTE slot physical address
+ ; r4 -> High-order 32 bits of PTE
+ ; r5 -> Low-order 32 bits of PTE
+ ; r6 -> PCA
+ ; r7 -> PCA physical address
+ rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
+ b graFreePTE ; Join 64-bit path to release the PTE
+graDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
+ rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
+graFreePTE: mr. r3,r3 ; Was there a valid PTE?
+ beq- graRemPhys ; No valid PTE, we're almost done
+ lis r0,0x8000 ; Prepare free bit for this slot
+ srw r0,r0,r2 ; Position free bit
+ or r6,r6,r0 ; Set it in our PCA image
+ lwz r8,mpPte(r31) ; Get PTE pointer
+ rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
+ stw r8,mpPte(r31) ; Save invalidated PTE pointer
+ eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
+ stw r6,0(r7) ; Update PCA and unlock the PTEG
+
+graRemPhys:
+ lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
+ bl mapFindLockPN ; Find 'n' lock this page's physent
+ mr. r26,r3 ; Got lock on our physent?
+ beq-- graBadPLock ; No, time to bail out
+
+ crset cr1_eq ; cr1_eq <- previous link is the anchor
+ bt++ pf64Bitb,graRemove64 ; Use 64-bit version on 64-bit machine
+ la r11,ppLink+4(r26) ; Point to chain anchor
+ lwz r9,ppLink+4(r26) ; Get chain anchor
+ rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
+
+graRemLoop: beq- graRemoveMiss ; End of chain, this is not good
+ cmplw r9,r31 ; Is this the mapping to remove?
+ lwz r8,mpAlias+4(r9) ; Get forward chain pointer
+ bne graRemNext ; No, chain onward
+ bt cr1_eq,graRemRetry ; Mapping to remove is chained from anchor
+ stw r8,0(r11) ; Unchain gpv->phys mapping
+ b graRemoved ; Exit loop
+graRemRetry:
+ lwarx r0,0,r11 ; Get previous link
+ rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
+ stwcx. r0,0,r11 ; Update previous link
+ bne- graRemRetry ; Lost reservation, retry
+ b graRemoved ; Good work, let's get outta here
+
+graRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
+ crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
+ mr. r9,r8 ; Does next entry exist?
+ b graRemLoop ; Carry on
+
+graRemove64:
+ li r7,ppLFAmask ; Get mask to clean up mapping pointer
+ rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
+ la r11,ppLink(r26) ; Point to chain anchor
+ ld r9,ppLink(r26) ; Get chain anchor
+ andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
+graRem64Lp: beq-- graRemoveMiss ; End of chain, this is not good
+ cmpld r9,r31 ; Is this the mapping to remove?
+ ld r8,mpAlias(r9) ; Get forward chain pointer
+ bne graRem64Nxt ; Not mapping to remove, chain on, dude
+ bt cr1_eq,graRem64Rt ; Mapping to remove is chained from anchor
+ std r8,0(r11) ; Unchain gpv->phys mapping
+ b graRemoved ; Exit loop
+graRem64Rt: ldarx r0,0,r11 ; Get previous link
+ and r0,r0,r7 ; Get flags
+ or r0,r0,r8 ; Insert new forward pointer
+ stdcx. r0,0,r11 ; Slam it back in
+ bne-- graRem64Rt ; Lost reservation, retry
+ b graRemoved ; Good work, let's go home
+
+graRem64Nxt:
+ la r11,mpAlias(r9) ; Point to (soon to be) previous link
+ crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
+ mr. r9,r8 ; Does next entry exist?
+ b graRem64Lp ; Carry on
+
+graRemoved:
+ mr r3,r26 ; r3 <- physent's address
+ bl mapPhysUnlock ; Unlock the physent (and its chain of mappings)
+
+ lwz r3,mpFlags(r31) ; Get mapping's flags
+ rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
+ ori r3,r3,mpgFree ; Mark mapping free
+ stw r3,mpFlags(r31) ; Update flags
+
+graMiss: addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping
+ rlwinm. r0,r31,0,GV_PAGE_MASK ; End of hash table page?
+ bne graLoop ; No, examine next slot
+ addi r28,r28,1 ; Increment hash table page index
+ cmplwi r28,GV_HPAGES ; End of hash table?
+ bne graPgLoop ; Examine next hash table page
+
+ la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
+ bl sxlkUnlock ; Release host pmap's search lock
+
+ bt++ pf64Bitb,graRtn64 ; Handle 64-bit separately
+ mtmsr r30 ; Restore 'rupts, translation
+ isync ; Throw a small wrench into the pipeline
+ b graPopFrame ; Nothing to do now but pop a frame and return
+graRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
+graPopFrame:
+ lwz r0,(FM_ALIGN(graStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Get caller's return address
+ lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
+ lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
+ lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
+ lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
+ mtlr r0 ; Prepare return address
+ lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
+ lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
+ lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
+ lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
+ lwz r1,0(r1) ; Pop stack frame
+ blr ; Return to caller
+
+graBadPLock:
+graRemoveMiss:
+ lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
+ ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
+ li r3,failMapping ; The BOMB, Dmitri.
+ sc ; The hydrogen bomb.
+
+
+;
+; Guest shadow assist -- remove local guest mappings
+;
+; Remove local mappings for a guest pmap from the shadow hash table.
+;
+; Parameters:
+; r3 : address of guest pmap, 32-bit kernel virtual address
+;
+; Non-volatile register usage:
+; r20 : current active map word's physical address
+; r21 : current hash table page address
+; r22 : updated active map word in process
+; r23 : active map word in process
+; r24 : host pmap's physical address
+; r25 : VMM extension block's physical address
+; r26 : physent address
+; r27 : guest pmap's space ID number
+; r28 : current active map index
+; r29 : guest pmap's physical address
+; r30 : saved msr image
+; r31 : current mapping
+;
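+; Overview -- a non-normative sketch of the scan implemented below:
+;
+;   for (word = 0; word < GV_MAP_WORDS; word++)
+;       for (each bit set in the active map word)
+;           if (!global && slot->mpSpace == guest space id) {
+;               clear the slot's active bit and mark the mapping dormant;
+;               disconnect and invalidate the slot's PTE, if any;
+;           }
+;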
+ .align 5
+ .globl EXT(hw_rem_local_gv)
+
+LEXT(hw_rem_local_gv)
+
+#define grlStackSize ((31-20+1)*4)+4
+ stwu r1,-(FM_ALIGN(grlStackSize)+FM_SIZE)(r1)
+ ; Mint a new stack frame
+ mflr r0 ; Get caller's return address
+ mfsprg r11,2 ; Get feature flags
+ mtcrf 0x02,r11 ; Insert feature flags into cr6
+ stw r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Save caller's return address
+ stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
+ stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
+ stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
+ stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
+ stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
+ stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
+ stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
+ stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
+ stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
+ stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
+ stw r21,FM_ARG0+0x28(r1) ; Save non-volatile r21
+ stw r20,FM_ARG0+0x2C(r1) ; Save non-volatile r20
+
+ lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
+
+ bt++ pf64Bitb,grl64Salt ; Test for 64-bit machine
+ lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
+ lwz r9,pmapvr+4(r3) ; Get 32-bit virt<->real conversion salt
+ lwz r24,vmxHostPmapPhys+4(r11) ; r24 <- host pmap's paddr
+ b grlStart ; Get to it
+grl64Salt: ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
+ ld r9,pmapvr(r3) ; Get 64-bit virt<->real conversion salt
+ ld r24,vmxHostPmapPhys(r11) ; r24 <- host pmap's paddr
+
+grlStart: bl EXT(mapSetUp) ; Disable 'rupts, translation, enter 64-bit mode
+ xor r29,r3,r9 ; Convert pmap_t virt->real
+ mr r30,r11 ; Save caller's msr image
+
+ la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
+ bl sxlkExclusive ; Get lock exclusive
+
+ li r28,0 ; r28 <- index of first active map word to search
+ lwz r27,pmapSpace(r29) ; r27 <- guest pmap's space ID number
+ b grlMap1st ; Examine first map word
+
+ .align 5
+grlNextMap: stw r22,0(r21) ; Save updated map word
+ addi r28,r28,1 ; Increment map word index
+ cmplwi r28,GV_MAP_WORDS ; See if we're done
+ beq grlDone ; Yup, let's get outta here
+
+grlMap1st: la r20,VMX_ACTMAP_OFFSET(r25) ; Get base of active map word array
+ rlwinm r11,r28,GV_MAPWD_SZ_LG2,GV_MAP_MASK
+ ; Convert map index into map index offset
+ add r20,r20,r11 ; Calculate map array element address
+ lwz r22,0(r20) ; Get active map word at index
+ mr. r23,r22 ; Any active mappings indicated?
+ beq grlNextMap ; Nope, check next word
+
+ la r21,VMX_HPIDX_OFFSET(r25) ; Get base of hash page physical index
+ rlwinm r11,r28,GV_MAP_SHIFT,GV_HPAGE_MASK
+ ; Extract page index from map word index and convert
+ ; into page physical index offset
+ add r21,r21,r11 ; Calculate page physical index entry address
+ bt++ pf64Bitb,grl64Page ; Separate handling for 64-bit
+ lwz r21,4(r21) ; Get selected hash table page's address
+ b grlLoop ; Examine all slots in this page
+grl64Page: ld r21,0(r21) ; Get selected hash table page's address
+ b grlLoop ; Examine all slots in this page
+
+ .align 5
+grlLoop: cntlzw r11,r23 ; Get next active bit lit in map word
+ cmplwi r11,32 ; Any active mappings left in this word?
+ lis r12,0x8000 ; Prepare mask to reset bit
+ srw r12,r12,r11 ; Position mask bit
+ andc r23,r23,r12 ; Reset lit bit
+ beq grlNextMap ; No bits lit, examine next map word
+
+ slwi r31,r11,GV_SLOT_SZ_LG2 ; Get slot offset in slot band from lit bit number
+ rlwimi r31,r28,GV_BAND_SHIFT,GV_BAND_MASK
+ ; Extract slot band number from index and insert
+ add r31,r31,r21 ; Add hash page address yielding mapping slot address
+
+ lwz r3,mpFlags(r31) ; Get mapping's flags
+ lhz r4,mpSpace(r31) ; Get mapping's space ID number
+ rlwinm r5,r3,0,mpgGlobal ; Extract global bit
+ xor r4,r4,r27 ; Compare space ID number
+ or. r4,r4,r5 ; (space id miss || global)
+ bne grlLoop ; Not one of ours, skip it
+ andc r22,r22,r12 ; Reset active bit corresponding to this mapping
+ ori r3,r3,mpgDormant ; Mark entry dormant
+ stw r3,mpFlags(r31) ; Update mapping's flags
+
+ bt++ pf64Bitb,grlDscon64 ; Handle 64-bit disconnect separately
+ bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
+ ; r31 <- mapping's physical address
+ ; r3 -> PTE slot physical address
+ ; r4 -> High-order 32 bits of PTE
+ ; r5 -> Low-order 32 bits of PTE
+ ; r6 -> PCA
+ ; r7 -> PCA physical address
+ rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
+ b grlFreePTE ; Join 64-bit path to release the PTE
+grlDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
+ rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
+grlFreePTE: mr. r3,r3 ; Was there a valid PTE?
+ beq- grlLoop ; No valid PTE, we're done with this mapping
+ lis r0,0x8000 ; Prepare free bit for this slot
+ srw r0,r0,r2 ; Position free bit
+ or r6,r6,r0 ; Set it in our PCA image
+ lwz r8,mpPte(r31) ; Get PTE pointer
+ rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
+ stw r8,mpPte(r31) ; Save invalidated PTE pointer
+ eieio ; Synchronize all previous updates (mapInvPtexx doesn't)
+ stw r6,0(r7) ; Update PCA and unlock the PTEG
+ b grlLoop ; On to next active mapping in this map word
+
+grlDone: la r3,pmapSXlk(r24) ; r3 <- host pmap's search lock
+ bl sxlkUnlock ; Release host pmap's search lock
+
+ bt++ pf64Bitb,grlRtn64 ; Handle 64-bit separately
+ mtmsr r30 ; Restore 'rupts, translation
+ isync ; Throw a small wrench into the pipeline
+ b grlPopFrame ; Nothing to do now but pop a frame and return
+grlRtn64: mtmsrd r30 ; Restore 'rupts, translation, 32-bit mode
+grlPopFrame:
+ lwz r0,(FM_ALIGN(grlStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Get caller's return address
+ lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
+ lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
+ lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
+ lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
+ mtlr r0 ; Prepare return address
+ lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
+ lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
+ lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
+ lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
+ lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
+ lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
+ lwz r21,FM_ARG0+0x28(r1) ; Restore non-volatile r21
+ lwz r20,FM_ARG0+0x2C(r1) ; Restore non-volatile r20
+ lwz r1,0(r1) ; Pop stack frame
+ blr ; Return to caller
+
+
+;
+; Guest shadow assist -- resume a guest mapping
+;
+; Locates the specified dormant mapping, and if it exists validates it and makes it
+; active.
+;
+; Parameters:
+; r3 : address of host pmap, 32-bit kernel virtual address
+; r4 : address of guest pmap, 32-bit kernel virtual address
+; r5 : host virtual address, high-order 32 bits
+; r6 : host virtual address, low-order 32 bits
+; r7 : guest virtual address, high-order 32 bits
+; r8 : guest virtual address, low-order 32 bits
+; r9 : guest mapping protection code
+;
+; Non-volatile register usage:
+; r23 : VMM extension block's physical address
+; r24 : physent physical address
+; r25 : caller's msr image from mapSetUp
+; r26 : guest mapping protection code
+; r27 : host pmap physical address
+; r28 : guest pmap physical address
+; r29 : host virtual address
+; r30 : guest virtual address
+; r31 : gva->phys mapping's physical address
+;
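+; Overview -- a non-normative sketch of the resume path implemented below:
+;
+;   slot = shadow hash lookup of (guest space id, guest vaddr);
+;   if (no matching slot) return mapRtNotFnd;
+;   disconnect and invalidate the slot's PTE, if any;
+;   if (slot's physent still carries a normal host mapping of (host space id, host vaddr)) {
+;       install the new protection, clear mpgDormant; return mapRtOK;
+;   } else {
+;       unchain the slot from the physent, mark it free; return mapRtNotFnd;
+;   }
+;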
+ .align 5
+ .globl EXT(hw_res_map_gv)
+
+LEXT(hw_res_map_gv)
+
+#define grsStackSize ((31-23+1)*4)+4
+
+ stwu r1,-(FM_ALIGN(grsStackSize)+FM_SIZE)(r1)
+ ; Mint a new stack frame
+ mflr r0 ; Get caller's return address
+ mfsprg r11,2 ; Get feature flags
+ mtcrf 0x02,r11 ; Insert feature flags into cr6
+ stw r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Save caller's return address
+ stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
+ stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
+ stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
+ stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
+ stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
+ stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
+ stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
+ stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
+ stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
+
+ rlwinm r29,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of host vaddr
+ rlwinm r30,r8,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
+ mr r26,r9 ; Copy guest mapping protection code
+
+ lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
+ lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
+ bt++ pf64Bitb,grs64Salt ; Handle 64-bit machine separately
+ lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
+ lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
+ lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
+ la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r31,r31,r10 ; r31 <- hash page index entry
+ lwz r31,4(r31) ; r31 <- hash page paddr
+ rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
+ ; r31 <- hash group paddr
+ b grsStart ; Get to it
+
+grs64Salt: rldimi r29,r5,32,0 ; Insert high-order 32 bits of 64-bit host vaddr
+ rldimi r30,r7,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
+ ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
+ ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
+ ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
+ la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r31,r31,r10 ; r31 <- hash page index entry
+ ld r31,0(r31) ; r31 <- hash page paddr
+ insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
+ ; r31 <- hash group paddr
+
+grsStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
+ xor r28,r4,r28 ; Convert guest pmap_t virt->real
+ bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
+ mr r25,r11 ; Save caller's msr image
+
+ la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
+ bl sxlkExclusive ; Get lock exclusive
+
+ li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
+ mtctr r0 ; in this group
+ bt++ pf64Bitb,grs64Search ; Test for 64-bit machine
+
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
+ b grs32SrchLp ; Let the search begin!
+
+ .align 5
+grs32SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
+ rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- !(!free && space match)
+ xor r8,r8,r30 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
+ beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz grs32SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrwi r5,r5,12 ; Remove flags from virtual address
+ rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- !(!free && space match)
+ xor r5,r5,r30 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
+ beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
+ b grsSrchMiss ; No joy in our hash group
+
+grs64Search:
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
+ b grs64SrchLp ; Let the search begin!
+
+ .align 5
+grs64SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
+ rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- !(!free && space match)
+ xor r8,r8,r30 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
+ beq grsSrchHit ; Join common path on hit (r31 points to guest mapping)
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz grs64SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrdi r5,r5,12 ; Remove flags from virtual address
+ rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- !(!free && space match)
+ xor r5,r5,r30 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
+ bne grsSrchMiss ; No joy in our hash group
+
+grsSrchHit:
+ rlwinm. r0,r6,0,mpgDormant ; Is the mapping dormant?
+ bne grsFindHost ; Yes, nothing to disconnect
+
+ bt++ pf64Bitb,grsDscon64 ; Handle 64-bit disconnect separately
+ bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
+ ; r31 <- mapping's physical address
+ ; r3 -> PTE slot physical address
+ ; r4 -> High-order 32 bits of PTE
+ ; r5 -> Low-order 32 bits of PTE
+ ; r6 -> PCA
+ ; r7 -> PCA physical address
+ rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
+ b grsFreePTE ; Join 64-bit path to release the PTE
+grsDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
+ rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
+grsFreePTE: mr. r3,r3 ; Was there a valid PTE?
+ beq- grsFindHost ; No valid PTE, we're almost done
+ lis r0,0x8000 ; Prepare free bit for this slot
+ srw r0,r0,r2 ; Position free bit
+ or r6,r6,r0 ; Set it in our PCA image
+ lwz r8,mpPte(r31) ; Get PTE pointer
+ rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
+ stw r8,mpPte(r31) ; Save invalidated PTE pointer
+ eieio ; Synchronize all previous updates (mapInvPtexx didn't)
+ stw r6,0(r7) ; Update PCA and unlock the PTEG
+
+grsFindHost:
+
+// We now have a dormant guest mapping that matches our space id and virtual address. Our next
+// step is to locate the host mapping that completes the guest mapping's connection to a physical
+// frame. The guest and host mappings must connect to the same physical frame, so they must both
+// be chained on the same physent. We search the physent chain for a host mapping matching our
+// host's space id and the host virtual address. If we succeed, we know that the entire chain
+// of mappings (guest virtual->host virtual->physical) is valid, so the dormant mapping can be
+// resumed. If we fail to find the specified host virtual->physical mapping, it is because the
+// host virtual or physical address has changed since the guest mapping was suspended, so it
+// is no longer valid and cannot be resumed -- we therefore delete the guest mapping and tell
+// our caller that it will have to take its long path, translating the host virtual address
+// through the host's skiplist and installing a new guest mapping.
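+//
+// A non-normative sketch of the physent chain walk performed below:
+//
+//   for (mp = physent->ppLink (flags stripped); mp != NULL; mp = mp->mpAlias)
+//       if (mp is mpNormal && mp->mpSpace == host space id && mp->mpVAddr == host vaddr)
+//           goto hit;
+//   goto miss;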
+
+ lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
+ bl mapFindLockPN ; Find 'n' lock this page's physent
+ mr. r24,r3 ; Got lock on our physent?
+ beq-- grsBadPLock ; No, time to bail out
+
+ bt++ pf64Bitb,grsPFnd64 ; 64-bit version of physent chain search
+
+ lwz r9,ppLink+4(r24) ; Get first mapping on physent
+ lwz r6,pmapSpace(r27) ; Get host pmap's space id number
+ rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
+grsPELoop: mr. r12,r9 ; Got a mapping to look at?
+ beq- grsPEMiss ; Nope, we've missed hva->phys mapping
+ lwz r7,mpFlags(r12) ; Get mapping's flags
+ lhz r4,mpSpace(r12) ; Get mapping's space id number
+ lwz r5,mpVAddr+4(r12) ; Get mapping's virtual address
+ lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
+
+ rlwinm r0,r7,0,mpType ; Isolate mapping's type
+ rlwinm r5,r5,0,~mpHWFlags ; Bye-bye unsightly flags
+ xori r0,r0,mpNormal ; Normal mapping?
+ xor r4,r4,r6 ; Compare w/ host space id number
+ xor r5,r5,r29 ; Compare w/ host virtual address
+ or r0,r0,r4 ; r0 <- (wrong type || !space id)
+ or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
+ beq grsPEHit ; Hit
+ b grsPELoop ; Iterate
+
+grsPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
+ rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
+ ld r9,ppLink(r24) ; Get first mapping on physent
+ lwz r6,pmapSpace(r27) ; Get pmap's space id number
+ andc r9,r9,r0 ; Cleanup mapping pointer
+grsPELp64: mr. r12,r9 ; Got a mapping to look at?
+ beq-- grsPEMiss ; Nope, we've missed hva->phys mapping
+ lwz r7,mpFlags(r12) ; Get mapping's flags
+ lhz r4,mpSpace(r12) ; Get mapping's space id number
+ ld r5,mpVAddr(r12) ; Get mapping's virtual address
+ ld r9,mpAlias(r12) ; Next mapping in physent alias chain
+ rlwinm r0,r7,0,mpType ; Isolate mapping's type
+ rldicr r5,r5,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
+ xori r0,r0,mpNormal ; Normal mapping?
+ xor r4,r4,r6 ; Compare w/ host space id number
+ xor r5,r5,r29 ; Compare w/ host virtual address
+ or r0,r0,r4 ; r0 <- (wrong type || !space id)
+ or. r0,r0,r5 ; cr0_eq <- (right type && space id hit && hva hit)
+ beq grsPEHit ; Hit
+ b grsPELp64 ; Iterate
+
+grsPEHit: lwz r0,mpVAddr+4(r31) ; Get va byte containing protection bits
+ rlwimi r0,r26,0,mpPP ; Insert new protection bits
+ stw r0,mpVAddr+4(r31) ; Write 'em back
+
+ eieio ; Ensure previous mapping updates are visible
+ lwz r0,mpFlags(r31) ; Get flags
+ rlwinm r0,r0,0,~mpgDormant ; Turn off dormant flag
+ stw r0,mpFlags(r31) ; Set updated flags, entry is now valid
+
+ li r31,mapRtOK ; Indicate success
+ b grsRelPhy ; Exit through physent lock release
+
+grsPEMiss: crset cr1_eq ; cr1_eq <- previous link is the anchor
+ bt++ pf64Bitb,grsRemove64 ; Use 64-bit version on 64-bit machine
+ la r11,ppLink+4(r24) ; Point to chain anchor
+ lwz r9,ppLink+4(r24) ; Get chain anchor
+ rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
+grsRemLoop: beq- grsPEMissMiss ; End of chain, this is not good
+ cmplw r9,r31 ; Is this the mapping to remove?
+ lwz r8,mpAlias+4(r9) ; Get forward chain pointer
+ bne grsRemNext ; No, chain onward
+ bt cr1_eq,grsRemRetry ; Mapping to remove is chained from anchor
+ stw r8,0(r11) ; Unchain gpv->phys mapping
+ b grsDelete ; Finish deleting mapping
+grsRemRetry:
+ lwarx r0,0,r11 ; Get previous link
+ rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
+ stwcx. r0,0,r11 ; Update previous link
+ bne- grsRemRetry ; Lost reservation, retry
+ b grsDelete ; Finish deleting mapping
+
+ .align 5
+grsRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
+ crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
+ mr. r9,r8 ; Does next entry exist?
+ b grsRemLoop ; Carry on
+
+grsRemove64:
+ li r7,ppLFAmask ; Get mask to clean up mapping pointer
+ rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
+ la r11,ppLink(r24) ; Point to chain anchor
+ ld r9,ppLink(r24) ; Get chain anchor
+ andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
+grsRem64Lp: beq-- grsPEMissMiss ; End of chain, this is not good
+ cmpld r9,r31 ; Is this the mapping to remove?
+ ld r8,mpAlias(r9) ; Get forward chain pointer
+ bne grsRem64Nxt ; Not mapping to remove, chain on, dude
+ bt cr1_eq,grsRem64Rt ; Mapping to remove is chained from anchor
+ std r8,0(r11) ; Unchain gpv->phys mapping
+ b grsDelete ; Finish deleting mapping
+grsRem64Rt: ldarx r0,0,r11 ; Get previous link
+ and r0,r0,r7 ; Get flags
+ or r0,r0,r8 ; Insert new forward pointer
+ stdcx. r0,0,r11 ; Slam it back in
+ bne-- grsRem64Rt ; Lost reservation, retry
+ b grsDelete ; Finish deleting mapping
+
+ .align 5
+grsRem64Nxt:
+ la r11,mpAlias(r9) ; Point to (soon to be) previous link
+ crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
+ mr. r9,r8 ; Does next entry exist?
+ b grsRem64Lp ; Carry on
+
+grsDelete:
+ lwz r3,mpFlags(r31) ; Get mapping's flags
+ rlwinm r3,r3,0,~mpgFlags ; Clear all guest flags
+ ori r3,r3,mpgFree ; Mark mapping free
+ stw r3,mpFlags(r31) ; Update flags
+
+ li r31,mapRtNotFnd ; Didn't succeed
+
+grsRelPhy: mr r3,r24 ; r3 <- physent addr
+ bl mapPhysUnlock ; Unlock physent chain
+
+grsRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
+ bl sxlkUnlock ; Release host pmap search lock
+
+grsRtn: mr r3,r31 ; r3 <- result code
+ bt++ pf64Bitb,grsRtn64 ; Handle 64-bit separately
+ mtmsr r25 ; Restore 'rupts, translation
+ isync ; Throw a small wrench into the pipeline
+ b grsPopFrame ; Nothing to do now but pop a frame and return
+grsRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
+grsPopFrame:
+ lwz r0,(FM_ALIGN(grsStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Get caller's return address
+ lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
+ lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
+ lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
+ lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
+ mtlr r0 ; Prepare return address
+ lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
+ lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
+ lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
+ lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
+ lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
+ lwz r1,0(r1) ; Pop stack frame
+ blr ; Return to caller
+
+ .align 5
+grsSrchMiss:
+ li r31,mapRtNotFnd ; Could not locate requested mapping
+ b grsRelPmap ; Exit through host pmap search lock release
+
+grsBadPLock:
+grsPEMissMiss:
+ lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
+ ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
+ li r3,failMapping ; The BOMB, Dmitri.
+ sc ; The hydrogen bomb.
+
+
+;
+; Guest shadow assist -- add a guest mapping
+;
+; Adds a guest mapping.
+;
+; Parameters:
+; r3 : address of host pmap, 32-bit kernel virtual address
+; r4 : address of guest pmap, 32-bit kernel virtual address
+; r5 : guest virtual address, high-order 32 bits
+; r6 : guest virtual address, low-order 32 bits (with mpHWFlags)
+; r7 : new mapping's flags
+; r8 : physical address, 32-bit page number
+;
+; Non-volatile register usage:
+; r22 : hash group's physical address
+; r23 : VMM extension block's physical address
+; r24 : mapping's flags
+; r25 : caller's msr image from mapSetUp
+; r26 : physent physical address
+; r27 : host pmap physical address
+; r28 : guest pmap physical address
+; r29 : physical address, 32-bit 4k-page number
+; r30 : guest virtual address
+; r31 : gva->phys mapping's physical address
+;
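+; Overview -- a non-normative sketch of the slot selection policy implemented below:
+;
+;   group = shadow hash group for (guest space id, guest vaddr);
+;   if (a matching mapping is already present) return and let the caller redrive;
+;   scan from the group's cursor: take the first free slot, else remember the
+;   first dormant slot seen, else steal the slot at the cursor;
+;   if a dormant or stolen slot is used, advance the cursor past it, disconnect
+;   its PTE (stolen case), and unchain it from its old physent;
+;   fill the slot (flags, space, PAddr, VAddr) and chain it onto the new physent;
+;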
+
+ .align 5
+ .globl EXT(hw_add_map_gv)
+
+
+LEXT(hw_add_map_gv)
+
+#define gadStackSize ((31-22+1)*4)+4
+
+ stwu r1,-(FM_ALIGN(gadStackSize)+FM_SIZE)(r1)
+ ; Mint a new stack frame
+ mflr r0 ; Get caller's return address
+ mfsprg r11,2 ; Get feature flags
+ mtcrf 0x02,r11 ; Insert feature flags into cr6
+ stw r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Save caller's return address
+ stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
+ stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
+ stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
+ stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
+ stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
+ stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
+ stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
+ stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
+ stw r23,FM_ARG0+0x20(r1) ; Save non-volatile r23
+ stw r22,FM_ARG0+0x24(r1) ; Save non-volatile r22
+
+ rlwinm r30,r5,0,1,0 ; Get high-order 32 bits of guest vaddr
+ rlwimi r30,r6,0,0,31 ; Get low-order 32 bits of guest vaddr
+ mr r24,r7 ; Copy guest mapping's flags
+ mr r29,r8 ; Copy target frame's physical address
+
+ lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
+ lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
+ bt++ pf64Bitb,gad64Salt ; Test for 64-bit machine
+ lwz r23,pmapVmmExtPhys+4(r3) ; r23 <- VMM pmap extension block paddr
+ lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
+ lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
+ la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r22,r22,r10 ; r22 <- hash page index entry
+ lwz r22,4(r22) ; r22 <- hash page paddr
+ rlwimi r22,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
+ ; r22 <- hash group paddr
+ b gadStart ; Get to it
+
+gad64Salt: ld r23,pmapVmmExtPhys(r3) ; r23 <- VMM pmap extension block paddr
+ ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
+ ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
+ la r22,VMX_HPIDX_OFFSET(r11) ; r22 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r22,r22,r10 ; r22 <- hash page index entry
+ ld r22,0(r22) ; r22 <- hash page paddr
+ insrdi r22,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
+ ; r22 <- hash group paddr
+
+gadStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
+ xor r28,r4,r28 ; Convert guest pmap_t virt->real
+ bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
+ mr r25,r11 ; Save caller's msr image
+
+ la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
+ bl sxlkExclusive ; Get lock exclusive
+
+ mr r31,r22 ; Prepare to search this group
+ li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
+ mtctr r0 ; in this group
+ bt++ pf64Bitb,gad64Search ; Test for 64-bit machine
+
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
+ clrrwi r12,r30,12 ; r12 <- virtual address we're searching for
+ b gad32SrchLp ; Let the search begin!
+
+ .align 5
+gad32SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
+ rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- !(!free && space match)
+ xor r8,r8,r12 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
+ beq gadRelPmap ; Join common path on hit (r31 points to guest mapping)
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz gad32SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrwi r5,r5,12 ; Remove flags from virtual address
+ rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- !(!free && space match)
+ xor r5,r5,r12 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
+ beq gadRelPmap ; Join common path on hit (r31 points to guest mapping)
+ b gadScan ; No joy in our hash group
+
+gad64Search:
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
+ clrrdi r12,r30,12 ; r12 <- virtual address we're searching for
+ b gad64SrchLp ; Let the search begin!
+
+ .align 5
+gad64SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
+ rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- !(!free && space match)
+ xor r8,r8,r12 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && space match && virtual addr match
+ beq gadRelPmap ; Hit, let upper-level redrive sort it out
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz gad64SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrdi r5,r5,12 ; Remove flags from virtual address
+ rlwinm r11,r6,0,mpgFree ; Isolate guest free flag
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- !(!free && space match)
+ xor r5,r5,r12 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && space match && virtual addr match
+ bne gadScan ; No joy in our hash group
+ b gadRelPmap ; Hit, let upper-level redrive sort it out
+
+gadScan: lbz r12,mpgCursor(r22) ; Get group's cursor
+ rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
+ ; Prepare to address slot at cursor
+ li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
+ mtctr r0 ; in this group
+ or r2,r22,r12 ; r2 <- 1st mapping to search
+ lwz r3,mpFlags(r2) ; r3 <- 1st mapping slot's flags
+ li r11,0 ; No dormant entries found yet
+ b gadScanLoop ; Let the search begin!
+
+ .align 5
+gadScanLoop:
+ addi r12,r12,GV_SLOT_SZ ; Calculate next slot number to search
+ rlwinm r12,r12,0,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
+ ; Trim off any carry, wrapping into slot number range
+ mr r31,r2 ; r31 <- current mapping's address
+ or r2,r22,r12 ; r2 <- next mapping to search
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags(r2) ; r3 <- next mapping slot's flags
+ rlwinm. r0,r6,0,mpgFree ; Test free flag
+ bne gadFillMap ; Join common path on hit (r31 points to free mapping)
+ rlwinm r0,r6,0,mpgDormant ; Dormant entry?
+ xori r0,r0,mpgDormant ; Invert dormant flag
+ or. r0,r0,r11 ; Skip all but the first dormant entry we see
+ bne gadNotDorm ; Not dormant or we've already seen one
+ mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
+gadNotDorm: bdnz gadScanLoop ; Iterate
+
+ mr r31,r2 ; r31 <- final mapping's address
+ rlwinm. r0,r6,0,mpgFree ; Test free flag in final mapping
+ bne gadFillMap ; Join common path on hit (r31 points to free mapping)
+ rlwinm r0,r6,0,mpgDormant ; Dormant entry?
+ xori r0,r0,mpgDormant ; Invert dormant flag
+ or. r0,r0,r11 ; Skip all but the first dormant entry we see
+ bne gadCkDormant ; Not dormant or we've already seen one
+ mr r11,r31 ; We'll use this dormant entry if we don't find a free one first
+
+gadCkDormant:
+ mr. r31,r11 ; Get dormant mapping, if any, and test
+ bne gadUpCursor ; Go update the cursor, we'll take the dormant entry
+
+gadSteal:
+ lbz r12,mpgCursor(r22) ; Get group's cursor
+ rlwinm r12,r12,GV_SLOT_SZ_LG2,(GV_SLOT_MASK << GV_SLOT_SZ_LG2)
+ ; Prepare to address slot at cursor
+ or r31,r22,r12 ; r31 <- address of mapping to steal
+
+ bt++ pf64Bitb,gadDscon64 ; Handle 64-bit disconnect separately
+ bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
+ ; r31 <- mapping's physical address
+ ; r3 -> PTE slot physical address
+ ; r4 -> High-order 32 bits of PTE
+ ; r5 -> Low-order 32 bits of PTE
+ ; r6 -> PCA
+ ; r7 -> PCA physical address
+ rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
+ b gadFreePTE ; Join 64-bit path to release the PTE
+gadDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
+ rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
+gadFreePTE: mr. r3,r3 ; Was there a valid PTE?
+ beq- gadUpCursor ; No valid PTE, we're almost done
+ lis r0,0x8000 ; Prepare free bit for this slot
+ srw r0,r0,r2 ; Position free bit
+ or r6,r6,r0 ; Set it in our PCA image
+ lwz r8,mpPte(r31) ; Get PTE pointer
+ rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
+ stw r8,mpPte(r31) ; Save invalidated PTE pointer
+ eieio ; Synchronize all previous updates (mapInvPtexx didn't)
+ stw r6,0(r7) ; Update PCA and unlock the PTEG
+
+gadUpCursor:
+ rlwinm r12,r31,(32-GV_SLOT_SZ_LG2),GV_SLOT_MASK
+ ; Recover slot number from stolen mapping's address
+ addi r12,r12,1 ; Increment slot number
+ rlwinm r12,r12,0,GV_SLOT_MASK ; Clip to slot number range
+ stb r12,mpgCursor(r22) ; Update group's cursor
+
+ lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
+ bl mapFindLockPN ; Find 'n' lock this page's physent
+ mr. r26,r3 ; Got lock on our physent?
+ beq-- gadBadPLock ; No, time to bail out
+
+ crset cr1_eq ; cr1_eq <- previous link is the anchor
+ bt++ pf64Bitb,gadRemove64 ; Use 64-bit version on 64-bit machine
+ la r11,ppLink+4(r26) ; Point to chain anchor
+ lwz r9,ppLink+4(r26) ; Get chain anchor
+ rlwinm. r9,r9,0,~ppFlags ; Remove flags, yielding 32-bit physical chain pointer
+gadRemLoop: beq- gadPEMissMiss ; End of chain, this is not good
+ cmplw r9,r31 ; Is this the mapping to remove?
+ lwz r8,mpAlias+4(r9) ; Get forward chain pointer
+ bne gadRemNext ; No, chain onward
+ bt cr1_eq,gadRemRetry ; Mapping to remove is chained from anchor
+ stw r8,0(r11) ; Unchain gpv->phys mapping
+ b gadDelDone ; Finish deleting mapping
+gadRemRetry:
+ lwarx r0,0,r11 ; Get previous link
+ rlwimi r0,r8,0,~ppFlags ; Insert new forward pointer whilst preserving flags
+ stwcx. r0,0,r11 ; Update previous link
+ bne- gadRemRetry ; Lost reservation, retry
+ b gadDelDone ; Finish deleting mapping
+
+gadRemNext: la r11,mpAlias+4(r9) ; Point to (soon to be) previous link
+ crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
+ mr. r9,r8 ; Does next entry exist?
+ b gadRemLoop ; Carry on
+
+gadRemove64:
+ li r7,ppLFAmask ; Get mask to clean up mapping pointer
+ rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
+ la r11,ppLink(r26) ; Point to chain anchor
+ ld r9,ppLink(r26) ; Get chain anchor
+ andc. r9,r9,r7 ; Remove flags, yielding 64-bit physical chain pointer
+gadRem64Lp: beq-- gadPEMissMiss ; End of chain, this is not good
+ cmpld r9,r31 ; Is this the mapping to remove?
+ ld r8,mpAlias(r9) ; Get forward chain pointer
+ bne gadRem64Nxt ; Not mapping to remove, chain on, dude
+ bt cr1_eq,gadRem64Rt ; Mapping to remove is chained from anchor
+ std r8,0(r11) ; Unchain gpv->phys mapping
+ b gadDelDone ; Finish deleting mapping
+gadRem64Rt: ldarx r0,0,r11 ; Get previous link
+ and r0,r0,r7 ; Get flags
+ or r0,r0,r8 ; Insert new forward pointer
+ stdcx. r0,0,r11 ; Slam it back in
+ bne-- gadRem64Rt ; Lost reservation, retry
+ b gadDelDone ; Finish deleting mapping
+
+ .align 5
+gadRem64Nxt:
+ la r11,mpAlias(r9) ; Point to (soon to be) previous link
+ crclr cr1_eq ; ~cr1_eq <- Previous link is not the anchor
+ mr. r9,r8 ; Does next entry exist?
+ b gadRem64Lp ; Carry on
+
+gadDelDone:
+ mr r3,r26 ; Get physent address
+ bl mapPhysUnlock ; Unlock physent chain
+
+gadFillMap:
+ lwz r12,pmapSpace(r28) ; Get guest space id number
+ li r2,0 ; Get a zero
+ stw r24,mpFlags(r31) ; Set mapping's flags
+ sth r12,mpSpace(r31) ; Set mapping's space id number
+ stw r2,mpPte(r31) ; Set mapping's pte pointer invalid
+ stw r29,mpPAddr(r31) ; Set mapping's physical address
+ bt++ pf64Bitb,gadVA64 ; Use 64-bit version on 64-bit machine
+ stw r30,mpVAddr+4(r31) ; Set mapping's virtual address (w/flags)
+ b gadChain ; Continue with chaining mapping to physent
+gadVA64: std r30,mpVAddr(r31) ; Set mapping's virtual address (w/flags)
+
+gadChain: mr r3,r29 ; r3 <- physical frame address
+ bl mapFindLockPN ; Find 'n' lock this page's physent
+ mr. r26,r3 ; Got lock on our physent?
+ beq-- gadBadPLock ; No, time to bail out
+
+ bt++ pf64Bitb,gadChain64 ; Use 64-bit version on 64-bit machine
+ lwz r12,ppLink+4(r26) ; Get forward chain
+ rlwinm r11,r12,0,~ppFlags ; Get physent's forward pointer sans flags
+ rlwimi r12,r31,0,~ppFlags ; Insert new mapping, preserve physent flags
+ stw r11,mpAlias+4(r31) ; New mapping will head chain
+ stw r12,ppLink+4(r26) ; Point physent to new mapping
+ b gadFinish ; All over now...
+
+gadChain64: li r7,ppLFAmask ; Get mask to clean up mapping pointer
+ rotrdi r7,r7,ppLFArrot ; Rotate clean up mask to get 0xF0000000000000000F
+ ld r12,ppLink(r26) ; Get forward chain
+ andc r11,r12,r7 ; Get physent's forward chain pointer sans flags
+ and r12,r12,r7 ; Isolate pointer's flags
+ or r12,r12,r31 ; Insert new mapping's address forming pointer
+ std r11,mpAlias(r31) ; New mapping will head chain
+ std r12,ppLink(r26) ; Point physent to new mapping
+
+gadFinish: eieio ; Ensure new mapping is completely visible
+
+gadRelPhy: mr r3,r26 ; r3 <- physent addr
+ bl mapPhysUnlock ; Unlock physent chain
+
+gadRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
+ bl sxlkUnlock ; Release host pmap search lock
+
+ bt++ pf64Bitb,gadRtn64 ; Handle 64-bit separately
+ mtmsr r25 ; Restore 'rupts, translation
+ isync ; Throw a small wrench into the pipeline
+ b gadPopFrame ; Nothing to do now but pop a frame and return
+gadRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
+gadPopFrame:
+ lwz r0,(FM_ALIGN(gadStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Get caller's return address
+ lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
+ lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
+ lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
+ lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
+ mtlr r0 ; Prepare return address
+ lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
+ lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
+ lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
+ lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
+ lwz r23,FM_ARG0+0x20(r1) ; Restore non-volatile r23
+ lwz r22,FM_ARG0+0x24(r1) ; Restore non-volatile r22
+ lwz r1,0(r1) ; Pop stack frame
+ blr ; Return to caller
+
+gadPEMissMiss:
+gadBadPLock:
+ lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
+ ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
+ li r3,failMapping ; The BOMB, Dmitri.
+ sc ; The hydrogen bomb.
+
+
+;
+; Guest shadow assist -- suspend a guest mapping
+;
+; Suspends a guest mapping.
+;
+; Parameters:
+; r3 : address of host pmap, 32-bit kernel virtual address
+; r4 : address of guest pmap, 32-bit kernel virtual address
+; r5 : guest virtual address, high-order 32 bits
+; r6 : guest virtual address, low-order 32 bits
+;
+; Non-volatile register usage:
+; r26 : VMM extension block's physical address
+; r27 : host pmap physical address
+; r28 : guest pmap physical address
+; r29 : caller's msr image from mapSetUp
+; r30 : guest virtual address
+; r31 : gva->phys mapping's physical address
+;
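+; Overview -- a non-normative sketch of the suspend path implemented below:
+;
+;   slot = shadow hash lookup of (guest space id, guest vaddr),
+;          skipping entries that are already free or dormant;
+;   if (found) {
+;       disconnect and invalidate the slot's PTE, if any;
+;       mark the mapping dormant;
+;   }
+;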
+
+ .align 5
+ .globl EXT(hw_susp_map_gv)
+
+LEXT(hw_susp_map_gv)
+
+#define gsuStackSize ((31-26+1)*4)+4
+
+ stwu r1,-(FM_ALIGN(gsuStackSize)+FM_SIZE)(r1)
+ ; Mint a new stack frame
+ mflr r0 ; Get caller's return address
+ mfsprg r11,2 ; Get feature flags
+ mtcrf 0x02,r11 ; Insert feature flags into cr6
+ stw r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Save caller's return address
+ stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
+ stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
+ stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
+ stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
+ stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
+ stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
+
+ rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
+
+ lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
+ lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
+ bt++ pf64Bitb,gsu64Salt ; Test for 64-bit machine
+
+ lwz r26,pmapVmmExtPhys+4(r3) ; r26 <- VMM pmap extension block paddr
+ lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
+ lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
+ la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r31,r31,r10 ; r31 <- hash page index entry
+ lwz r31,4(r31) ; r31 <- hash page paddr
+ rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
+ ; r31 <- hash group paddr
+ b gsuStart ; Get to it
+gsu64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
+ ld r26,pmapVmmExtPhys(r3) ; r26 <- VMM pmap extension block paddr
+ ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
+ ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
+ la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r31,r31,r10 ; r31 <- hash page index entry
+ ld r31,0(r31) ; r31 <- hash page paddr
+ insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
+ ; r31 <- hash group paddr
+
+gsuStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
+ xor r28,r4,r28 ; Convert guest pmap_t virt->real
+ bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
+ mr r29,r11 ; Save caller's msr image
+
+ la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
+ bl sxlkExclusive ; Get lock exclusive
+
+ li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
+ mtctr r0 ; in this group
+ bt++ pf64Bitb,gsu64Search ; Test for 64-bit machine
+
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
+ b gsu32SrchLp ; Let the search begin!
+
+ .align 5
+gsu32SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
+ xor r8,r8,r30 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz gsu32SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrwi r5,r5,12 ; Remove flags from virtual address
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
+ xor r5,r5,r30 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
+ b gsuSrchMiss ; No joy in our hash group
+
+gsu64Search:
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
+ b gsu64SrchLp ; Let the search begin!
+
+ .align 5
+gsu64SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
+ xor r8,r8,r30 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gsuSrchHit ; Join common path on hit (r31 points to guest mapping)
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz gsu64SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrdi r5,r5,12 ; Remove flags from virtual address
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
+ xor r5,r5,r30 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ bne gsuSrchMiss ; No joy in our hash group
+
+gsuSrchHit:
+ bt++ pf64Bitb,gsuDscon64 ; Handle 64-bit disconnect separately
+ bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
+ ; r31 <- mapping's physical address
+ ; r3 -> PTE slot physical address
+ ; r4 -> High-order 32 bits of PTE
+ ; r5 -> Low-order 32 bits of PTE
+ ; r6 -> PCA
+ ; r7 -> PCA physical address
+ rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
+ b gsuFreePTE ; Join 64-bit path to release the PTE
+gsuDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
+ rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
+gsuFreePTE: mr. r3,r3 ; Was there a valid PTE?
+ beq- gsuNoPTE ; No valid PTE, we're almost done
+ lis r0,0x8000 ; Prepare free bit for this slot
+ srw r0,r0,r2 ; Position free bit
+ or r6,r6,r0 ; Set it in our PCA image
+ lwz r8,mpPte(r31) ; Get PTE pointer
+ rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
+ stw r8,mpPte(r31) ; Save invalidated PTE pointer
+ eieio ; Synchronize all previous updates (mapInvPtexx didn't)
+ stw r6,0(r7) ; Update PCA and unlock the PTEG
+
+gsuNoPTE: lwz r3,mpFlags(r31) ; Get mapping's flags
+ ori r3,r3,mpgDormant ; Mark entry dormant
+ stw r3,mpFlags(r31) ; Save updated flags
+ eieio ; Ensure update is visible when we unlock
+
+gsuSrchMiss:
+ la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
+ bl sxlkUnlock ; Release host pmap search lock
+
+ bt++ pf64Bitb,gsuRtn64 ; Handle 64-bit separately
+ mtmsr r29 ; Restore 'rupts, translation
+ isync ; Throw a small wrench into the pipeline
+ b gsuPopFrame ; Nothing to do now but pop a frame and return
+gsuRtn64: mtmsrd r29 ; Restore 'rupts, translation, 32-bit mode
+gsuPopFrame:
+ lwz r0,(FM_ALIGN(gsuStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Get caller's return address
+ lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
+ lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
+ lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
+ lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
+ mtlr r0 ; Prepare return address
+ lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
+ lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
+ lwz r1,0(r1) ; Pop stack frame
+ blr ; Return to caller
+
+;
+; Guest shadow assist -- test guest mapping reference and change bits
+;
+; Locates the specified guest mapping, and if it exists gathers its reference
+; and change bits, optionally resetting them.
+;
+; Parameters:
+; r3 : address of host pmap, 32-bit kernel virtual address
+; r4 : address of guest pmap, 32-bit kernel virtual address
+; r5 : guest virtual address, high-order 32 bits
+; r6 : guest virtual address, low-order 32 bits
+; r7 : reset boolean
+;
+; Non-volatile register usage:
+; r24 : VMM extension block's physical address
+; r25 : return code (w/reference and change bits)
+; r26 : reset boolean
+; r27 : host pmap physical address
+; r28 : guest pmap physical address
+; r29 : caller's msr image from mapSetUp
+; r30 : guest virtual address
+; r31 : gva->phys mapping's physical address
+;
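+; Illustrative sketch (comments only, not assembled): in rough C terms,
+; assuming 'mp' is the located guest mapping and 'reset' is the boolean in r7:
+;
+;   rc = pte_low & (mpR | mpC);          /* gather ref/change bits           */
+;   if (reset) {
+;       mp->mpVAddr &= ~(mpR | mpC);     /* clear the mapping's copy         */
+;       pte_low     &= ~(mpR | mpC);     /* clear the PTE's copy as well     */
+;   }
+;   return rc;                           /* mapRtNotFnd if nothing was found */
+;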
+
+ .align 5
+ .globl EXT(hw_test_rc_gv)
+
+LEXT(hw_test_rc_gv)
+
+#define gtdStackSize ((31-24+1)*4)+4
+
+ stwu r1,-(FM_ALIGN(gtdStackSize)+FM_SIZE)(r1)
+ ; Mint a new stack frame
+ mflr r0 ; Get caller's return address
+ mfsprg r11,2 ; Get feature flags
+ mtcrf 0x02,r11 ; Insert feature flags into cr6
+ stw r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Save caller's return address
+ stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
+ stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
+ stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
+ stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
+ stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
+ stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
+ stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
+ stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
+
+ rlwinm r30,r6,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
+
+ lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
+ lwz r9,pmapSpace(r4) ; r9 <- guest space ID number
+
+ bt++ pf64Bitb,gtd64Salt ; Test for 64-bit machine
+
+ lwz r24,pmapVmmExtPhys+4(r3) ; r24 <- VMM pmap extension block paddr
+ lwz r27,pmapvr+4(r3) ; Get 32-bit virt<->real host pmap conversion salt
+ lwz r28,pmapvr+4(r4) ; Get 32-bit virt<->real guest pmap conversion salt
+ la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r31,r31,r10 ; r31 <- hash page index entry
+ lwz r31,4(r31) ; r31 <- hash page paddr
+ rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
+ ; r31 <- hash group paddr
+ b gtdStart ; Get to it
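+
+; Illustrative sketch (comments only, not assembled): the shadow hash group
+; lookup in rough C, where 'hash_page_index' stands for the table at
+; VMX_HPIDX_OFFSET and 'page_index_of'/'group_offset_of' stand in for the
+; GV_HPAGE_* and GV_HGRP_* shift/mask pairs used above:
+;
+;   hash     = space_id ^ (gva >> 12);                 /* per-page hash      */
+;   page_pa  = hash_page_index[page_index_of(hash)];   /* hash table page    */
+;   group_pa = page_pa | group_offset_of(hash);        /* group within page  */
+;
+; The group is then scanned linearly: GV_SLOTS mappings of GV_SLOT_SZ bytes.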
+
+gtd64Salt: rldimi r30,r5,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
+ ld r24,pmapVmmExtPhys(r3) ; r24 <- VMM pmap extension block paddr
+ ld r27,pmapvr(r3) ; Get 64-bit virt<->real host pmap conversion salt
+ ld r28,pmapvr(r4) ; Get 64-bit virt<->real guest pmap conversion salt
+ la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r31,r31,r10 ; r31 <- hash page index entry
+ ld r31,0(r31) ; r31 <- hash page paddr
+ insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
+ ; r31 <- hash group paddr
+
+gtdStart: xor r27,r3,r27 ; Convert host pmap_t virt->real
+ xor r28,r4,r28 ; Convert guest pmap_t virt->real
+ mr r26,r7 ; Save reset boolean
+ bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
+ mr r29,r11 ; Save caller's msr image
+
+ la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
+ bl sxlkExclusive ; Get lock exclusive
+
+ li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
+ mtctr r0 ; in this group
+ bt++ pf64Bitb,gtd64Search ; Test for 64-bit machine
+
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
+ b gtd32SrchLp ; Let the search begin!
+
+ .align 5
+gtd32SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
+ xor r8,r8,r30 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz gtd32SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrwi r5,r5,12 ; Remove flags from virtual address
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
+ xor r5,r5,r30 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
+ b gtdSrchMiss ; No joy in our hash group
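+
+; Illustrative sketch (comments only, not assembled): every slot in the hash
+; group is tested with the same predicate, in rough C:
+;
+;   hit = !(slot->mpFlags & (mpgFree | mpgDormant))    /* live guest mapping */
+;      && (slot->mpSpace == guest_space_id)            /* same address space */
+;      && ((slot->mpVAddr & ~0xFFFULL) == gva);        /* same guest page    */
+;
+; The xor/or chains above fold all three tests into a single cr0_eq so each
+; slot costs one conditional branch.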
+
+gtd64Search:
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
+ b gtd64SrchLp ; Let the search begin!
+
+ .align 5
+gtd64SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
+ xor r8,r8,r30 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gtdSrchHit ; Join common path on hit (r31 points to guest mapping)
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz gtd64SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrdi r5,r5,12 ; Remove flags from virtual address
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
+ xor r5,r5,r30 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ bne gtdSrchMiss ; No joy in our hash group
+
+gtdSrchHit:
+ bt++ pf64Bitb,gtdDo64 ; Split for 64 bit
+
+ bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
+
+ cmplwi cr1,r26,0 ; Do we want to clear RC?
+ lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
+ mr. r3,r3 ; Was there a previously valid PTE?
+ li r0,lo16(mpR|mpC) ; Get bits to clear
+
+ and r25,r5,r0 ; Copy RC bits into result
+ beq++ cr1,gtdNoClr32 ; Nope...
+
+ andc r12,r12,r0 ; Clear mapping copy of RC
+ andc r5,r5,r0 ; Clear PTE copy of RC
+ sth r12,mpVAddr+6(r31) ; Set the new RC in mapping
+
+gtdNoClr32: beq-- gtdNoOld32 ; No previously valid PTE...
+
+ sth r5,6(r3) ; Store updated RC in PTE
+ eieio ; Make sure we do not reorder
+ stw r4,0(r3) ; Revalidate the PTE
+
+ eieio ; Make sure all updates come first
+ stw r6,0(r7) ; Unlock PCA
+
+gtdNoOld32: la r3,pmapSXlk(r27) ; Point to the pmap search lock
+ bl sxlkUnlock ; Unlock the search list
+ b gtdR32 ; Join common...
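+
+; Illustrative sketch (comments only, not assembled): when a valid PTE was
+; found, it is rewritten and revalidated with explicit ordering, roughly
+; (illustrative field names):
+;
+;   pte->rc_half = new_rc;       /* updated R/C into the PTE's RC halfword   */
+;   eieio();                     /* RC update must reach memory first        */
+;   pte->word0   = valid_word0;  /* revalidate the PTE (std on 64-bit)       */
+;   eieio();                     /* all PTE updates before the unlock        */
+;   *pca         = pca_image;    /* unlock the PTEG                          */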
+
+ .align 5
+
+
+gtdDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
+
+ cmplwi cr1,r26,0 ; Do we want to clear RC?
+ lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
+ mr. r3,r3 ; Was there a previously valid PTE?
+ li r0,lo16(mpR|mpC) ; Get bits to clear
+
+ and r25,r5,r0 ; Copy RC bits into result
+ beq++ cr1,gtdNoClr64 ; Nope...
+
+ andc r12,r12,r0 ; Clear mapping copy of RC
+ andc r5,r5,r0 ; Clear PTE copy of RC
+ sth r12,mpVAddr+6(r31) ; Set the new RC
+
+gtdNoClr64: beq-- gtdNoOld64 ; No previously valid PTE...
+
+ sth r5,14(r3) ; Store updated RC
+ eieio ; Make sure we do not reorder
+ std r4,0(r3) ; Revalidate the PTE
+
+ eieio ; Make sure all updates come first
+ stw r6,0(r7) ; Unlock PCA
+
+gtdNoOld64: la r3,pmapSXlk(r27) ; Point to the pmap search lock
+ bl sxlkUnlock ; Unlock the search list
+ b gtdR64 ; Join common...
+
+gtdSrchMiss:
+ la r3,pmapSXlk(r27) ; Point to the pmap search lock
+ bl sxlkUnlock ; Unlock the search list
+ li r25,mapRtNotFnd ; Get ready to return not found
+ bt++ pf64Bitb,gtdR64 ; Test for 64-bit machine
+
+gtdR32: mtmsr r29 ; Restore caller's msr image
+ isync
+ b gtdEpilog
+
+gtdR64: mtmsrd r29 ; Restore caller's msr image
+
+gtdEpilog: lwz r0,(FM_ALIGN(gtdStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Get caller's return address
+ mr r3,r25 ; Get return code
+ lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
+ lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
+ lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
+ lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
+ mtlr r0 ; Prepare return address
+ lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
+ lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
+ lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
+ lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
+ lwz r1,0(r1) ; Pop stack frame
+ blr ; Return to caller
+
+;
+; Guest shadow assist -- convert guest to host virtual address
+;
+; Locates the specified guest mapping, and if it exists locates the
+; first mapping belonging to its host on the physical chain and returns
+; its virtual address.
+;
+; Note that if there are multiple mappings belonging to this host
+; chained to the physent to which the guest mapping is chained, then
+; host virtual aliases exist for this physical address. If host aliases
+; exist, then we select the first on the physent chain, making it
+; unpredictable which of the two or more possible host virtual addresses
+; will be returned.
+;
+; Parameters:
+; r3 : address of guest pmap, 32-bit kernel virtual address
+; r4 : guest virtual address, high-order 32 bits
+; r5 : guest virtual address, low-order 32 bits
+;
+; Non-volatile register usage:
+; r24 : physent physical address
+; r25 : VMM extension block's physical address
+; r26 : host virtual address
+; r27 : host pmap physical address
+; r28 : guest pmap physical address
+; r29 : caller's msr image from mapSetUp
+; r30 : guest virtual address
+; r31 : gva->phys mapping's physical address
+;
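+; Illustrative sketch (comments only, not assembled) of the overall flow,
+; in rough C with illustrative names:
+;
+;   mp = find_guest_mapping(gpmap, gva);         /* shadow hash lookup       */
+;   if (mp == NULL) return -1;
+;   pp = phys_entry_of(mp->mpPAddr);             /* lock the page's physent  */
+;   for (m = pp->first; m != NULL; m = m->next)  /* walk the alias chain     */
+;       if (m->type == mpNormal && m->mpSpace == host_space_id)
+;           return m->mpVAddr & ~mpHWFlags;      /* first host alias wins    */
+;   return -1;                                   /* no host virtual address  */
+;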
+
+ .align 5
+ .globl EXT(hw_gva_to_hva)
+
+LEXT(hw_gva_to_hva)
+
+#define gthStackSize ((31-24+1)*4)+4
+
+ stwu r1,-(FM_ALIGN(gthStackSize)+FM_SIZE)(r1)
+ ; Mint a new stack frame
+ mflr r0 ; Get caller's return address
+ mfsprg r11,2 ; Get feature flags
+ mtcrf 0x02,r11 ; Insert feature flags into cr6
+ stw r0,(FM_ALIGN(gthStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Save caller's return address
+ stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
+ stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
+ stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
+ stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
+ stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
+ stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
+ stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
+ stw r24,FM_ARG0+0x1C(r1) ; Save non-volatile r24
+
+ rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
+
+ lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
+ lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
+
+ bt++ pf64Bitb,gth64Salt ; Test for 64-bit machine
+
+ lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
+ lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
+ lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
+ la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r31,r31,r10 ; r31 <- hash page index entry
+ lwz r31,4(r31) ; r31 <- hash page paddr
+ rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
+ ; r31 <- hash group paddr
+ b gthStart ; Get to it
+
+gth64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
+ ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
+ ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
+ ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
+ la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r31,r31,r10 ; r31 <- hash page index entry
+ ld r31,0(r31) ; r31 <- hash page paddr
+ insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
+ ; r31 <- hash group paddr
+
+gthStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
+ bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
+ mr r29,r11 ; Save caller's msr image
+
+ la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
+ bl sxlkExclusive ; Get lock exclusive
+
+ li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
+ mtctr r0 ; in this group
+ bt++ pf64Bitb,gth64Search ; Test for 64-bit machine
+
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
+ b gth32SrchLp ; Let the search begin!
+
+ .align 5
+gth32SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
+ xor r8,r8,r30 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz gth32SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrwi r5,r5,12 ; Remove flags from virtual address
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
+ xor r5,r5,r30 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
+ b gthSrchMiss ; No joy in our hash group
+
+gth64Search:
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
+ b gth64SrchLp ; Let the search begin!
+
+ .align 5
+gth64SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
+ xor r8,r8,r30 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gthSrchHit ; Join common path on hit (r31 points to guest mapping)
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz gth64SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrdi r5,r5,12 ; Remove flags from virtual address
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
+ xor r5,r5,r30 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ bne gthSrchMiss ; No joy in our hash group
+
+gthSrchHit: lwz r3,mpPAddr(r31) ; r3 <- physical 4K-page number
+ bl mapFindLockPN ; Find 'n' lock this page's physent
+ mr. r24,r3 ; Got lock on our physent?
+ beq-- gthBadPLock ; No, time to bail out
+
+ bt++ pf64Bitb,gthPFnd64 ; 64-bit version of physent chain search
+
+ lwz r9,ppLink+4(r24) ; Get first mapping on physent
+ lwz r6,pmapSpace(r27) ; Get host pmap's space id number
+ rlwinm r9,r9,0,~ppFlags ; Be-gone, unsightly flags
+gthPELoop: mr. r12,r9 ; Got a mapping to look at?
+ beq- gthPEMiss ; Nope, we've missed hva->phys mapping
+ lwz r7,mpFlags(r12) ; Get mapping's flags
+ lhz r4,mpSpace(r12) ; Get mapping's space id number
+ lwz r26,mpVAddr+4(r12) ; Get mapping's virtual address
+ lwz r9,mpAlias+4(r12) ; Next mapping in physent alias chain
+
+ rlwinm r0,r7,0,mpType ; Isolate mapping's type
+ rlwinm r26,r26,0,~mpHWFlags ; Bye-bye unsightly flags
+ xori r0,r0,mpNormal ; Normal mapping?
+ xor r4,r4,r6 ; Compare w/ host space id number
+ or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
+ beq gthPEHit ; Hit
+ b gthPELoop ; Iterate
+
+gthPFnd64: li r0,ppLFAmask ; Get mask to clean up mapping pointer
+ rotrdi r0,r0,ppLFArrot ; Rotate clean up mask to get 0xF00000000000000F
+ ld r9,ppLink(r24) ; Get first mapping on physent
+ lwz r6,pmapSpace(r27) ; Get host pmap's space id number
+ andc r9,r9,r0 ; Cleanup mapping pointer
+gthPELp64: mr. r12,r9 ; Got a mapping to look at?
+ beq-- gthPEMiss ; Nope, we've missed hva->phys mapping
+ lwz r7,mpFlags(r12) ; Get mapping's flags
+ lhz r4,mpSpace(r12) ; Get mapping's space id number
+ ld r26,mpVAddr(r12) ; Get mapping's virtual address
+ ld r9,mpAlias(r12) ; Next mapping physent alias chain
+ rlwinm r0,r7,0,mpType ; Isolate mapping's type
+ rldicr r26,r26,0,mpHWFlagsb-1 ; Bye-bye unsightly flags
+ xori r0,r0,mpNormal ; Normal mapping?
+ xor r4,r4,r6 ; Compare w/ host space id number
+ or. r0,r0,r4 ; cr0_eq <- (normal && space id hit)
+ beq gthPEHit ; Hit
+ b gthPELp64 ; Iterate
+
+ .align 5
+gthPEMiss: mr r3,r24 ; Get physent's address
+ bl mapPhysUnlock ; Unlock physent chain
+gthSrchMiss:
+ la r3,pmapSXlk(r27) ; Get host pmap search lock address
+ bl sxlkUnlock ; Release host pmap search lock
+ li r3,-1 ; Return 64-bit -1
+ li r4,-1
+ bt++ pf64Bitb,gthEpi64 ; Take 64-bit exit
+ b gthEpi32 ; Take 32-bit exit
+
+ .align 5
+gthPEHit: mr r3,r24 ; Get physent's address
+ bl mapPhysUnlock ; Unlock physent chain
+ la r3,pmapSXlk(r27) ; Get host pmap search lock address
+ bl sxlkUnlock ; Release host pmap search lock
+
+ bt++ pf64Bitb,gthR64 ; Test for 64-bit machine
+
+gthR32: li r3,0 ; High-order 32 bits host virtual address
+ mr r4,r26 ; Low-order 32 bits host virtual address
+gthEpi32: mtmsr r29 ; Restore caller's msr image
+ isync
+ b gthEpilog
+
+ .align 5
+gthR64: srdi r3,r26,32 ; High-order 32 bits host virtual address
+ clrldi r4,r26,32 ; Low-order 32 bits host virtual address
+gthEpi64: mtmsrd r29 ; Restore caller's msr image
+
+gthEpilog: lwz r0,(FM_ALIGN(gthStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Get caller's return address
+ lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
+ lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
+ lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
+ lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
+ mtlr r0 ; Prepare return address
+ lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
+ lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
+ lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
+ lwz r24,FM_ARG0+0x1C(r1) ; Restore non-volatile r24
+ lwz r1,0(r1) ; Pop stack frame
+ blr ; Return to caller
+
+gthBadPLock:
+ lis r0,hi16(Choke) ; Dmitri, you know how we've always talked about the
+ ori r0,r0,lo16(Choke) ; possibility of something going wrong with the bomb?
+ li r3,failMapping ; The BOMB, Dmitri.
+ sc ; The hydrogen bomb.
+
+
+;
+; Guest shadow assist -- find a guest mapping
+;
+; Locates the specified guest mapping, and if it exists returns a copy
+; of it.
+;
+; Parameters:
+; r3 : address of guest pmap, 32-bit kernel virtual address
+; r4 : guest virtual address, high-order 32 bits
+; r5 : guest virtual address, low-order 32 bits
+; r6 : 32 byte copy area, 32-bit kernel virtual address
+;
+; Non-volatile register usage:
+; r25 : VMM extension block's physical address
+; r26 : copy area virtual address
+; r27 : host pmap physical address
+; r28 : guest pmap physical address
+; r29 : caller's msr image from mapSetUp
+; r30 : guest virtual address
+; r31 : gva->phys mapping's physical address
+;
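+; Illustrative sketch (comments only, not assembled), in rough C with
+; illustrative names:
+;
+;   mp = find_guest_mapping(gpmap, gva);        /* shadow hash lookup        */
+;   if (mp != NULL) memcpy(copy_area, mp, 32);  /* copy the 32-byte mapping  */
+;   return (mp != NULL) ? mapRtOK : mapRtNotFnd;
+;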
+
+ .align 5
+ .globl EXT(hw_find_map_gv)
+
+LEXT(hw_find_map_gv)
+
+#define gfmStackSize ((31-25+1)*4)+4
+
+ stwu r1,-(FM_ALIGN(gfmStackSize)+FM_SIZE)(r1)
+ ; Mint a new stack frame
+ mflr r0 ; Get caller's return address
+ mfsprg r11,2 ; Get feature flags
+ mtcrf 0x02,r11 ; Insert feature flags into cr6
+ stw r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Save caller's return address
+ stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
+ stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
+ stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
+ stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
+ stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
+ stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
+ stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
+
+ rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
+ mr r26,r6 ; Copy copy buffer vaddr
+
+ lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
+ lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
+
+ bt++ pf64Bitb,gfm64Salt ; Test for 64-bit machine
+
+ lwz r25,pmapVmmExtPhys+4(r3) ; r25 <- VMM pmap extension block paddr
+ lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
+ lwz r27,vmxHostPmapPhys+4(r11) ; Get host pmap physical address
+ la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r31,r31,r10 ; r31 <- hash page index entry
+ lwz r31,4(r31) ; r31 <- hash page paddr
+ rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
+ ; r31 <- hash group paddr
+ b gfmStart ; Get to it
+
+gfm64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
+ ld r25,pmapVmmExtPhys(r3) ; r25 <- VMM pmap extension block paddr
+ ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
+ ld r27,vmxHostPmapPhys(r11) ; Get host pmap physical address
+ la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r31,r31,r10 ; r31 <- hash page index entry
+ ld r31,0(r31) ; r31 <- hash page paddr
+ insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
+ ; r31 <- hash group paddr
+
+gfmStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
+ bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
+ mr r29,r11 ; Save caller's msr image
+
+ la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
+ bl sxlkExclusive ; Get lock exclusive
+
+ li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
+ mtctr r0 ; in this group
+ bt++ pf64Bitb,gfm64Search ; Test for 64-bit machine
+
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
+ b gfm32SrchLp ; Let the search begin!
+
+ .align 5
+gfm32SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
+ xor r8,r8,r30 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz gfm32SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrwi r5,r5,12 ; Remove flags from virtual address
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
+ xor r5,r5,r30 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
+ b gfmSrchMiss ; No joy in our hash group
+
+gfm64Search:
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
+ b gfm64SrchLp ; Let the search begin!
+
+ .align 5
+gfm64SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- !(!free && !dormant && space match)
+ xor r8,r8,r30 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gfmSrchHit ; Join common path on hit (r31 points to guest mapping)
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz gfm64SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrdi r5,r5,12 ; Remove flags from virtual address
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- !(!free && !dormant && space match)
+ xor r5,r5,r30 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ bne gfmSrchMiss ; No joy in our hash group
+
+gfmSrchHit: lwz r5,0(r31) ; Fetch 32 bytes of mapping from physical
+ lwz r6,4(r31) ; +4
+ lwz r7,8(r31) ; +8
+ lwz r8,12(r31) ; +12
+ lwz r9,16(r31) ; +16
+ lwz r10,20(r31) ; +20
+ lwz r11,24(r31) ; +24
+ lwz r12,28(r31) ; +28
+
+ li r31,mapRtOK ; Return found mapping
+
+ la r3,pmapSXlk(r27) ; Get host pmap search lock address
+ bl sxlkUnlock ; Release host pmap search lock
+
+ bt++ pf64Bitb,gfmEpi64 ; Test for 64-bit machine
+
+gfmEpi32: mtmsr r29 ; Restore caller's msr image
+ isync ; A small wrench
+ b gfmEpilog ; and a larger bubble
+
+ .align 5
+gfmEpi64: mtmsrd r29 ; Restore caller's msr image
+
+gfmEpilog: mr. r3,r31 ; Copy/test return code
+ bne gfmNotFound ; Skip copy if no mapping found
+
+ stw r5,0(r26) ; Store 32 bytes of mapping into virtual
+ stw r6,4(r26) ; +4
+ stw r7,8(r26) ; +8
+ stw r8,12(r26) ; +12
+ stw r9,16(r26) ; +16
+ stw r10,20(r26) ; +20
+ stw r11,24(r26) ; +24
+ stw r12,28(r26) ; +28
+
+gfmNotFound:
+ lwz r0,(FM_ALIGN(gfmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Get caller's return address
+ lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
+ lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
+ lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
+ lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
+ mtlr r0 ; Prepare return address
+ lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
+ lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
+ lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
+ lwz r1,0(r1) ; Pop stack frame
+ blr ; Return to caller
+
+ .align 5
+gfmSrchMiss:
+ li r31,mapRtNotFnd ; Indicate mapping not found
+ la r3,pmapSXlk(r27) ; Get host pmap search lock address
+ bl sxlkUnlock ; Release host pmap search lock
+ bt++ pf64Bitb,gfmEpi64 ; Take 64-bit exit
+ b gfmEpi32 ; Take 32-bit exit
+
+
+;
+; Guest shadow assist -- change guest page protection
+;
+; Locates the specified guest mapping, and if it is active (neither free nor
+; dormant), changes its protection.
+;
+; Parameters:
+; r3 : address of guest pmap, 32-bit kernel virtual address
+; r4 : guest virtual address, high-order 32 bits
+; r5 : guest virtual address, low-order 32 bits
+; r6 : guest mapping protection code
+;
+; Non-volatile register usage:
+; r25 : caller's msr image from mapSetUp
+; r26 : guest mapping protection code
+; r27 : host pmap physical address
+; r28 : guest pmap physical address
+; r29 : VMM extension block's physical address
+; r30 : guest virtual address
+; r31 : gva->phys mapping's physical address
+;
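+; Illustrative sketch (comments only, not assembled), in rough C with
+; illustrative names:
+;
+;   mp = find_guest_mapping(gpmap, gva);        /* shadow hash lookup        */
+;   if (mp == NULL) return mapRtNotFnd;
+;   invalidate_pte(mp);                         /* disconnect any live PTE   */
+;   mp->mpVAddr = (mp->mpVAddr & ~mpPP) | prot; /* insert new protection     */
+;   return mapRtOK;                             /* refault picks up new prot */
+;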
+ .align 5
+ .globl EXT(hw_protect_gv)
+
+LEXT(hw_protect_gv)
+
+#define gcpStackSize ((31-24+1)*4)+4
+
+ stwu r1,-(FM_ALIGN(gcpStackSize)+FM_SIZE)(r1)
+ ; Mint a new stack frame
+ mflr r0 ; Get caller's return address
+ mfsprg r11,2 ; Get feature flags
+ mtcrf 0x02,r11 ; Insert feature flags into cr6
+ stw r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Save caller's return address
+ stw r31,FM_ARG0+0x00(r1) ; Save non-volatile r31
+ stw r30,FM_ARG0+0x04(r1) ; Save non-volatile r30
+ stw r29,FM_ARG0+0x08(r1) ; Save non-volatile r29
+ stw r28,FM_ARG0+0x0C(r1) ; Save non-volatile r28
+ stw r27,FM_ARG0+0x10(r1) ; Save non-volatile r27
+ stw r26,FM_ARG0+0x14(r1) ; Save non-volatile r26
+ stw r25,FM_ARG0+0x18(r1) ; Save non-volatile r25
+
+ rlwinm r30,r5,0,0xFFFFF000 ; Clean up low-order 32 bits of guest vaddr
+ mr r26,r6 ; Copy guest mapping protection code
+
+ lwz r11,pmapVmmExt(r3) ; r11 <- VMM pmap extension block vaddr
+ lwz r9,pmapSpace(r3) ; r9 <- guest space ID number
+ bt++ pf64Bitb,gcp64Salt ; Handle 64-bit machine separately
+ lwz r29,pmapVmmExtPhys+4(r3) ; r29 <- VMM pmap extension block paddr
+ lwz r27,vmxHostPmapPhys+4(r11) ; r27 <- host pmap paddr
+ lwz r28,pmapvr+4(r3) ; Get 32-bit virt<->real guest pmap conversion salt
+ la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r31,r31,r10 ; r31 <- hash page index entry
+ lwz r31,4(r31) ; r31 <- hash page paddr
+ rlwimi r31,r11,GV_HGRP_SHIFT,GV_HGRP_MASK
+ ; r31 <- hash group paddr
+ b gcpStart ; Get to it
+
+gcp64Salt: rldimi r30,r4,32,0 ; Insert high-order 32 bits of 64-bit guest vaddr
+ ld r29,pmapVmmExtPhys(r3) ; r29 <- VMM pmap extension block paddr
+ ld r27,vmxHostPmapPhys(r11) ; r27 <- host pmap paddr
+ ld r28,pmapvr(r3) ; Get 64-bit virt<->real guest pmap conversion salt
+ la r31,VMX_HPIDX_OFFSET(r11) ; r31 <- base of hash page physical index
+ srwi r11,r30,12 ; Form shadow hash:
+ xor r11,r11,r9 ; spaceID ^ (vaddr >> 12)
+ rlwinm r10,r11,GV_HPAGE_SHIFT,GV_HPAGE_MASK
+ ; Form index offset from hash page number
+ add r31,r31,r10 ; r31 <- hash page index entry
+ ld r31,0(r31) ; r31 <- hash page paddr
+ insrdi r31,r11,GV_GRPS_PPG_LG2,64-(GV_HGRP_SHIFT+GV_GRPS_PPG_LG2)
+ ; r31 <- hash group paddr
+
+gcpStart: xor r28,r3,r28 ; Convert guest pmap_t virt->real
+ bl EXT(mapSetUp) ; Disable 'rupts, translation, maybe enter 64-bit mode
+ mr r25,r11 ; Save caller's msr image
+
+ la r3,pmapSXlk(r27) ; r3 <- host pmap's search lock address
+ bl sxlkExclusive ; Get lock exclusive
+
+ li r0,(GV_SLOTS - 1) ; Prepare to iterate over mapping slots
+ mtctr r0 ; in this group
+ bt++ pf64Bitb,gcp64Search ; Test for 64-bit machine
+
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ lwz r5,mpVAddr+4(r31) ; r5 <- 1st mapping slot's virtual address
+ b gcp32SrchLp ; Let the search begin!
+
+ .align 5
+gcp32SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrwi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ lwz r5,mpVAddr+4+GV_SLOT_SZ(r31); r5 <- next mapping slot's virtual addr
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- free || dormant || !space match
+ xor r8,r8,r30 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz gcp32SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrwi r5,r5,12 ; Remove flags from virtual address
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- free || dormant || !space match
+ xor r5,r5,r30 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
+ b gcpSrchMiss ; No joy in our hash group
+
+gcp64Search:
+ lwz r3,mpFlags(r31) ; r3 <- 1st mapping slot's flags
+ lhz r4,mpSpace(r31) ; r4 <- 1st mapping slot's space ID
+ ld r5,mpVAddr(r31) ; r5 <- 1st mapping slot's virtual address
+ b gcp64SrchLp ; Let the search begin!
+
+ .align 5
+gcp64SrchLp:
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ lwz r3,mpFlags+GV_SLOT_SZ(r31) ; r3 <- next mapping slot's flags
+ mr r7,r4 ; r7 <- current mapping slot's space ID
+ lhz r4,mpSpace+GV_SLOT_SZ(r31) ; r4 <- next mapping slot's space ID
+ clrrdi r8,r5,12 ; r8 <- current mapping slot's virtual addr w/o flags
+ ld r5,mpVAddr+GV_SLOT_SZ(r31) ; r5 <- next mapping slot's virtual addr
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r7,r7,r9 ; Compare space ID
+ or r0,r11,r7 ; r0 <- free || dormant || !space match
+ xor r8,r8,r30 ; Compare virtual address
+ or. r0,r0,r8 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ beq gcpSrchHit ; Join common path on hit (r31 points to guest mapping)
+
+ addi r31,r31,GV_SLOT_SZ ; r31 <- next mapping slot
+ bdnz gcp64SrchLp ; Iterate
+
+ mr r6,r3 ; r6 <- current mapping slot's flags
+ clrrdi r5,r5,12 ; Remove flags from virtual address
+ andi. r11,r6,mpgFree+mpgDormant ; Isolate guest free and dormant flags
+ xor r4,r4,r9 ; Compare space ID
+ or r0,r11,r4 ; r0 <- free || dormant || !space match
+ xor r5,r5,r30 ; Compare virtual address
+ or. r0,r0,r5 ; cr0_eq <- !free && !dormant && space match && virtual addr match
+ bne gcpSrchMiss ; No joy in our hash group
+
+gcpSrchHit:
+ bt++ pf64Bitb,gcpDscon64 ; Handle 64-bit disconnect separately
+ bl mapInvPte32 ; Disconnect PTE, invalidate, gather ref and change
+ ; r31 <- mapping's physical address
+ ; r3 -> PTE slot physical address
+ ; r4 -> High-order 32 bits of PTE
+ ; r5 -> Low-order 32 bits of PTE
+ ; r6 -> PCA
+ ; r7 -> PCA physical address
+ rlwinm r2,r3,29,29,31 ; Get PTE's slot number in the PTEG (8-byte PTEs)
+ b gcpFreePTE ; Join 64-bit path to release the PTE
+gcpDscon64: bl mapInvPte64 ; Disconnect PTE, invalidate, gather ref and change
+ rlwinm r2,r3,28,29,31 ; Get PTE's slot number in the PTEG (16-byte PTEs)
+gcpFreePTE: mr. r3,r3 ; Was there a valid PTE?
+ beq- gcpSetKey ; No valid PTE, we're almost done
+ lis r0,0x8000 ; Prepare free bit for this slot
+ srw r0,r0,r2 ; Position free bit
+ or r6,r6,r0 ; Set it in our PCA image
+ lwz r8,mpPte(r31) ; Get PTE pointer
+ rlwinm r8,r8,0,~mpHValid ; Make the pointer invalid
+ stw r8,mpPte(r31) ; Save invalidated PTE pointer
+ eieio ; Synchronize all previous updates (mapInvPtexx didn't)
+ stw r6,0(r7) ; Update PCA and unlock the PTEG
+
+gcpSetKey: lwz r0,mpVAddr+4(r31) ; Get va word containing protection bits
+ rlwimi r0,r26,0,mpPP ; Insert new protection bits
+ stw r0,mpVAddr+4(r31) ; Write 'em back
+ eieio ; Ensure previous mapping updates are visible
+ li r31,mapRtOK ; I'm a success
+
+gcpRelPmap: la r3,pmapSXlk(r27) ; r3 <- host pmap search lock phys addr
+ bl sxlkUnlock ; Release host pmap search lock
+
+ mr r3,r31 ; r3 <- result code
+ bt++ pf64Bitb,gcpRtn64 ; Handle 64-bit separately
+ mtmsr r25 ; Restore 'rupts, translation
+ isync ; Throw a small wrench into the pipeline
+ b gcpPopFrame ; Nothing to do now but pop a frame and return
+gcpRtn64: mtmsrd r25 ; Restore 'rupts, translation, 32-bit mode
+gcpPopFrame:
+ lwz r0,(FM_ALIGN(gcpStackSize)+FM_SIZE+FM_LR_SAVE)(r1)
+ ; Get caller's return address
+ lwz r31,FM_ARG0+0x00(r1) ; Restore non-volatile r31
+ lwz r30,FM_ARG0+0x04(r1) ; Restore non-volatile r30
+ lwz r29,FM_ARG0+0x08(r1) ; Restore non-volatile r29
+ lwz r28,FM_ARG0+0x0C(r1) ; Restore non-volatile r28
+ mtlr r0 ; Prepare return address
+ lwz r27,FM_ARG0+0x10(r1) ; Restore non-volatile r27
+ lwz r26,FM_ARG0+0x14(r1) ; Restore non-volatile r26
+ lwz r25,FM_ARG0+0x18(r1) ; Restore non-volatile r25
+ lwz r1,0(r1) ; Pop stack frame
+ blr ; Return to caller
+
+ .align 5
+gcpSrchMiss:
+ li r31,mapRtNotFnd ; Could not locate requested mapping
+ b gcpRelPmap ; Exit through host pmap search lock release
+
+