+rbDone: stw r9,0(r3) ; Unlock
+ mtmsr r0 ; Restore xlation and rupts
+ mr r3,r10 ; Pass back the removed block
+ isync
+ blr ; Return...
+
+/*
+ * hw_select_mappings(struct mappingflush *mappingflush)
+ *
+ * Input: mappingflush structure addr (MFpcaptr supplies the PCA addr)
+ * Output: up to 8 user mappings recorded in the structure
+ *
+ * hw_select_mappings() scans each of the PCA's mapping hash lists and
+ * selects the last user mapping on each list, if one exists.
+ *
+ */
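+
+/*
+ * Illustrative C-level view of the mappingflush structure, inferred
+ * from the MFpcaptr, MFmappingcnt, MFmapping, and MFmappingSize
+ * offsets used below. Field and type names here are hypothetical,
+ * not the kernel's actual declarations:
+ *
+ *	struct mfmapping {
+ *		struct pmap	*pmap;		// stored at entry offset 0
+ *		unsigned int	va;		// rebuilt virtual address, offset 4
+ *	};
+ *
+ *	struct mappingflush {
+ *		unsigned int	pcaptr;		// MFpcaptr: PCA addr for this PTEG
+ *		unsigned int	mappingcnt;	// MFmappingcnt: entries filled in
+ *		struct mfmapping mapping[8];	// MFmapping: up to 8 user mappings
+ *	};
+ */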
+
+ .align 5
+ .globl EXT(hw_select_mappings)
+
+LEXT(hw_select_mappings)
+ mr r5,r3 ; Get the mapping flush addr
+ mfmsr r12 ; Get the MSR
+ rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
+ rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
+ mfsprg r9,2 ; Get feature flags
+ andi. r0,r12,0x7FCF ; Disable translation and interruptions
+ mtcrf 0x04,r9 ; Set the features
+ bt pfNoMSRirb,hvmNoMSR ; No MSR...
+ mtmsr r0
+ isync
+ b hvmNoMSRx
+hvmNoMSR:
+ mr r3,r0 ; Get the new MSR
+ li r0,loadMSR ; Get the MSR setter SC
+ sc
+hvmNoMSRx:
+ mr r0,r12
+ li r11,1 ; Get the locked value
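+ ; Translation and interrupts are now off; r0 holds the caller's
+ ; MSR for the restore at exit. On processors with pfNoMSRirb set,
+ ; the MSR is changed via the loadMSR firmware call above rather
+ ; than with a plain mtmsr.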
+
+hvmptegLckx:
+ lwz r3,MFpcaptr(r5) ; Get the PCA pointer
+ lwarx r10,0,r3 ; Get the PTEG lock
+ mr. r10,r10 ; Is it locked?
+ bne- hvmptegLckwx ; Yeah...
+ stwcx. r11,0,r3 ; Try to take it
+ bne- hvmptegLckx ; Someone else was trying, try again...
+ b hvmptegSXgx ; All done...
+
+ .align 4
+
+hvmptegLckwx:
+ mr. r10,r10 ; Check if it is already held
+ beq+ hvmptegLckx ; It's clear...
+ lwz r10,0(r3) ; Get lock word again...
+ b hvmptegLckwx ; Wait...
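+
+/*
+ * The sequence above is the usual PowerPC load-reserve/store-conditional
+ * spinlock acquire: spin on plain loads while the lock is held, then
+ * retry the lwarx/stwcx. attempt once it appears free. A rough C model,
+ * where compare_and_swap is a hypothetical atomic helper standing in
+ * for the lwarx/stwcx. pair:
+ *
+ *	do {
+ *		while (*lock != 0)			// hvmptegLckwx: spin on plain loads
+ *			continue;
+ *	} while (!compare_and_swap(lock, 0, 1));	// hvmptegLckx: take the lock
+ */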
+
+ .align 4
+
+hvmptegSXgx:
+ isync ; Make sure we haven't used anything yet
+
+ li r11,8 ; Set the hash list count to 8
+
+ lwz r6,PCAhash(r3) ; Load the first mapping hash list
+ la r12,PCAhash(r3) ; Point to the mapping hash area
+ la r4,MFmapping(r5) ; Point to the mapping flush mapping area
+ li r7,0 ; Load zero
+ stw r7,MFmappingcnt(r5) ; Set the current count to 0
+hvmnexthash:
+ li r10,0 ; Mapping test
+
+hvmfindmap:
+ mr. r6,r6 ; Is the current hash list pointer zero?
+ beq hvmfindmapret ; If so, we hit the end of this hash list
+ lwz r7,mmPTEv(r6) ; Pick up our virtual ID
+ rlwinm r8,r7,5,0,19 ; Pick up the low 20 bits of the VSID
+ mr. r8,r8 ; Are the low VSID bits zero (kernel VSID)?
+ beq hvmfindmapnext ; Skip kernel VSIDs
+ rlwinm r8,r7,1,0,3 ; Extract the segment index (EA bits 0-3)
+ rlwinm r9,r7,22,4,9 ; Extract the API (upper 6 bits of the page index)
+ or r8,r8,r9 ; Merge into the virtual address
+ rlwinm r9,r7,31,6,25 ; Position the low VSID bits to line up with the hash
+ xor r9,r9,r3 ; XOR with the PCA address to cancel out the VSID
+ rlwinm r9,r9,6,10,19 ; Extract the low 10 bits of the page index
+ or r8,r8,r9 ; Merge into the virtual address
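+
+ ; The code above rebuilds the user virtual address from the PTE
+ ; virtual word: the segment index (EA bits 0-3) and the API
+ ; (EA bits 4-9) come straight from it, while the low 10 bits of the
+ ; page index (EA bits 10-19) are recovered by XORing the VSID bits
+ ; back out of the PTEG hash encoded in the PCA address.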
+
+ stw r8,4(r4) ; Store the virtual address
+ lwz r8,mmpmap(r6) ; Get the pmap
+ stw r8,0(r4) ; Store the pmap
+ li r10,1 ; Found one
+
+hvmfindmapnext:
+ lwz r6,mmhashnext(r6) ; Pick up next mapping block
+ b hvmfindmap ; Scan the next mapping
+hvmfindmapret:
+ mr. r10,r10 ; Did we find a user mapping?
+ beq hvmnexthashprep ; If not, do not update the mappingflush array
+ lwz r7,MFmappingcnt(r5) ; Get the current count
+ addi r7,r7,1 ; Increment the current count
+ stw r7,MFmappingcnt(r5) ; Store the current count
+ addi r4,r4,MFmappingSize ; Point to the next mapping flush entry
+hvmnexthashprep:
+ addi r12,r12,4 ; Point to the next hash list
+ lwz r6,0(r12) ; Load the head of the next hash list
+ subi r11,r11,1 ; Decrement hash list index
+ mr. r11,r11 ; Test for a remaining hash list
+ bne hvmnexthash ; Loop to scan the next hash list
+
+ li r10,0 ; Get the unlock value
+ stw r10,0(r3) ; Unlock the PTEG
+ mtmsr r0 ; Restore translation and interruptions
+ isync
+ blr ; Return...
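+
+/*
+ * Illustrative C-level sketch of the whole routine, using the
+ * hypothetical names from the structure sketch in the header comment
+ * (vsid_low_bits() and rebuild_va() are stand-ins for the rlwinm/xor
+ * sequences above, not real kernel helpers):
+ *
+ *	pca = mf->pcaptr;
+ *	lock_pteg(pca);				// lwarx/stwcx. spin above
+ *	mf->mappingcnt = 0;
+ *	for (i = 0; i < 8; i++) {		// one pass per PCA hash list
+ *		found = 0;
+ *		for (mp = pca->hash[i]; mp; mp = mp->mmhashnext) {
+ *			if (vsid_low_bits(mp->mmPTEv) == 0)
+ *				continue;		// kernel VSID, skip it
+ *			mf->mapping[mf->mappingcnt].pmap = mp->mmpmap;
+ *			mf->mapping[mf->mappingcnt].va = rebuild_va(mp->mmPTEv, pca);
+ *			found = 1;			// later hits overwrite: last one wins
+ *		}
+ *		if (found)
+ *			mf->mappingcnt++;		// commit the filled slot
+ *	}
+ *	unlock_pteg(pca);
+ */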