- lwz r7,SAVflags(r3) /* Get the flags */
- rlwinm r6,r3,0,0,19 /* Round back down to the savearea page block */
- andis. r7,r7,HIGH_ADDR(SAVinuse) /* Still in use? */
- mfmsr r12 /* Get the MSR */
- bnelr- /* Still in use, just leave... */
- lwz r5,SACvrswap(r6) /* Get the conversion to real */
- lis r10,HIGH_ADDR(EXT(saveanchor)) /* Get the high part of the anchor */
- andi. r11,r12,0x7FCF /* Turn off all translation and 'rupts */
- ori r10,r10,LOW_ADDR(EXT(saveanchor)) /* Bottom half of the anchor */
- mtmsr r11 /* Make the MSR current */
- isync /* Make sure translation is off */
-
- mfsprg r11,1 /* Get the active save area */
- xor r3,r3,r5 /* Get the real address of the savearea */
- cmplw r11,r3 /* Are we trying to toss the active one? */
- xor r6,r6,r5 /* Make the savearea block real also */
- beq- srbigtimepanic /* This is a no-no... */
-
- rlwinm r7,r3,21,31,31 /* Get position of savearea in block */
- lis r8,0x8000 /* Build a bit mask and assume first savearea */
- srw r8,r8,r7 /* Get bit position of the one to deallocate */
-
- lwarx r11,0,r10 ; Touch the lock to pull the line into the cache first
-
-srlck: lwarx r11,0,r10 /* Grab the lock value */
- li r7,1 /* Get the lock value to store */
- mr. r11,r11 /* Is it locked? */
- bne- srlcks /* Yeah, wait for it to clear... */
- stwcx. r7,0,r10 /* Try to seize that there durn lock */
- beq+ srlckd /* Got it... */
- b srlck /* Collision, try again... */
-
-srlcks: lwz r11,SVlock(r10) /* Get that lock in here */
- mr. r11,r11 /* Is it free yet? */
- beq+ srlck /* Yeah, try for it again... */
- b srlcks /* Sniff away... */
-
-srlckd: isync /* Toss preexecutions */
- lwz r11,SACalloc(r6) /* Get the allocation for this block */
- lwz r7,SVinuse(r10) /* Get the in use count */
- or r11,r11,r8 /* Turn on our bit */
- subi r7,r7,1 /* We released one, adjust count */
- cmplw r11,r8 /* Is ours the only one free? */
- stw r7,SVinuse(r10) /* Save out count */
- stw r11,SACalloc(r6) /* Save it out */
- bne+ srtrest /* Nope, then the block is already on the free list */
-
- lwz r11,SVfree(r10) /* Get the old head of the free list */
- stw r6,SVfree(r10) /* Point the head at us now */
- stw r11,SACnext(r6) /* Point us at the old head */
-
-srtrest: li r8,0 /* Get set to clear the savearea lock */
- sync /* Make sure it's all out there */
- stw r8,SVlock(r10) /* Unlock it */
- mtmsr r12 /* Restore interruptions and translation */
- isync
-
-#if 0
- lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
- li r2,0x2204 ; (TEST/DEBUG)
- oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
- sc /* (TEST/DEBUG) */
+ ; Loop over each savearea we are trimming.
+ ; r6 = next savearea to trim
+ ; r7 = last savearea to trim
+ ; r8 = #pages to trim (>0)
+ ; r9 = return address
+ ; r10 = per-proc ptr
+ ; r11 = MSR at entry
+ ; r30 = what SACalloc looks like when all saveareas are free
+ ; r31 = free pool block list
+ ; cr1 = beq set if we just trimmed the last, i.e. if we are done
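+ ;
+ ; The loop body divides each savearea's page offset by SAVsize (640) using
+ ; the 0x00442200 shift table rather than a divide instruction. A worked
+ ; example of the index computation (a sketch, assuming six saveareas per
+ ; 4K pool page):
+ ;   savearea 2, offset 0x0500: n = 0x0500 >> 7 = 10
+ ;     rotate 0x00442200 left by (10 & 0x1E) + 1 = 11, keep the low 3 bits -> 2
+ ;     add the odd bit (10 & 1) = 0 -> index 2
+ ;   savearea 3, offset 0x0780: n = 15
+ ;     rotate left by (15 & 0x1E) + 1 = 15 -> 2; add (15 & 1) = 1 -> index 3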
+
+sttoss: beq+ cr1,stdone ; All done now...
+
+ cmplw cr1,r6,r7 ; Have we finished the loop?
+
+ lis r0,0x0044 ; Get top of table
+ rlwinm r2,r6,0,0,19 ; Back down to the savearea control stuff
+ ori r0,r0,0x2200 ; Finish shift table
+ rlwinm r4,r6,25,27,30 ; Get (addr >> 7) & 0x1E (same as twice high nybble)
+ lwz r5,SACalloc(r2) ; Get the allocation bits
+ addi r4,r4,1 ; Shift 1 extra
+ rlwinm r3,r6,25,31,31 ; Get (addr >> 7) & 1
+ rlwnm r0,r0,r4,29,31 ; Get partial index
+ lis r4,lo16(0x8000) ; Get the bit mask
+ add r0,r0,r3 ; Make the real index
+ srw r4,r4,r0 ; Get the allocation mask
+ or r5,r5,r4 ; Free this entry
+ cmplw r5,r4 ; Is this the only free entry?
+ lwz r6,SAVprev+4(r6) ; Chain to the next trimmed savearea
+ cmplw cr7,r30,r5 ; Does this look empty?
+ stw r5,SACalloc(r2) ; Save back the allocation bits
+ beq- stputpool ; First free entry, go put it into the pool...
+ bne+ cr7,sttoss ; Not an empty block
+
+;
+; We have an empty block. Remove it from the pool list.
+;
+
+ lwz r29,SACflags(r2) ; Get the flags
+ cmplwi cr5,r31,0 ; Is the release list still empty?
+ lwz r28,SACnext+4(r2) ; Get the forward chain
+
+ rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area? (when not, this leaves r0 = 0, needed below)
+ bne- sttoss ; This is a permanent entry, do not try to release...
+
+ lwz r29,SACprev+4(r2) ; and the previous
+ beq- cr5,stnot1st ; Release list is empty, r0 is already 0...
+ lwz r0,SACvrswap+4(r31) ; Load the previous pool page vr conversion
+
+stnot1st: stw r28,SACnext+4(r29) ; Previous guy points to my next
+ xor r0,r0,r31 ; Make the last guy virtual
+ stw r29,SACprev+4(r28) ; Next guy points back to my previous
+ stw r0,SAVprev+4(r2) ; Store the old top virtual as my back chain
+ mr r31,r2 ; My physical is now the head of the chain
+ b sttoss ; Get the next one...
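+;
+; In C-like terms, the unlink and release-chain push above are roughly
+; (a sketch; virt(x) stands for the SACvrswap xor that converts a physical
+; pool page address back to its virtual address):
+;   blk->SACprev->SACnext = blk->SACnext;  // unlink from the pool list
+;   blk->SACnext->SACprev = blk->SACprev;
+;   blk->SAVprev = virt(r31);              // old release head, as virtual
+;   r31 = blk;                             // block is the new release head
+;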
+
+;
+; A pool block that had no free entries now has one. Stick it on the pool list.
+;
+
+stputpool: lwz r28,SVpoolfwd+4(0) ; Get the first guy on the list
+ li r0,saveanchor ; Point to the saveanchor
+ stw r2,SVpoolfwd+4(0) ; Put us on the top of the list
+ stw r28,SACnext+4(r2) ; We point to the old top
+ stw r2,SACprev+4(r28) ; Old top guy points back to us
+ stw r0,SACprev+4(r2) ; Our back points to the anchor
+ b sttoss ; Go on to the next one...
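+;
+; In C-like terms, a push at the head of the doubly linked pool list
+; anchored at saveanchor (a sketch):
+;   blk->SACnext = anchor->SVpoolfwd;   anchor->SVpoolfwd->SACprev = blk;
+;   anchor->SVpoolfwd = blk;            blk->SACprev = anchor;
+;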
+
+
+/*
+ * ***********************
+ * * s a v e T r i m 6 4 *
+ * ***********************
+ *
+ * Handle "save_trim_free" on 64-bit processors. At this point, translation and interrupts
+ * are off, SF is on, the savearea anchor is locked, and:
+ * r8 = #pages to trim (>0)
+ * r9 = return address
+ * r10 = per-proc ptr
+ * r11 = MSR at entry
+ */
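+
+/*
+ * This path mirrors the 32-bit code above, but the link fields (SVfree,
+ * SAVprev, SACnext, SACprev, SVpoolfwd) are doublewords here: the "+4"
+ * offsets the 32-bit code uses to reach the low word of each field are
+ * dropped, and ld/std replace lwz/stw.
+ */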
+
+saveTrim64:
+ ld r7,SVfree(0) ; Get the first on the free list
+ mr r6,r7 ; Save the first one
+ mr r5,r8 ; Save the number we are trimming
+
+sttrimming64:
+ addic. r5,r5,-1 ; Any left to do?
+ ble-- sttrimmed64 ; Nope...
+ ld r7,SAVprev(r7) ; Skip to the next one
+ b sttrimming64 ; Keep going...
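+
+ ; (When the branch above is taken, r6 still points to the first savearea
+ ; to trim and r7 to the r8'th, i.e. the last one to trim.)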
+
+sttrimmed64:
+ ld r5,SAVprev(r7) ; Get the next one (for new head of free list)
+ lwz r4,SVfreecnt(0) ; Get the free count
+ std r5,SVfree(0) ; Set new head
+ sub r4,r4,r8 ; Calculate the new free count
+ li r31,0 ; Show we have no free pool blocks yet
+ crclr cr1_eq ; don't exit the loop before the 1st iteration
+ stw r4,SVfreecnt(0) ; Set new free count
+ lis r30,hi16(sac_empty) ; Get what empty looks like
+
+
+ ; Loop over each savearea we are trimming.
+ ; r6 = next savearea to trim
+ ; r7 = last savearea to trim
+ ; r8 = #pages to trim (>0)
+ ; r9 = return address
+ ; r10 = per-proc ptr
+ ; r11 = MSR at entry
+ ; r30 = what SACalloc looks like when all saveareas are free
+ ; r31 = free pool block list
+ ; cr1 = beq set if we just trimmed the last, i.e. if we are done
+ ;
+ ; WARNING: as in the 32-bit path, this code is doing a divide by 640 (SAVsize).
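+ ; See the worked example above the 32-bit sttoss loop for how the
+ ; 0x00442200 shift table turns (offset >> 7) into offset / 640.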
+
+sttoss64:
+ beq++ cr1,stdone ; All done now...
+
+ cmpld cr1,r6,r7 ; Have we finished the loop?
+
+ lis r0,0x0044 ; Get top of table
+ rldicr r2,r6,0,51 ; r2 <- phys addr of savearea block (with control area)
+ ori r0,r0,0x2200 ; Finish shift table
+ rlwinm r4,r6,25,27,30 ; Get (addr >> 7) & 0x1E (same as twice high nybble)
+ lwz r5,SACalloc(r2) ; Get the allocation bits
+ addi r4,r4,1 ; Shift 1 extra
+ rlwinm r3,r6,25,31,31 ; Get (addr >> 7) & 1
+ rlwnm r0,r0,r4,29,31 ; Get partial index
+ lis r4,lo16(0x8000) ; Get the bit mask
+ add r0,r0,r3 ; Make the real index
+ srw r4,r4,r0 ; Get the allocation mask
+ or r5,r5,r4 ; Free this entry
+ cmplw r5,r4 ; Is this the only free entry?
+ ld r6,SAVprev(r6) ; Chain to the next trimmed savearea
+ cmplw cr7,r30,r5 ; Does this look empty?
+ stw r5,SACalloc(r2) ; Save back the allocation bits
+ beq-- stputpool64 ; First free entry, go put it into the pool...
+ bne++ cr7,sttoss64 ; Not an empty block
+
+; We have an empty block. Remove it from the pool list.
+
+ lwz r29,SACflags(r2) ; Get the flags
+ cmpldi cr5,r31,0 ; Is the release list still empty?
+ ld r28,SACnext(r2) ; Get the forward chain
+
+ rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area? (when not, this leaves r0 = 0, needed below)
+ bne-- sttoss64 ; This is a permanent entry, do not try to release...
+
+ ld r29,SACprev(r2) ; and the previous
+ beq-- cr5,stnot1st64 ; Release list is empty, r0 is already 0...
+ ld r0,SACvrswap(r31) ; Load the previous pool page vr conversion
+
+stnot1st64:
+ std r28,SACnext(r29) ; Previous guy points to my next
+ xor r0,r0,r31 ; Make the last guy virtual
+ std r29,SACprev(r28) ; Next guy points back to my previous
+ std r0,SAVprev(r2) ; Store the old top virtual as my back chain
+ mr r31,r2 ; My physical is now the head of the chain
+ b sttoss64 ; Get the next one...
+
+; A pool block that had no free entries now has one. Stick it on the pool list.
+
+stputpool64:
+ ld r28,SVpoolfwd(0) ; Get the first guy on the list
+ li r0,saveanchor ; Point to the saveanchor
+ std r2,SVpoolfwd(0) ; Put us on the top of the list
+ std r28,SACnext(r2) ; We point to the old top
+ std r2,SACprev(r28) ; Old top guy points back to us
+ std r0,SACprev(r2) ; Our back points to the anchor
+ b sttoss64 ; Go on to the next one...
+
+
+; We are all done. Convert the pool release chain head to virtual, restore
+; everything, and return. This code is used by both the 32-bit and 64-bit paths.
+; r9 = return address
+; r10 = per-proc ptr
+; r11 = MSR at entry
+; r31 = free pool block list
+
+stdone: bl saveunlock ; Unlock the saveanchor and set adjust field
+
+ mr. r3,r31 ; Move the release chain and check whether it is empty
+ li r5,0 ; Assume either V=R or no release chain
+ beq- stnorel ; Nothing to release...
+ lwz r5,SACvrswap+4(r31) ; Get the vr conversion (only need low half if 64-bit)
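+ ; (SACvrswap is the xor difference between a pool page's virtual and
+ ; physical addresses, so phys ^ SACvrswap = virt; with r5 = 0 the xor
+ ; below leaves a null release chain null.)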
+
+stnorel:
+ bl saveRestore ; restore translation and exceptions, turn off SF
+ mtlr r9 ; Restore the return
+
+ lwz r28,FM_SIZE+0(r1) ; Restore R28
+ lwz r29,FM_SIZE+4(r1) ; Restore R29
+ lwz r30,FM_SIZE+8(r1) ; Restore R30
+ lwz r31,FM_SIZE+12(r1) ; Restore R31
+ addi r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Pop the stack
+ xor r3,r3,r5 ; Convert release chain address to virtual
+ rlwinm r3,r3,0,0,31 ; if 64-bit, clear upper half of virtual address
+
+#if FPVECDBG
+ lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
+ li r2,0x2207 ; (TEST/DEBUG)
+ oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
+ sc ; (TEST/DEBUG)