+saveRet64b: ; r3 <- last one to trim
+ ld r7,SAVprev(r3) ; Point to the first one not to trim
+ li r4,LocalSaveTarget ; Set the target count
+ std r7,SAVprev(r2) ; Trim stuff leaving the one just released as first
+ stw r4,lclfreecnt(r10) ; Set the current count
+
+ bl savelock ; Lock up the anchor
+
+ ld r8,SVfree(0) ; Get the old head of the free list
+ lwz r4,SVfreecnt(0) ; Get the number of free ones
+ lwz r7,SVinuse(0) ; Get the number that are in use
+ std r6,SVfree(0) ; Point to the first trimmed savearea
+ add r4,r4,r5 ; Add number trimmed to free count
+ std r8,SAVprev(r3) ; Chain the old head to the tail of the trimmed guys
+ sub r7,r7,r5 ; Remove the trims from the in use count
+ stw r4,SVfreecnt(0) ; Set new free count
+ stw r7,SVinuse(0) ; Set new in use count
+
+ mtlr r0 ; Restore the return to our caller
+ b saveunlock ; Set adjust count, unlock the saveanchor, and return
+
+
+/*
+ * *********************
+ * * s a v e R e t 3 2 *
+ * *********************
+ *
+ * This is the internal routine to free a savearea, passed by 32-bit physical
+ * address. We assume that IR, DR, and EE are all off, and:
+ * r3 = phys address of the savearea
+ * r10 = per-proc ptr
+ * We destroy:
+ * r0,r2-r8.
+ */
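+
+/* A rough C sketch of the fast path below, for orientation only. The structure
+ * and field names are hypothetical and simply mirror the assembler offsets
+ * (SAVprev, SAVflags, lclfree, lclfreecnt); sa stands for the savearea in r3
+ * and pp for the per-proc block in r10:
+ *
+ * sa->sav_flags_byte2 = SAVempty;     // mark savearea free (the stb to SAVflags+2)
+ * sa->sav_prev = pp->lclfree;         // plant free chain pointer
+ * pp->lclfree = sa;                   // chain us on in
+ * pp->lclfreecnt++;                   // bump up the count
+ * if (pp->lclfreecnt < LocalSaveMax)  // list not too long?
+ *     return;                         // ...then we are done
+ * // otherwise fall through and trim the local list (see below)
+ */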
+ .align 5
+saveRet32:
+ li r0,SAVempty ; Get marker for free savearea
+ lwz r7,lclfreecnt(r10) ; Get the local count
+ lwz r6,lclfree+4(r10) ; Get the old local header
+ addi r7,r7,1 ; Pop up the free count
+ stw r6,SAVprev+4(r3) ; Plant free chain pointer
+ cmplwi r7,LocalSaveMax ; Has the list gotten too long?
+ stb r0,SAVflags+2(r3) ; Mark savearea free
+ stw r3,lclfree+4(r10) ; Chain us on in
+ stw r7,lclfreecnt(r10) ; Bump up the count
+ bltlr+ ; List not too long, so done
+
+/* The local savearea chain has gotten too long. Trim it down to the target.
+ * Here's a tricky bit, and important:
+ *
+ * When we trim the list, we NEVER trim the very first one. This is because that is
+ * the very last one released and the exception exit code will release the savearea
+ * BEFORE it is done using it. Wouldn't be too good if another processor started
+ * using it, eh? So for this case, we are safe so long as the savearea stays on
+ * the local list. (Note: the exit routine needs to do this because it is in the
+ * process of restoring all context and it needs to keep it until the last second.)
+ */
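+
+/* Roughly, in C (hypothetical names again; savelock/saveunlock stand for the
+ * routines called below, and SVfree/SVfreecnt/SVinuse live in the global
+ * saveanchor):
+ *
+ * keep  = head;                              // r2: just released, never trimmed
+ * first = head->sav_prev;                    // r6: first one to trim
+ * n     = pp->lclfreecnt - LocalSaveTarget;  // r5: number being trimmed
+ * for (last = first, i = 1; i < n; i++)
+ *     last = last->sav_prev;                 // walk to the last one to trim
+ * keep->sav_prev = last->sav_prev;           // unchain the trimmed ones locally
+ * pp->lclfreecnt = LocalSaveTarget;
+ * savelock();                                // then splice them onto the global free list
+ * last->sav_prev = SVfree;
+ * SVfree = first;
+ * SVfreecnt += n;
+ * SVinuse -= n;
+ * saveunlock();                              // also sets the adjust count
+ */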
+
+ mflr r0 ; save return to caller of saveRet32
+ mr r2,r3 ; r2 <- 1st one on local list, which must not be trimmed
+ lwz r3,SAVprev+4(r3) ; Skip over the first
+ subi r7,r7,LocalSaveTarget ; Figure out how much to trim
+ mr r6,r3 ; r6 <- first one to trim
+ mr r5,r7 ; Save the number we are trimming
+
+saveRet32a:
+ addic. r7,r7,-1 ; Any left to do?
+ ble- saveRet32b ; Nope...
+ lwz r3,SAVprev+4(r3) ; Skip to the next one
+ b saveRet32a ; Keep going...
+
+saveRet32b: ; r3 <- last one to trim
+ lwz r7,SAVprev+4(r3) ; Point to the first one not to trim
+ li r4,LocalSaveTarget ; Set the target count
+ stw r7,SAVprev+4(r2) ; Trim stuff leaving the one just released as first
+ stw r4,lclfreecnt(r10) ; Set the current count
+
+ bl savelock ; Lock up the anchor
+
+ lwz r8,SVfree+4(0) ; Get the old head of the free list
+ lwz r4,SVfreecnt(0) ; Get the number of free ones
+ lwz r7,SVinuse(0) ; Get the number that are in use
+ stw r6,SVfree+4(0) ; Point to the first trimmed savearea
+ add r4,r4,r5 ; Add number trimmed to free count
+ stw r8,SAVprev+4(r3) ; Chain the old head to the tail of the trimmed guys
+ sub r7,r7,r5 ; Remove the trims from the in use count
+ stw r4,SVfreecnt(0) ; Set new free count
+ stw r7,SVinuse(0) ; Set new in use count
+
+ mtlr r0 ; Restore the return to our caller
+ b saveunlock ; Set adjust count, unlock the saveanchor, and return
+
+
+/*
+ * *******************************
+ * * s a v e _ t r i m _ f r e e *
+ * *******************************
+ *
+ * struct savearea_comm *save_trim_free(void);
+ *
+ * Trim the free list down to the target count, i.e., by -(SVadjust) save areas.
+ * It trims the list and, if a pool page was fully allocated, puts that page on
+ * the start of the pool list.
+ *
+ * If the savearea being released is the last on a pool page (i.e., all entries
+ * are released), the page is dequeued from the pool and queued with any others
+ * found during this scan. Note that this queue is maintained virtually.
+ *
+ * When the scan is done, the saveanchor lock is released and the list of
+ * freed pool pages is returned to our caller.
+ *
+ * For latency's sake we may want to revisit this code. If we are trimming a
+ * large number of saveareas, we could be disabled and holding the savearea lock
+ * for quite a while. It may be that we want to break the trim down into parts,
+ * possibly trimming the free list first and then individually pushing the
+ * saveareas back into the free pool.
+ *
+ * This function expects to be called with translation on and a valid stack.
+ * It uses the standard ABI, i.e., we destroy r2 and r3-r11, and return the
+ * pointer in r3.
+ */
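+
+/* A rough C sketch of the entry logic below (hypothetical names; r28-r31 are
+ * saved on the stack first, and saveTrim32/saveTrim64 are the assembler paths
+ * that do the actual trimming, not real C functions):
+ *
+ * struct savearea_comm *save_trim_free(void)
+ * {
+ *     saveSetup();                     // translation and interrupts off
+ *     savelock();                      // lock the saveanchor
+ *     n = -SVadjust;                   // SVadjust is negative when there are too many
+ *     if (n <= 0) {
+ *         SVlock = 0;                  // quick unlock, nothing changed
+ *         return NULL;                 // no pool pages were freed
+ *     }
+ *     return pf64Bit ? saveTrim64(n) : saveTrim32(n);
+ * }
+ */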
+ .align 5
+ .globl EXT(save_trim_free)
+
+LEXT(save_trim_free)
+
+ subi r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Make space for 4 registers on stack
+ mflr r9 ; save our return address
+ stw r28,FM_SIZE+0(r1) ; Save R28
+ stw r29,FM_SIZE+4(r1) ; Save R29
+ stw r30,FM_SIZE+8(r1) ; Save R30
+ stw r31,FM_SIZE+12(r1) ; Save R31
+
+ bl saveSetup ; turn off translation and interrupts, load many regs
+ bl savelock ; Go lock up the anchor
+
+ lwz r8,SVadjust(0) ; How many do we need to clear out?
+ li r3,0 ; Get a 0
+ neg. r8,r8 ; Get the actual we need to toss (adjust is neg if too many)
+ ble- save_trim_free1 ; skip if no trimming needed anymore
+ bf-- pf64Bitb,saveTrim32 ; handle 32-bit processors
+ b saveTrim64 ; handle 64-bit processors
+
+save_trim_free1: ; by the time we were called, there was nothing left to trim
+ stw r3,SVlock(0) ; Quick unlock (no need for sync or to set adjust, nothing changed)
+ mtlr r9 ; Restore return
+
+#if FPVECDBG
+ lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
+ li r2,0x2206 ; (TEST/DEBUG)
+ oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
+ sc ; (TEST/DEBUG)