/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <db_machine_commands.h>
#include <mach_debug.h>

#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <ppc/savearea.h>
#include <mach/ppc/vm_param.h>
#include <assym.s>

/* Register usage conventions in this code:
 *      r9  = return address
 *      r10 = per-proc ptr
 *      r11 = MSR at entry
 *      cr6 = feature flags (ie, pf64Bit)
 *
 * Because much of this code deals with physical addresses,
 * there are parallel paths for 32- and 64-bit machines.
 */

/*
 * ***********************
 * * s a v e _ q u e u e *
 * ***********************
 *
 *      void save_queue(ppnum_t pagenum);
 *
 * This routine adds a savearea block to the free list.
 * We also queue the block to the free pool list.  This is a
 * circular doubly linked list.  Because this block has no free entries,
 * it gets queued to the end of the list.
 */

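/*
 * For reference, the list surgery save_queue performs, as a C sketch.  This
 * is illustrative only: SAC_CNT and SAV_SIZE are stand-ins for the real
 * sac_cnt/SAVsize constants, the struct is reduced to its free-chain link,
 * and the savelock serialization and physical addressing are elided.
 *
 *     #define SAC_CNT  6                      // assumed saveareas per page
 *     #define SAV_SIZE 640                    // assumed savearea size
 *
 *     typedef struct savearea {
 *         struct savearea *prev;              // free-chain link (SAVprev)
 *         char pad[SAV_SIZE - sizeof(struct savearea *)];
 *     } savearea_t;
 *
 *     void save_queue_sketch(savearea_t *page,        // SAC_CNT fresh slots
 *                            savearea_t **svfree,     // global free list head
 *                            unsigned *svfreecnt)
 *     {
 *         for (int i = 0; i + 1 < SAC_CNT; i++)       // string slots together
 *             page[i].prev = &page[i + 1];
 *         page[SAC_CNT - 1].prev = *svfree;   // old head hangs off our tail
 *         *svfree = &page[0];                 // block becomes the new head
 *         *svfreecnt += SAC_CNT;
 *     }
 */
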
            .align  5
            .globl  EXT(save_queue)

LEXT(save_queue)
            mflr    r9                          ; get return address
            mr      r8,r3                       ; move pagenum out of the way
            bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
            bf--    pf64Bitb,saveQueue32        ; skip if 32-bit processor

            sldi    r2,r8,12                    ; r2 <-- phys address of page
            li      r8,sac_cnt                  ; Get the number of saveareas per page
            mr      r4,r2                       ; Point to start of chain
            li      r0,SAVempty                 ; Get empty marker

saveQueue64a:
            addic.  r8,r8,-1                    ; Keep track of how many we did
            stb     r0,SAVflags+2(r4)           ; Set empty
            addi    r7,r4,SAVsize               ; Point to the next slot
            ble-    saveQueue64b                ; We are done with the chain
            std     r7,SAVprev(r4)              ; Set this chain
            mr      r4,r7                       ; Step to the next
            b       saveQueue64a                ; Fill the whole block...

saveQueue64b:
            bl      savelock                    ; Go lock the save anchor

            ld      r7,SVfree(0)                ; Get the free save area list anchor
            lwz     r6,SVfreecnt(0)             ; Get the number of free saveareas

            std     r2,SVfree(0)                ; Queue in the new one
            addi    r6,r6,sac_cnt               ; Count the ones we are linking in
            std     r7,SAVprev(r4)              ; Queue the old first one off of us
            stw     r6,SVfreecnt(0)             ; Save the new count

            b       saveQueueExit               ; Join the common exit code...

; Handle 32-bit processor.

saveQueue32:
            slwi    r2,r8,12                    ; r2 <-- phys address of page
            li      r8,sac_cnt                  ; Get the number of saveareas per page
            mr      r4,r2                       ; Point to start of chain
            li      r0,SAVempty                 ; Get empty marker

saveQueue32a:
            addic.  r8,r8,-1                    ; Keep track of how many we did
            stb     r0,SAVflags+2(r4)           ; Set empty
            addi    r7,r4,SAVsize               ; Point to the next slot
            ble-    saveQueue32b                ; We are done with the chain
            stw     r7,SAVprev+4(r4)            ; Set this chain
            mr      r4,r7                       ; Step to the next
            b       saveQueue32a                ; Fill the whole block...

saveQueue32b:
            bl      savelock                    ; Go lock the save anchor

            lwz     r7,SVfree+4(0)              ; Get the free save area list anchor
            lwz     r6,SVfreecnt(0)             ; Get the number of free saveareas

            stw     r2,SVfree+4(0)              ; Queue in the new one
            addi    r6,r6,sac_cnt               ; Count the ones we are linking in
            stw     r7,SAVprev+4(r4)            ; Queue the old first one off of us
            stw     r6,SVfreecnt(0)             ; Save the new count

saveQueueExit:                                  ; join here from 64-bit path
            bl      saveunlock                  ; Unlock the list and set the adjust count
            mtlr    r9                          ; Restore the return

#if FPVECDBG
            mfsprg  r2,1                        ; (TEST/DEBUG)
            mr.     r2,r2                       ; (TEST/DEBUG)
            beq--   saveRestore                 ; (TEST/DEBUG)
            lis     r0,hi16(CutTrace)           ; (TEST/DEBUG)
            li      r2,0x2201                   ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)        ; (TEST/DEBUG)
            sc                                  ; (TEST/DEBUG)
#endif
            b       saveRestore                 ; Restore interrupts and translation

/*
 * *****************************
 * * s a v e _ g e t _ i n i t *
 * *****************************
 *
 *      addr64_t save_get_init(void);
 *
 * Note that save_get_init is used in initial processor startup only.  It
 * is used because translation is on, but no tables exist yet and we have
 * no V=R BAT registers that cover the entire physical memory.
 */
            .align  5
            .globl  EXT(save_get_init)

LEXT(save_get_init)
            mflr    r9                          ; get return address
            bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
            bfl--   pf64Bitb,saveGet32          ; get r3 <- savearea, r5 <- page address (with SAC)
            btl++   pf64Bitb,saveGet64          ; get one on a 64-bit machine
            bl      saveRestore                 ; restore translation etc
            mtlr    r9                          ; restore return address

            ; unpack the physaddr in r3 into a long long in (r3,r4)

            mr      r4,r3                       ; copy low word of phys address to r4
            li      r3,0                        ; assume upper word was 0
            bflr--  pf64Bitb                    ; if 32-bit processor, return
            srdi    r3,r4,32                    ; unpack reg64_t to addr64_t on 64-bit machine
            blr

/*
 * *******************
 * * s a v e _ g e t *
 * *******************
 *
 *      savearea *save_get(void);
 *
 * Allocate a savearea, returning a virtual address.  NOTE: we must preserve
 * r0, r2, and r12.  Our callers in cswtch.s depend on this.
 */
            .align  5
            .globl  EXT(save_get)

LEXT(save_get)
            mflr    r9                          ; get return address
            mr      r5,r0                       ; copy regs before saveSetup nails them
            bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
            bf--    pf64Bitb,svgt1              ; skip if 32-bit processor

            std     r5,tempr0(r10)              ; save r0 in per-proc across call to saveGet64
            std     r2,tempr2(r10)              ; and r2
            std     r12,tempr4(r10)             ; and r12
            bl      saveGet64                   ; get r3 <- savearea, r5 <- page address (with SAC)
            ld      r0,tempr0(r10)              ; restore callers regs
            ld      r2,tempr2(r10)
            ld      r12,tempr4(r10)
            b       svgt2                       ; join the common exit

svgt1:                                          ; handle 32-bit processor
            stw     r5,tempr0+4(r10)            ; save r0 in per-proc across call to saveGet32
            stw     r2,tempr2+4(r10)            ; and r2
            stw     r12,tempr4+4(r10)           ; and r12
            bl      saveGet32                   ; get r3 <- savearea, r5 <- page address (with SAC)
            lwz     r0,tempr0+4(r10)            ; restore callers regs
            lwz     r2,tempr2+4(r10)
            lwz     r12,tempr4+4(r10)

svgt2:
            lwz     r5,SACvrswap+4(r5)          ; Get the virtual to real translation (only need low word)
            mtlr    r9                          ; restore return address
            xor     r3,r3,r5                    ; convert physaddr to virtual
            rlwinm  r3,r3,0,0,31                ; 0 upper word if a 64-bit machine

#if FPVECDBG
            mr      r6,r0                       ; (TEST/DEBUG)
            mr      r7,r2                       ; (TEST/DEBUG)
            mfsprg  r2,1                        ; (TEST/DEBUG)
            mr.     r2,r2                       ; (TEST/DEBUG)
            beq--   svgDBBypass                 ; (TEST/DEBUG)
            lis     r0,HIGH_ADDR(CutTrace)      ; (TEST/DEBUG)
            li      r2,0x2203                   ; (TEST/DEBUG)
            oris    r0,r0,LOW_ADDR(CutTrace)    ; (TEST/DEBUG)
            sc                                  ; (TEST/DEBUG)
svgDBBypass:                                    ; (TEST/DEBUG)
            mr      r0,r6                       ; (TEST/DEBUG)
            mr      r2,r7                       ; (TEST/DEBUG)
#endif
            b       saveRestore                 ; restore MSR and return to our caller

/*
 * ***********************************
 * * s a v e _ g e t _ p h y s _ 3 2 *
 * ***********************************
 *
 *      reg64_t save_get_phys_32(void);
 *
 * This is the entry normally called from lowmem_vectors.s with
 * translation and interrupts already off.
 */
            .align  5
            .globl  EXT(save_get_phys_32)

LEXT(save_get_phys_32)
            mfsprg  r10,0                       ; get the per-proc ptr
            b       saveGet32                   ; Get r3 <- savearea, r5 <- page address (with SAC)

/*
 * ***********************************
 * * s a v e _ g e t _ p h y s _ 6 4 *
 * ***********************************
 *
 *      reg64_t save_get_phys_64(void);
 *
 * This is the entry normally called from lowmem_vectors.s with
 * translation and interrupts already off, and in 64-bit mode.
 */
            .align  5
            .globl  EXT(save_get_phys_64)

LEXT(save_get_phys_64)
            mfsprg  r10,0                       ; get the per-proc ptr
            b       saveGet64                   ; Get r3 <- savearea, r5 <- page address (with SAC)

/*
 * *********************
 * * s a v e G e t 6 4 *
 * *********************
 *
 * This is the internal routine to allocate a savearea on a 64-bit processor.
 * Note that we must not take any exceptions of any kind, including PTE misses, as that
 * would deadlock trying to reenter this routine.  We pass back the 64-bit physical address.
 * First we try the local list.  If that is below a threshold, we try the global free list,
 * which requires taking a lock, and replenish.  If there are no saveareas in either list,
 * we will install the backpocket and choke.  This routine assumes that the caller has
 * turned translation off, masked interrupts, turned on 64-bit mode, and set up:
 *      r10 = per-proc ptr
 *
 * We return:
 *      r3 = 64-bit physical address of the savearea
 *      r5 = 64-bit physical address of the page the savearea is in, with SAC
 *
 * We destroy: r2-r8.
 */
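/*
 * The allocation policy above, as a self-contained C sketch.  Illustrative
 * only: LOCAL_SAVE_MIN/LOCAL_SAVE_TARGET and the structs are stand-ins for
 * LocalSaveMin/LocalSaveTarget and the real layouts; savelock, physical
 * addressing, the bugbug poisoning, and the backpocket choke are elided.
 *
 *     #define LOCAL_SAVE_MIN    3
 *     #define LOCAL_SAVE_TARGET 8
 *
 *     typedef struct savearea { struct savearea *prev; } savearea_t;
 *     typedef struct { savearea_t *lclfree; unsigned lclfreecnt; } per_proc_t;
 *     typedef struct { savearea_t *svfree; unsigned svfreecnt, svinuse; } saveanchor_t;
 *
 *     // Refill the local list from the global one; the real code does this
 *     // with the saveanchor locked and interrupts/translation off.
 *     static void replenish(per_proc_t *pp, saveanchor_t *sa)
 *     {
 *         unsigned need = LOCAL_SAVE_TARGET - pp->lclfreecnt;
 *         while (need-- && sa->svfree != 0) {
 *             savearea_t *sv = sa->svfree;    // unchain from global list
 *             sa->svfree = sv->prev;
 *             sa->svfreecnt--;
 *             sa->svinuse++;                  // locally cached counts as in use
 *             sv->prev = pp->lclfree;         // chain onto local list
 *             pp->lclfree = sv;
 *             pp->lclfreecnt++;
 *         }
 *     }
 *
 *     savearea_t *save_get_sketch(per_proc_t *pp, saveanchor_t *sa)
 *     {
 *         if (pp->lclfreecnt <= LOCAL_SAVE_MIN)
 *             replenish(pp, sa);              // low-water mark: top it up
 *         savearea_t *sv = pp->lclfree;
 *         if (sv) {                           // pop the head of the local list
 *             pp->lclfree = sv->prev;
 *             pp->lclfreecnt--;
 *         }
 *         return sv;                          // 0 here is the "choke" case
 *     }
 */
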
saveGet64:
            lwz     r8,lclfreecnt(r10)          ; Get the count
            ld      r3,lclfree(r10)             ; Get the start of local savearea list
            cmplwi  r8,LocalSaveMin             ; Are we too low?
            ble--   saveGet64GetGlobal          ; We are too low and need to grow list...

; Get it from the per-processor local list.

saveGet64GetLocal:
            li      r2,0x5555                   ; get r2 <-- 0x55555555 55555555, our bugbug constant
            ld      r4,SAVprev(r3)              ; Chain to the next one
            oris    r2,r2,0x5555                ; next chunk of the bugbug constant
            subi    r8,r8,1                     ; Back down count
            rldimi  r2,r2,32,0                  ; propagate it to the upper half

            std     r2,SAVprev(r3)              ; bug next ptr
            stw     r2,SAVlevel(r3)             ; bug context ID
            li      r6,0                        ; Get a 0
            std     r4,lclfree(r10)             ; Unchain first savearea
            stw     r2,SAVact(r3)               ; bug activation ptr
            rldicr  r5,r3,0,51                  ; r5 <-- page ptr, where SAC is kept
            stw     r8,lclfreecnt(r10)          ; Set new count
            stw     r6,SAVflags(r3)             ; clear the flags

            blr

; Local list was low so replenish from global list.
;   r7 = return address to caller of saveGet64

saveGet64GetGlobal:
            mflr    r7                          ; save return address
            subfic  r5,r8,LocalSaveTarget       ; Get the number of saveareas we need to grab to get to target
            bl      savelock                    ; Go lock up the anchor

            lwz     r2,SVfreecnt(0)             ; Get the number on this list
            ld      r8,SVfree(0)                ; Get the head of the save area list

            sub     r3,r2,r5                    ; Get number left after we swipe enough for local list
            sradi   r3,r3,63                    ; Get 0 if enough or -1 if not
            andc    r4,r5,r3                    ; Get number to get if there are enough, 0 otherwise
            and     r5,r2,r3                    ; Get 0 if there are enough, number on list otherwise
            or.     r5,r4,r5                    ; r5 <- number we will move from global to local list
            beq--   saveGet64NoFree             ; There are none to get...

            mtctr   r5                          ; Get loop count
            mr      r6,r8                       ; Remember the first in the list

saveGet64c:
            bdz     saveGet64d                  ; Count down and branch when we hit 0...
            ld      r8,SAVprev(r8)              ; Get the next
            b       saveGet64c                  ; Keep going...

saveGet64d:
            ld      r3,SAVprev(r8)              ; Get the next one
            lwz     r4,SVinuse(0)               ; Get the in use count
            sub     r2,r2,r5                    ; Count down what we stole
            std     r3,SVfree(0)                ; Set the new first in list
            add     r4,r4,r5                    ; Count the ones we just put in the local list as "in use"
            stw     r2,SVfreecnt(0)             ; Set the new count
            stw     r4,SVinuse(0)               ; Set the new in use count

            ld      r4,lclfree(r10)             ; Get the old head of list
            lwz     r3,lclfreecnt(r10)          ; Get the old count
            std     r6,lclfree(r10)             ; Set the new head of the list
            add     r3,r3,r5                    ; Get the new count
            std     r4,SAVprev(r8)              ; Point to the old head
            stw     r3,lclfreecnt(r10)          ; Set the new count

            bl      saveunlock                  ; Update the adjust field and unlock
            mtlr    r7                          ; restore return address
            b       saveGet64                   ; Start over and finally allocate the savearea...

; The local list is below the repopulate threshold and the global list is empty.
; First we check if there are any left in the local list and if so, we allow
; them to be allocated.  If not, we release the backpocket list and choke.
; There is nothing more that we can do at this point.  Hopefully we stay alive
; long enough to grab some much-needed panic information.
;   r7 = return address to caller of saveGet64

saveGet64NoFree:
            lwz     r8,lclfreecnt(r10)          ; Get the count
            mr.     r8,r8                       ; Are there any reserve to get?
            beq--   saveGet64Choke              ; No, go choke and die...
            bl      saveunlock                  ; Update the adjust field and unlock
            ld      r3,lclfree(r10)             ; Get the start of local savearea list
            lwz     r8,lclfreecnt(r10)          ; Get the count
            mtlr    r7                          ; restore return address
            b       saveGet64GetLocal           ; We have some left, dip on in...

; We who are about to die salute you.  The savearea chain is messed up or
; empty.  Add in a few so we have enough to take down the system.

saveGet64Choke:
            lis     r9,hi16(EXT(backpocket))    ; Get high order of back pocket
            ori     r9,r9,lo16(EXT(backpocket)) ; and low part

            lwz     r8,SVfreecnt-saveanchor(r9) ; Get the new number of free elements
            ld      r7,SVfree-saveanchor(r9)    ; Get the head of the chain
            lwz     r6,SVinuse(0)               ; Get total in the old list

            stw     r8,SVfreecnt(0)             ; Set the new number of free elements
            add     r6,r6,r8                    ; Add in the new ones
            std     r7,SVfree(0)                ; Set the new head of the chain
            stw     r6,SVinuse(0)               ; Set total in the new list

saveGetChokeJoin:                               ; join in the fun from 32-bit mode
            lis     r0,hi16(Choke)              ; Set choke firmware call
            li      r7,0                        ; Get a clear register to unlock
            ori     r0,r0,lo16(Choke)           ; Set the rest of the choke call
            li      r3,failNoSavearea           ; Set failure code

            eieio                               ; Make sure all is committed
            stw     r7,SVlock(0)                ; Unlock the free list
            sc                                  ; Choke the system...

/*
 * *********************
 * * s a v e G e t 3 2 *
 * *********************
 *
 * This is the internal routine to allocate a savearea on a 32-bit processor.
 * Note that we must not take any exceptions of any kind, including PTE misses, as that
 * would deadlock trying to reenter this routine.  We pass back the 32-bit physical address.
 * First we try the local list.  If that is below a threshold, we try the global free list,
 * which requires taking a lock, and replenish.  If there are no saveareas in either list,
 * we will install the backpocket and choke.  This routine assumes that the caller has
 * turned translation off, masked interrupts, and set up:
 *      r10 = per-proc ptr
 *
 * We return:
 *      r3 = 32-bit physical address of the savearea
 *      r5 = 32-bit physical address of the page the savearea is in, with SAC
 *
 * We destroy: r2-r8.
 */
saveGet32:
            lwz     r8,lclfreecnt(r10)          ; Get the count
            lwz     r3,lclfree+4(r10)           ; Get the start of local savearea list
            cmplwi  r8,LocalSaveMin             ; Are we too low?
            ble-    saveGet32GetGlobal          ; We are too low and need to grow list...

; Get savearea from per-processor local list.

saveGet32GetLocal:
            li      r2,0x5555                   ; get r2 <-- 0x55555555, our bugbug constant
            lwz     r4,SAVprev+4(r3)            ; Chain to the next one
            oris    r2,r2,0x5555                ; rest of the bugbug constant
            subi    r8,r8,1                     ; Back down count

            stw     r2,SAVprev+4(r3)            ; bug next ptr
            stw     r2,SAVlevel(r3)             ; bug context ID
            li      r6,0                        ; Get a 0
            stw     r4,lclfree+4(r10)           ; Unchain first savearea
            stw     r2,SAVact(r3)               ; bug activation ptr
            rlwinm  r5,r3,0,0,19                ; r5 <-- page ptr, where SAC is kept
            stw     r8,lclfreecnt(r10)          ; Set new count
            stw     r6,SAVflags(r3)             ; clear the flags

            blr

; Local list was low so replenish from global list.
;   r7 = return address to caller of saveGet32

saveGet32GetGlobal:
            mflr    r7                          ; save return address
            subfic  r5,r8,LocalSaveTarget       ; Get the number of saveareas we need to grab to get to target
            bl      savelock                    ; Go lock up the anchor

            lwz     r2,SVfreecnt(0)             ; Get the number on this list
            lwz     r8,SVfree+4(0)              ; Get the head of the save area list

            sub     r3,r2,r5                    ; Get number left after we swipe enough for local list
            srawi   r3,r3,31                    ; Get 0 if enough or -1 if not
            andc    r4,r5,r3                    ; Get number to get if there are enough, 0 otherwise
            and     r5,r2,r3                    ; Get 0 if there are enough, number on list otherwise
            or.     r5,r4,r5                    ; r5 <- number we will move from global to local list
            beq-    saveGet32NoFree             ; There are none to get...

            mtctr   r5                          ; Get loop count
            mr      r6,r8                       ; Remember the first in the list

saveGet32c:
            bdz     saveGet32d                  ; Count down and branch when we hit 0...
            lwz     r8,SAVprev+4(r8)            ; Get the next
            b       saveGet32c                  ; Keep going...

saveGet32d:
            lwz     r3,SAVprev+4(r8)            ; Get the next one
            lwz     r4,SVinuse(0)               ; Get the in use count
            sub     r2,r2,r5                    ; Count down what we stole
            stw     r3,SVfree+4(0)              ; Set the new first in list
            add     r4,r4,r5                    ; Count the ones we just put in the local list as "in use"
            stw     r2,SVfreecnt(0)             ; Set the new count
            stw     r4,SVinuse(0)               ; Set the new in use count

            lwz     r4,lclfree+4(r10)           ; Get the old head of list
            lwz     r3,lclfreecnt(r10)          ; Get the old count
            stw     r6,lclfree+4(r10)           ; Set the new head of the list
            add     r3,r3,r5                    ; Get the new count
            stw     r4,SAVprev+4(r8)            ; Point to the old head
            stw     r3,lclfreecnt(r10)          ; Set the new count

            bl      saveunlock                  ; Update the adjust field and unlock
            mtlr    r7                          ; restore return address
            b       saveGet32                   ; Start over and finally allocate the savearea...

; The local list is below the repopulate threshold and the global list is empty.
; First we check if there are any left in the local list and if so, we allow
; them to be allocated.  If not, we release the backpocket list and choke.
; There is nothing more that we can do at this point.  Hopefully we stay alive
; long enough to grab some much-needed panic information.
;   r7 = return address to caller of saveGet32

saveGet32NoFree:
            lwz     r8,lclfreecnt(r10)          ; Get the count
            mr.     r8,r8                       ; Are there any reserve to get?
            beq-    saveGet32Choke              ; No, go choke and die...
            bl      saveunlock                  ; Update the adjust field and unlock
            lwz     r3,lclfree+4(r10)           ; Get the start of local savearea list
            lwz     r8,lclfreecnt(r10)          ; Get the count
            mtlr    r7                          ; restore return address
            b       saveGet32GetLocal           ; We have some left, dip on in...

; We who are about to die salute you.  The savearea chain is messed up or
; empty.  Add in a few so we have enough to take down the system.

saveGet32Choke:
            lis     r9,hi16(EXT(backpocket))    ; Get high order of back pocket
            ori     r9,r9,lo16(EXT(backpocket)) ; and low part

            lwz     r8,SVfreecnt-saveanchor(r9) ; Get the new number of free elements
            lwz     r7,SVfree+4-saveanchor(r9)  ; Get the head of the chain
            lwz     r6,SVinuse(0)               ; Get total in the old list

            stw     r8,SVfreecnt(0)             ; Set the new number of free elements
            add     r6,r6,r8                    ; Add in the new ones (why?)
            stw     r7,SVfree+4(0)              ; Set the new head of the chain
            stw     r6,SVinuse(0)               ; Set total in the new list

            b       saveGetChokeJoin            ; Join the common choke code...

/*
 * *******************
 * * s a v e _ r e t *
 * *******************
 *
 *      void save_ret(struct savearea *);                 // normal call
 *      void save_ret_wMSR(struct savearea *, reg64_t);   // passes MSR to restore as 2nd arg
 *
 * Return a savearea passed by virtual address to the free list.
 * Note really well: we can take NO exceptions of any kind,
 * including a PTE miss, once the savearea lock is held.  That's
 * a guaranteed deadlock.  That means we must disable interruptions
 * and turn all translation off.
 */
            .align  5
            .globl  EXT(save_ret_wMSR)          ; alternate entry pt w MSR to restore in r4

LEXT(save_ret_wMSR)
            crset   31                          ; set flag for save_ret_wMSR
            b       svrt1                       ; join common code

            .align  5
            .globl  EXT(save_ret)

LEXT(save_ret)
            crclr   31                          ; clear flag for save_ret_wMSR

svrt1:                                          ; join from save_ret_wMSR
            mflr    r9                          ; get return address
            rlwinm  r7,r3,0,0,19                ; get virtual address of SAC area at start of page
            mr      r8,r3                       ; save virtual address
            lwz     r5,SACvrswap+0(r7)          ; get 64-bit converter from V to R
            lwz     r6,SACvrswap+4(r7)          ; both halves, though only bottom used on 32-bit machine
#if FPVECDBG
            lis     r0,HIGH_ADDR(CutTrace)      ; (TEST/DEBUG)
            li      r2,0x2204                   ; (TEST/DEBUG)
            oris    r0,r0,LOW_ADDR(CutTrace)    ; (TEST/DEBUG)
            sc                                  ; (TEST/DEBUG)
#endif
            bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
            bf++    31,svrt3                    ; skip if not save_ret_wMSR
            mr      r11,r4                      ; was save_ret_wMSR, so overwrite saved MSR

svrt3:
            bf--    pf64Bitb,svrt4              ; skip if a 32-bit processor

; Handle 64-bit processor.

            rldimi  r6,r5,32,0                  ; merge upper and lower halves of SACvrswap together
            xor     r3,r8,r6                    ; get r3 <- 64-bit physical address of this savearea
            bl      saveRet64                   ; return it
            mtlr    r9                          ; restore return address
            b       saveRestore64               ; restore MSR

; Handle 32-bit processor.

svrt4:
            xor     r3,r8,r6                    ; get r3 <- 32-bit physical address of this savearea
            bl      saveRet32                   ; return it
            mtlr    r9                          ; restore return address
            b       saveRestore32               ; restore MSR

/*
 * *****************************
 * * s a v e _ r e t _ p h y s *
 * *****************************
 *
 *      void save_ret_phys(reg64_t);
 *
 * Called from lowmem vectors to return (ie, free) a savearea by physical address.
 * Translation and interrupts are already off, and 64-bit mode is set if defined.
 * We can take _no_ exceptions of any kind in this code, including PTE miss, since
 * that would result in a deadlock.  We expect:
 *      r3 = phys addr of savearea
 *      msr = IR, DR, and EE off, SF on
 *      cr6 = pf64Bit flag
 */
            .align  5
            .globl  EXT(save_ret_phys)

LEXT(save_ret_phys)
            mfsprg  r10,0                       ; get the per-proc ptr
            bf--    pf64Bitb,saveRet32          ; handle 32-bit machine
            b       saveRet64                   ; handle 64-bit machine

/*
 * *********************
 * * s a v e R e t 6 4 *
 * *********************
 *
 * This is the internal routine to free a savearea, passed by 64-bit physical
 * address.  We assume that IR, DR, and EE are all off, that SF is on, and:
 *      r3 = phys address of the savearea
 *      r10 = per-proc ptr
 */
saveRet64:
            li      r0,SAVempty                 ; Get marker for free savearea
            lwz     r7,lclfreecnt(r10)          ; Get the local count
            ld      r6,lclfree(r10)             ; Get the old local header
            addi    r7,r7,1                     ; Pop up the free count
            std     r6,SAVprev(r3)              ; Plant free chain pointer
            cmplwi  r7,LocalSaveMax             ; Has the list gotten too long?
            stb     r0,SAVflags+2(r3)           ; Mark savearea free
            std     r3,lclfree(r10)             ; Chain us on in
            stw     r7,lclfreecnt(r10)          ; Bump up the count
            bltlr++                             ; List not too long, so done

/* The local savearea chain has gotten too long.  Trim it down to the target.
 * Here's a tricky bit, and important:
 *
 * When we trim the list, we NEVER trim the very first one.  This is because that is
 * the very last one released and the exception exit code will release the savearea
 * BEFORE it is done using it.  Wouldn't be too good if another processor started
 * using it, eh?  So for this case, we are safe so long as the savearea stays on
 * the local list.  (Note: the exit routine needs to do this because it is in the
 * process of restoring all context and it needs to keep it until the last second.)
 */
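/*
 * The trim, as an illustrative C sketch (savearea_t reduced to its link;
 * LOCAL_SAVE_TARGET stands in for LocalSaveTarget; the caller guarantees
 * count > LOCAL_SAVE_TARGET and pushes the returned chain onto the global
 * list under savelock, chaining the old SVfree head where we leave 0):
 *
 *     #define LOCAL_SAVE_TARGET 8
 *     typedef struct savearea { struct savearea *prev; } savearea_t;
 *
 *     savearea_t *trim_local_sketch(savearea_t *head, unsigned count,
 *                                   unsigned *lclfreecnt)
 *     {
 *         savearea_t *first = head->prev;     // first one to trim: NEVER the
 *                                             // head, which was just released
 *                                             // and may still be in use by
 *                                             // the exception exit path
 *         savearea_t *last = first;
 *         for (unsigned i = 1; i < count - LOCAL_SAVE_TARGET; i++)
 *             last = last->prev;              // walk to the last one to trim
 *         head->prev = last->prev;            // splice the chunk out
 *         last->prev = 0;                     // real code chains old SVfree here
 *         *lclfreecnt = LOCAL_SAVE_TARGET;
 *         return first;                       // chunk for the global free list
 *     }
 */
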
            mflr    r0                          ; save return to caller of saveRet64
            mr      r2,r3                       ; r2 <- 1st one on local list, which must not be trimmed
            ld      r3,SAVprev(r3)              ; Skip over the first
            subi    r7,r7,LocalSaveTarget       ; Figure out how much to trim
            mr      r6,r3                       ; r6 <- first one to trim
            mr      r5,r7                       ; Save the number we are trimming

saveRet64a:
            addic.  r7,r7,-1                    ; Any left to do?
            ble--   saveRet64b                  ; Nope...
            ld      r3,SAVprev(r3)              ; Skip to the next one
            b       saveRet64a                  ; Keep going...

saveRet64b:                                     ; r3 <- last one to trim
            ld      r7,SAVprev(r3)              ; Point to the first one not to trim
            li      r4,LocalSaveTarget          ; Set the target count
            std     r7,SAVprev(r2)              ; Trim stuff leaving the one just released as first
            stw     r4,lclfreecnt(r10)          ; Set the current count

            bl      savelock                    ; Lock up the anchor

            ld      r8,SVfree(0)                ; Get the old head of the free list
            lwz     r4,SVfreecnt(0)             ; Get the number of free ones
            lwz     r7,SVinuse(0)               ; Get the number that are in use
            std     r6,SVfree(0)                ; Point to the first trimmed savearea
            add     r4,r4,r5                    ; Add number trimmed to free count
            std     r8,SAVprev(r3)              ; Chain the old head to the tail of the trimmed guys
            sub     r7,r7,r5                    ; Remove the trims from the in use count
            stw     r4,SVfreecnt(0)             ; Set new free count
            stw     r7,SVinuse(0)               ; Set new in use count

            mtlr    r0                          ; Restore the return to our caller
            b       saveunlock                  ; Set adjust count, unlock the saveanchor, and return

/*
 * *********************
 * * s a v e R e t 3 2 *
 * *********************
 *
 * This is the internal routine to free a savearea, passed by 32-bit physical
 * address.  We assume that IR, DR, and EE are all off, and:
 *      r3 = phys address of the savearea
 *      r10 = per-proc ptr
 */
saveRet32:
            li      r0,SAVempty                 ; Get marker for free savearea
            lwz     r7,lclfreecnt(r10)          ; Get the local count
            lwz     r6,lclfree+4(r10)           ; Get the old local header
            addi    r7,r7,1                     ; Pop up the free count
            stw     r6,SAVprev+4(r3)            ; Plant free chain pointer
            cmplwi  r7,LocalSaveMax             ; Has the list gotten too long?
            stb     r0,SAVflags+2(r3)           ; Mark savearea free
            stw     r3,lclfree+4(r10)           ; Chain us on in
            stw     r7,lclfreecnt(r10)          ; Bump up the count
            bltlr+                              ; List not too long, so done

/* The local savearea chain has gotten too long.  Trim it down to the target.
 * Here's a tricky bit, and important:
 *
 * When we trim the list, we NEVER trim the very first one.  This is because that is
 * the very last one released and the exception exit code will release the savearea
 * BEFORE it is done using it.  Wouldn't be too good if another processor started
 * using it, eh?  So for this case, we are safe so long as the savearea stays on
 * the local list.  (Note: the exit routine needs to do this because it is in the
 * process of restoring all context and it needs to keep it until the last second.)
 */
            mflr    r0                          ; save return to caller of saveRet32
            mr      r2,r3                       ; r2 <- 1st one on local list, which must not be trimmed
            lwz     r3,SAVprev+4(r3)            ; Skip over the first
            subi    r7,r7,LocalSaveTarget       ; Figure out how much to trim
            mr      r6,r3                       ; r6 <- first one to trim
            mr      r5,r7                       ; Save the number we are trimming

saveRet32a:
            addic.  r7,r7,-1                    ; Any left to do?
            ble-    saveRet32b                  ; Nope...
            lwz     r3,SAVprev+4(r3)            ; Skip to the next one
            b       saveRet32a                  ; Keep going...

saveRet32b:                                     ; r3 <- last one to trim
            lwz     r7,SAVprev+4(r3)            ; Point to the first one not to trim
            li      r4,LocalSaveTarget          ; Set the target count
            stw     r7,SAVprev+4(r2)            ; Trim stuff leaving the one just released as first
            stw     r4,lclfreecnt(r10)          ; Set the current count

            bl      savelock                    ; Lock up the anchor

            lwz     r8,SVfree+4(0)              ; Get the old head of the free list
            lwz     r4,SVfreecnt(0)             ; Get the number of free ones
            lwz     r7,SVinuse(0)               ; Get the number that are in use
            stw     r6,SVfree+4(0)              ; Point to the first trimmed savearea
            add     r4,r4,r5                    ; Add number trimmed to free count
            stw     r8,SAVprev+4(r3)            ; Chain the old head to the tail of the trimmed guys
            sub     r7,r7,r5                    ; Remove the trims from the in use count
            stw     r4,SVfreecnt(0)             ; Set new free count
            stw     r7,SVinuse(0)               ; Set new in use count

            mtlr    r0                          ; Restore the return to our caller
            b       saveunlock                  ; Set adjust count, unlock the saveanchor, and return

/*
 * *******************************
 * * s a v e _ t r i m _ f r e e *
 * *******************************
 *
 *      struct savearea_comm *save_trim_free(void);
 *
 * Trim the free list down to the target count, ie by -(SVadjust) save areas.
 * It trims the list and, if a pool page was fully allocated, puts that page on
 * the start of the pool list.
 *
 * If the savearea being released is the last on a pool page (i.e., all entries
 * are released), the page is dequeued from the pool and queued to any other
 * found during this scan.  Note that this queue is maintained virtually.
 *
 * When the scan is done, the saveanchor lock is released and the list of
 * freed pool pages is returned to our caller.
 *
 * For latency's sake we may want to revisit this code.  If we are trimming a
 * large number of saveareas, we could be disabled and holding the savearea lock
 * for quite a while.  It may be that we want to break the trim down into parts.
 * Possibly trimming the free list, then individually pushing them into the free pool.
 *
 * This function expects to be called with translation on and a valid stack.
 * It uses the standard ABI, ie we destroy r2 and r3-r11, and return the ptr in r3.
 */
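/*
 * The per-page bookkeeping the trim loops below rely on, sketched in C
 * (illustrative; SAC_EMPTY assumes 6 slots per page, i.e. sac_empty ==
 * 0xFC000000, and a set bit in SACalloc means "this savearea is free"):
 *
 *     #include <stdint.h>
 *     #define SAC_EMPTY 0xFC000000u
 *
 *     // Free slot k of a pool page and report what the caller must do:
 *     // 1 = first free slot in a full page: enqueue page on the pool list
 *     // 2 = page now completely free: dequeue it and release the page
 *     // 0 = neither
 *     int free_slot_sketch(uint32_t *sacalloc, unsigned k)
 *     {
 *         uint32_t mask = 0x80000000u >> k;
 *         *sacalloc |= mask;                  // mark slot k free
 *         if (*sacalloc == mask)  return 1;   // it was the only free entry
 *         if (*sacalloc == SAC_EMPTY) return 2;
 *         return 0;
 *     }
 */
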
            .align  5
            .globl  EXT(save_trim_free)

LEXT(save_trim_free)
            subi    r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Make space for 4 registers on stack
            mflr    r9                          ; save our return address
            stw     r28,FM_SIZE+0(r1)           ; Save R28
            stw     r29,FM_SIZE+4(r1)           ; Save R29
            stw     r30,FM_SIZE+8(r1)           ; Save R30
            stw     r31,FM_SIZE+12(r1)          ; Save R31

            bl      saveSetup                   ; turn off translation and interrupts, load many regs
            bl      savelock                    ; Go lock up the anchor

            lwz     r8,SVadjust(0)              ; How many do we need to clear out?
            li      r3,0                        ; Get a 0
            neg.    r8,r8                       ; Get the actual we need to toss (adjust is neg if too many)
            ble-    save_trim_free1             ; skip if no trimming needed anymore
            bf--    pf64Bitb,saveTrim32         ; handle 32-bit processors
            b       saveTrim64                  ; handle 64-bit processors

save_trim_free1:                                ; by the time we were called, no need to trim anymore
            stw     r3,SVlock(0)                ; Quick unlock (no need for sync or to set adjust, nothing changed)
            mtlr    r9                          ; Restore return

#if FPVECDBG
            lis     r0,HIGH_ADDR(CutTrace)      ; (TEST/DEBUG)
            li      r2,0x2206                   ; (TEST/DEBUG)
            oris    r0,r0,LOW_ADDR(CutTrace)    ; (TEST/DEBUG)
            sc                                  ; (TEST/DEBUG)
#endif
            addi    r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Pop stack - have not trashed registers so no need to reload
            b       saveRestore                 ; restore translation and EE, turn SF off, return to our caller

/*
 * ***********************
 * * s a v e T r i m 3 2 *
 * ***********************
 *
 * Handle "save_trim_free" on 32-bit processors.  At this point, translation and interrupts
 * are off, the savearea anchor is locked, and:
 *      r8  = #pages to trim (>0)
 *      r9  = return address
 *      r10 = per-proc ptr
 *      r11 = MSR at entry
 */
saveTrim32:
            lwz     r7,SVfree+4(0)              ; Get the first on the free list
            mr      r6,r7                       ; Save the first one
            mr      r5,r8                       ; Save the number we are trimming

sttrimming: addic.  r5,r5,-1                    ; Any left to do?
            ble-    sttrimmed                   ; Nope...
            lwz     r7,SAVprev+4(r7)            ; Skip to the next one
            b       sttrimming                  ; Keep going...

sttrimmed:  lwz     r5,SAVprev+4(r7)            ; Get the next one (for new head of free list)
            lwz     r4,SVfreecnt(0)             ; Get the free count
            stw     r5,SVfree+4(0)              ; Set new head
            sub     r4,r4,r8                    ; Calculate the new free count
            li      r31,0                       ; Show we have no free pool blocks yet
            crclr   cr1_eq                      ; don't exit loop before 1st iteration
            stw     r4,SVfreecnt(0)             ; Set new free count
            lis     r30,hi16(sac_empty)         ; Get what empty looks like

; NOTE:  The savearea size must be 640 (0x280).  We are doing a divide by shifts and stuff
; here.
;
#if SAVsize != 640
#error Savearea size is not 640!!!!!!!!!!!!
#endif

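/*
 * A host-side check of the shift-table trick used below, assuming six
 * 640-byte saveareas per 4KB page.  The rotate of the packed constant
 * 0x00442200 plus the odd-128-byte bit reproduces offset/640 exactly:
 *
 *     #include <assert.h>
 *
 *     static unsigned rotl32(unsigned x, unsigned n)   // n is 1..31 here
 *     {
 *         return (x << n) | (x >> (32 - n));
 *     }
 *
 *     static unsigned sv_index(unsigned off)           // off = addr & 0xFFF
 *     {
 *         unsigned r4 = ((off >> 7) & 0x1E) + 1;       // rlwinm/addi below
 *         unsigned r3 = (off >> 7) & 1;                // rlwinm
 *         return (rotl32(0x00442200, r4) & 7) + r3;    // rlwnm/add
 *     }
 *
 *     int main(void)
 *     {
 *         for (unsigned k = 0; k < 6; k++)
 *             assert(sv_index(k * 640) == k);          // index == offset/640
 *         return 0;
 *     }
 */
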
; Loop over each savearea we are trimming.
;   r6 = next savearea to trim
;   r7 = last savearea to trim
;   r8 = #pages to trim (>0)
;   r9 = return address
;   r10 = per-proc ptr
;   r11 = MSR at entry
;   r30 = what SACalloc looks like when all saveareas are free
;   r31 = free pool block list
;   cr1 = beq set if we just trimmed the last, ie if we are done

sttoss:     beq+    cr1,stdone                  ; All done now...

            cmplw   cr1,r6,r7                   ; Have we finished the loop?

            lis     r0,0x0044                   ; Get top of table
            rlwinm  r2,r6,0,0,19                ; Back down to the savearea control stuff
            ori     r0,r0,0x2200                ; Finish shift table
            rlwinm  r4,r6,25,27,30              ; Get (addr >> 7) & 0x1E (same as twice high nybble)
            lwz     r5,SACalloc(r2)             ; Get the allocation bits
            addi    r4,r4,1                     ; Shift 1 extra
            rlwinm  r3,r6,25,31,31              ; Get (addr >> 7) & 1
            rlwnm   r0,r0,r4,29,31              ; Get partial index
            lis     r4,lo16(0x8000)             ; Get the bit mask
            add     r0,r0,r3                    ; Make the real index
            srw     r4,r4,r0                    ; Get the allocation mask
            or      r5,r5,r4                    ; Free this entry
            cmplw   r5,r4                       ; Is this the only free entry?
            lwz     r6,SAVprev+4(r6)            ; Chain to the next trimmed savearea
            cmplw   cr7,r30,r5                  ; Does this look empty?
            stw     r5,SACalloc(r2)             ; Save back the allocation bits
            beq-    stputpool                   ; First free entry, go put it into the pool...
            bne+    cr7,sttoss                  ; Not an empty block

; We have an empty block.  Remove it from the pool list.

            lwz     r29,SACflags(r2)            ; Get the flags
            cmplwi  cr5,r31,0                   ; Is this guy on the release list?
            lwz     r28,SACnext+4(r2)           ; Get the forward chain

            rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area? (also sets 0 needed below)
            bne-    sttoss                      ; This is permanent entry, do not try to release...

            lwz     r29,SACprev+4(r2)           ; and the previous
            beq-    cr5,stnot1st                ; Not first
            lwz     r0,SACvrswap+4(r31)         ; Load the previous pool page vr conversion

stnot1st:   stw     r28,SACnext+4(r29)          ; Previous guy points to my next
            xor     r0,r0,r31                   ; Make the last guy virtual
            stw     r29,SACprev+4(r28)          ; Next guy points back to my previous
            stw     r0,SAVprev+4(r2)            ; Store the old top virtual as my back chain
            mr      r31,r2                      ; My physical is now the head of the chain
            b       sttoss                      ; Get the next one...

; A pool block that had no free entries now has one.  Stick it on the pool list.

stputpool:  lwz     r28,SVpoolfwd+4(0)          ; Get the first guy on the list
            li      r0,saveanchor               ; Point to the saveanchor
            stw     r2,SVpoolfwd+4(0)           ; Put us on the top of the list
            stw     r28,SACnext+4(r2)           ; We point to the old top
            stw     r2,SACprev+4(r28)           ; Old top guy points back to us
            stw     r0,SACprev+4(r2)            ; Our back points to the anchor
            b       sttoss                      ; Go on to the next one...

/*
 * ***********************
 * * s a v e T r i m 6 4 *
 * ***********************
 *
 * Handle "save_trim_free" on 64-bit processors.  At this point, translation and interrupts
 * are off, SF is on, the savearea anchor is locked, and:
 *      r8  = #pages to trim (>0)
 *      r9  = return address
 *      r10 = per-proc ptr
 *      r11 = MSR at entry
 */
saveTrim64:
            ld      r7,SVfree(0)                ; Get the first on the free list
            mr      r6,r7                       ; Save the first one
            mr      r5,r8                       ; Save the number we are trimming

sttrimming64:
            addic.  r5,r5,-1                    ; Any left to do?
            ble--   sttrimmed64                 ; Nope...
            ld      r7,SAVprev(r7)              ; Skip to the next one
            b       sttrimming64                ; Keep going...

sttrimmed64:
            ld      r5,SAVprev(r7)              ; Get the next one (for new head of free list)
            lwz     r4,SVfreecnt(0)             ; Get the free count
            std     r5,SVfree(0)                ; Set new head
            sub     r4,r4,r8                    ; Calculate the new free count
            li      r31,0                       ; Show we have no free pool blocks yet
            crclr   cr1_eq                      ; don't exit loop before 1st iteration
            stw     r4,SVfreecnt(0)             ; Set new free count
            lis     r30,hi16(sac_empty)         ; Get what empty looks like

; Loop over each savearea we are trimming.
;   r6 = next savearea to trim
;   r7 = last savearea to trim
;   r8 = #pages to trim (>0)
;   r9 = return address
;   r10 = per-proc ptr
;   r11 = MSR at entry
;   r30 = what SACalloc looks like when all saveareas are free
;   r31 = free pool block list
;   cr1 = beq set if we just trimmed the last, ie if we are done

; WARNING: as in the 32-bit path, this code is doing a divide by 640 (SAVsize).

sttoss64:
            beq++   cr1,stdone                  ; All done now...

            cmpld   cr1,r6,r7                   ; Have we finished the loop?

            lis     r0,0x0044                   ; Get top of table
            rldicr  r2,r6,0,51                  ; r2 <- phys addr of savearea block (with control area)
            ori     r0,r0,0x2200                ; Finish shift table
            rlwinm  r4,r6,25,27,30              ; Get (addr >> 7) & 0x1E (same as twice high nybble)
            lwz     r5,SACalloc(r2)             ; Get the allocation bits
            addi    r4,r4,1                     ; Shift 1 extra
            rlwinm  r3,r6,25,31,31              ; Get (addr >> 7) & 1
            rlwnm   r0,r0,r4,29,31              ; Get partial index
            lis     r4,lo16(0x8000)             ; Get the bit mask
            add     r0,r0,r3                    ; Make the real index
            srw     r4,r4,r0                    ; Get the allocation mask
            or      r5,r5,r4                    ; Free this entry
            cmplw   r5,r4                       ; Is this the only free entry?
            ld      r6,SAVprev(r6)              ; Chain to the next trimmed savearea
            cmplw   cr7,r30,r5                  ; Does this look empty?
            stw     r5,SACalloc(r2)             ; Save back the allocation bits
            beq--   stputpool64                 ; First free entry, go put it into the pool...
            bne++   cr7,sttoss64                ; Not an empty block

; We have an empty block.  Remove it from the pool list.

            lwz     r29,SACflags(r2)            ; Get the flags
            cmpldi  cr5,r31,0                   ; Is this guy on the release list?
            ld      r28,SACnext(r2)             ; Get the forward chain

            rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area? (also sets 0 needed below)
            bne--   sttoss64                    ; This is permanent entry, do not try to release...

            ld      r29,SACprev(r2)             ; and the previous
            beq--   cr5,stnot1st64              ; Not first
            ld      r0,SACvrswap(r31)           ; Load the previous pool page vr conversion

stnot1st64:
            std     r28,SACnext(r29)            ; Previous guy points to my next
            xor     r0,r0,r31                   ; Make the last guy virtual
            std     r29,SACprev(r28)            ; Next guy points back to my previous
            std     r0,SAVprev(r2)              ; Store the old top virtual as my back chain
            mr      r31,r2                      ; My physical is now the head of the chain
            b       sttoss64                    ; Get the next one...

; A pool block that had no free entries now has one.  Stick it on the pool list.

stputpool64:
            ld      r28,SVpoolfwd(0)            ; Get the first guy on the list
            li      r0,saveanchor               ; Point to the saveanchor
            std     r2,SVpoolfwd(0)             ; Put us on the top of the list
            std     r28,SACnext(r2)             ; We point to the old top
            std     r2,SACprev(r28)             ; Old top guy points back to us
            std     r0,SACprev(r2)              ; Our back points to the anchor
            b       sttoss64                    ; Go on to the next one...

; We are all done.  Relocate pool release head, restore all, and go.  This code
; is used both by the 32 and 64-bit paths.
;   r9 = return address
;   r10 = per-proc ptr
;   r11 = MSR at entry
;   r31 = free pool block list

stdone:     bl      saveunlock                  ; Unlock the saveanchor and set adjust field

            mr.     r3,r31                      ; Move release chain and see if there are any
            li      r5,0                        ; Assume either V=R or no release chain
            beq-    stnorel                     ; Nothing to release...
            lwz     r5,SACvrswap+4(r31)         ; Get the vr conversion (only need low half if 64-bit)

stnorel:
            bl      saveRestore                 ; restore translation and exceptions, turn off SF
            mtlr    r9                          ; Restore the return

            lwz     r28,FM_SIZE+0(r1)           ; Restore R28
            lwz     r29,FM_SIZE+4(r1)           ; Restore R29
            lwz     r30,FM_SIZE+8(r1)           ; Restore R30
            lwz     r31,FM_SIZE+12(r1)          ; Restore R31
            addi    r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Pop the stack
            xor     r3,r3,r5                    ; Convert release chain address to virtual
            rlwinm  r3,r3,0,0,31                ; if 64-bit, clear upper half of virtual address

#if FPVECDBG
            lis     r0,HIGH_ADDR(CutTrace)      ; (TEST/DEBUG)
            li      r2,0x2207                   ; (TEST/DEBUG)
            oris    r0,r0,LOW_ADDR(CutTrace)    ; (TEST/DEBUG)
            sc                                  ; (TEST/DEBUG)
#endif
            blr                                 ; Return...

/*
 * ***************************
 * * s a v e _ r e c o v e r *
 * ***************************
 *
 *      int save_recover(void);
 *
 * Returns nonzero if we can get enough saveareas to hit the target.  We scan the free
 * pool.  If we empty a pool block, we remove it from the pool list.
 */
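/*
 * The inner recovery step, sketched in C (illustrative: uses a GCC-style
 * builtin for cntlzw; SACalloc semantics as above, a set bit meaning the
 * corresponding savearea is free but not on any list):
 *
 *     #include <stdint.h>
 *
 *     // Claim one free savearea from a pool page, or return 0 if none.
 *     static int recover_one_sketch(uint32_t *sacalloc, uint32_t page,
 *                                   uint32_t *savearea)
 *     {
 *         if (*sacalloc == 0)
 *             return 0;                           // nothing to recover here
 *         unsigned k = __builtin_clz(*sacalloc);  // cntlzw: first free slot
 *         *sacalloc &= ~(0x80000000u >> k);       // clear its "free" bit
 *         *savearea = page + (k << 9) + (k << 7); // k*512 + k*128 == k*640
 *         return 1;                               // caller pushes onto SVfree
 *     }
 */
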
            .align  5
            .globl  EXT(save_recover)

LEXT(save_recover)
            mflr    r9                          ; save return address
            bl      saveSetup                   ; turn translation and interrupts off, SF on, load many regs
            bl      savelock                    ; lock the savearea anchor

            lwz     r8,SVadjust(0)              ; How many do we need to get?
            li      r3,0                        ; Get a 0
            mr.     r8,r8                       ; Do we need any?
            ble--   save_recover1               ; not any more
            bf--    pf64Bitb,saveRecover32      ; handle 32-bit processor
            b       saveRecover64               ; handle 64-bit processor

save_recover1:                                  ; by the time we locked the anchor, no longer short
            mtlr    r9                          ; Restore return
            stw     r3,SVlock(0)                ; Quick unlock (no need for sync or to set adjust, nothing changed)
#if FPVECDBG
            lis     r0,HIGH_ADDR(CutTrace)      ; (TEST/DEBUG)
            li      r2,0x2208                   ; (TEST/DEBUG)
            oris    r0,r0,LOW_ADDR(CutTrace)    ; (TEST/DEBUG)
            sc                                  ; (TEST/DEBUG)
#endif
            b       saveRestore                 ; turn translation etc back on, return to our caller

/*
 * *****************************
 * * s a v e R e c o v e r 3 2 *
 * *****************************
 *
 * Handle "save_recover" on 32-bit processors.  At this point, translation and interrupts
 * are off, the savearea anchor is locked, and:
 *      r8  = #pages to recover
 *      r9  = return address
 *      r10 = per-proc ptr
 *      r11 = MSR at entry
 */
saveRecover32:
            li      r6,saveanchor               ; Start at pool anchor
            crclr   cr1_eq                      ; initialize the loop test
            lwz     r7,SVfreecnt(0)             ; Get the current free count

; Loop over next block in free pool.  r6 is the ptr to the last block we looked at.

srcnpool:   lwz     r6,SACnext+4(r6)            ; Point to the next one
            cmplwi  r6,saveanchor               ; Have we wrapped?
            beq-    srcdone                     ; Yes, did not have enough...

            lwz     r5,SACalloc(r6)             ; Pick up the allocation for this pool block

; NOTE:  The savearea size must be 640 (0x280).  We are doing a multiply by shifts and add.
; offset = (index << 9) + (index << 7)
;
#if SAVsize != 640
#error Savearea size is not 640!!!!!!!!!!!!
#endif

; Loop over free saveareas in the current block.
;   r5 = bitmap of free saveareas in block at r6 (ie, SACalloc)
;   r6 = ptr to current free pool block
;   r7 = current free count
;   r8 = #pages more we still need to recover
;   r9 = return address
;   r10 = per-proc ptr
;   r11 = MSR at entry
;   cr1 = beq if (r8==0)

srcnext:    beq-    cr1,srcdone                 ; We have no more to get...

            lis     r3,0x8000                   ; Get the top bit on
            cntlzw  r4,r5                       ; Find a free slot
            addi    r7,r7,1                     ; Bump up the free count
            srw     r3,r3,r4                    ; Make a mask
            slwi    r0,r4,7                     ; First multiply by 128
            subi    r8,r8,1                     ; Decrement the need count
            slwi    r2,r4,9                     ; Then multiply by 512
            andc.   r5,r5,r3                    ; Clear out the "free" bit
            add     r2,r2,r0                    ; Sum to multiply by 640

            stw     r5,SACalloc(r6)             ; Set new allocation bits

            add     r2,r2,r6                    ; Get the actual address of the savearea
            lwz     r3,SVfree+4(0)              ; Get the head of the chain
            cmplwi  cr1,r8,0                    ; Do we actually need any more?
            stw     r2,SVfree+4(0)              ; Push ourselves in the front
            stw     r3,SAVprev+4(r2)            ; Chain the rest of the list behind

            bne+    srcnext                     ; The pool block is not empty yet, try for another...

            lwz     r2,SACnext+4(r6)            ; Get the next pointer
            lwz     r3,SACprev+4(r6)            ; Get the previous pointer
            stw     r3,SACprev+4(r2)            ; The previous of my next points to my previous
            stw     r2,SACnext+4(r3)            ; The next of my previous points to my next
            bne+    cr1,srcnpool                ; We still have more to do...

; Join here from 64-bit path when we have recovered all the saveareas we need to.

srcdone:    stw     r7,SVfreecnt(0)             ; Set the new free count
            bl      saveunlock                  ; Unlock the save anchor and set the adjust field

            mtlr    r9                          ; Restore the return
#if FPVECDBG
            lis     r0,HIGH_ADDR(CutTrace)      ; (TEST/DEBUG)
            li      r2,0x2209                   ; (TEST/DEBUG)
            oris    r0,r0,LOW_ADDR(CutTrace)    ; (TEST/DEBUG)
            sc                                  ; (TEST/DEBUG)
#endif
            b       saveRestore                 ; turn xlate and EE back on, SF off, and return to our caller

/*
 * *****************************
 * * s a v e R e c o v e r 6 4 *
 * *****************************
 *
 * Handle "save_recover" on 64-bit processors.  At this point, translation and interrupts
 * are off, the savearea anchor is locked, and:
 *      r8  = #pages to recover
 *      r9  = return address
 *      r10 = per-proc ptr
 *      r11 = MSR at entry
 */
saveRecover64:
            li      r6,saveanchor               ; Start at pool anchor
            crclr   cr1_eq                      ; initialize the loop test
            lwz     r7,SVfreecnt(0)             ; Get the current free count

; Loop over next block in free pool.  r6 is the ptr to the last block we looked at.

srcnpool64:
            ld      r6,SACnext(r6)              ; Point to the next one
            cmpldi  r6,saveanchor               ; Have we wrapped?
            beq--   srcdone                     ; Yes, did not have enough...

            lwz     r5,SACalloc(r6)             ; Pick up the allocation for this pool block

; Loop over free saveareas in the current block.
;   r5 = bitmap of free saveareas in block at r6 (ie, SACalloc)
;   r6 = ptr to current free pool block
;   r7 = current free count
;   r8 = #pages more we still need to recover
;   r9 = return address
;   r10 = per-proc ptr
;   r11 = MSR at entry
;   cr1 = beq if (r8==0)

; WARNING: as in the 32-bit path, we depend on (SAVsize==640)

srcnext64:
            beq--   cr1,srcdone                 ; We have no more to get...

            lis     r3,0x8000                   ; Get the top bit on
            cntlzw  r4,r5                       ; Find a free slot
            addi    r7,r7,1                     ; Bump up the free count
            srw     r3,r3,r4                    ; Make a mask
            slwi    r0,r4,7                     ; First multiply by 128
            subi    r8,r8,1                     ; Decrement the need count
            slwi    r2,r4,9                     ; Then multiply by 512
            andc.   r5,r5,r3                    ; Clear out the "free" bit
            add     r2,r2,r0                    ; Sum to multiply by 640

            stw     r5,SACalloc(r6)             ; Set new allocation bits

            add     r2,r2,r6                    ; Get the actual address of the savearea
            ld      r3,SVfree(0)                ; Get the head of the chain
            cmplwi  cr1,r8,0                    ; Do we actually need any more?
            std     r2,SVfree(0)                ; Push ourselves in the front
            std     r3,SAVprev(r2)              ; Chain the rest of the list behind

            bne++   srcnext64                   ; The pool block is not empty yet, try for another...

            ld      r2,SACnext(r6)              ; Get the next pointer
            ld      r3,SACprev(r6)              ; Get the previous pointer
            std     r3,SACprev(r2)              ; The previous of my next points to my previous
            std     r2,SACnext(r3)              ; The next of my previous points to my next
            bne++   cr1,srcnpool64              ; We still have more to do...

            b       srcdone                     ; Join the common exit in the 32-bit path...

/*
 * *******************
 * * s a v e l o c k *
 * *******************
 *
 * Lock the savearea anchor, so we can manipulate the free list.
 *      msr = interrupts and translation off
 */
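/*
 * The lock protocol below, rendered with C11 atomics as an illustrative
 * sketch: spin with plain loads until the lock looks free, and only then
 * attempt the reserved store (lwarx/stwcx.); a failed attempt kills the
 * reservation, as the lgKillResv store does below.
 *
 *     #include <stdatomic.h>
 *
 *     void savelock_acquire_sketch(atomic_uint *lock)
 *     {
 *         for (;;) {
 *             while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
 *                 ;                           // read-only spin, no reservation
 *             unsigned expected = 0;
 *             if (atomic_compare_exchange_weak_explicit(lock, &expected, 1,
 *                     memory_order_acquire,   // plays the role of the isync
 *                     memory_order_relaxed))
 *                 return;                     // lock is ours
 *         }
 *     }
 */
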
            .align  5

savelock:   lwz     r8,SVlock(0)                ; See if lock is held
            mr.     r8,r8                       ; Is it locked?
            li      r12,saveanchor              ; Point to the saveanchor
            bne--   savelock                    ; loop until lock released...

savelock0:  lwarx   r8,0,r12                    ; Grab the lock value
            mr.     r8,r8                       ; Is it locked?
            li      r8,1                        ; get nonzero to lock it with
            bne--   savelock1                   ; already locked, wait for it to clear...
            stwcx.  r8,0,r12                    ; Try to seize that there durn lock
            isync                               ; assume we got it
            beqlr++                             ; reservation not lost, so we have the lock
            b       savelock0                   ; Try again...

savelock1:  li      r8,lgKillResv               ; Point to killing field
            stwcx.  r8,0,r8                     ; Kill reservation
            b       savelock                    ; Start over....

/*
 * ***********************
 * * s a v e u n l o c k *
 * ***********************
 *
 * This is the common routine that sets the saveadjust field and unlocks the
 * savearea anchor.
 *      msr = interrupts and translation off
 */
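/*
 * What the branch-free arithmetic below computes, per its own comments, as
 * a C sketch (illustrative; the *_MIN/*_HYST values are stand-ins for
 * FreeListMin and SaveLow/HighHysteresis; positive means "allocate more",
 * negative means "trim"):
 *
 *     #define FREE_LIST_MIN 4
 *     #define LO_HYST       4
 *     #define HI_HYST       4
 *
 *     int sv_adjust_sketch(int freecnt, int inuse, int target)
 *     {
 *         if (freecnt < FREE_LIST_MIN)
 *             return FREE_LIST_MIN - freecnt; // shortfall on the free list
 *         int total = freecnt + inuse;
 *         if (total < target - LO_HYST || total > target + HI_HYST)
 *             return target - total;          // outside the window: correct
 *         return 0;                           // inside the window: leave it
 *     }
 */
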
            .align  5

saveunlock:
            lwz     r6,SVfreecnt(0)             ; Get the number on the free list
            lwz     r5,SVinuse(0)               ; Pick up the in use count
            subic.  r8,r6,FreeListMin           ; do we have at least the minimum?
            lwz     r2,SVtarget(0)              ; Get the target
            neg     r8,r8                       ; assuming we are short, get r8 <- shortfall
            blt--   saveunlock1                 ; skip if fewer than minimum on free list

            add     r6,r6,r5                    ; Get the total number of saveareas
            addi    r5,r2,-SaveLowHysteresis    ; Find low end of acceptable range
            sub     r5,r6,r5                    ; Make everything below hysteresis negative
            sub     r2,r2,r6                    ; Get the distance from the target
            addi    r5,r5,-(SaveLowHysteresis + SaveHighHysteresis + 1) ; Subtract full hysteresis range
            srawi   r5,r5,31                    ; Get 0xFFFFFFFF if outside range or 0 if inside
            and     r8,r2,r5                    ; r8 <- 0 if in range or distance to target if not

saveunlock1:
            li      r5,0                        ; Set a clear value
            stw     r8,SVadjust(0)              ; Set the adjustment value
            eieio                               ; Make sure everything is done
            stw     r5,SVlock(0)                ; Unlock the savearea chain
            blr

/*
 * *******************
 * * s a v e _ c p v *
 * *******************
 *
 *      struct savearea *save_cpv(addr64_t saveAreaPhysAddr);
 *
 * Converts a physical savearea address to virtual.  Called with translation on
 * and in 32-bit mode.  Note that the argument is passed as a long long in (r3,r4).
 */
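/*
 * The conversion itself is a single xor: each pool page records in
 * SACvrswap the value (virtual address ^ physical address) of the page, so
 * the same operation maps either direction.  A minimal sketch:
 *
 *     #include <stdint.h>
 *
 *     static inline uint64_t sv_phys_to_virt(uint64_t pa, uint64_t vrswap)
 *     {
 *         return pa ^ vrswap;         // virt == phys ^ vrswap, and back
 *     }
 */
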
            .align  5
            .globl  EXT(save_cpv)

LEXT(save_cpv)
            mflr    r9                          ; save return address
            mr      r8,r3                       ; save upper half of phys address here
            bl      saveSetup                   ; turn off translation and interrupts, turn SF on
            rlwinm  r5,r4,0,0,19                ; Round back to the start of the physical savearea block
            bf--    pf64Bitb,save_cpv1          ; skip if 32-bit processor
            rldimi  r5,r8,32,0                  ; r5 <- 64-bit phys address of block

save_cpv1:
            lwz     r6,SACvrswap+4(r5)          ; Get the conversion to virtual (only need low half if 64-bit)
            mtlr    r9                          ; restore return address
            xor     r3,r4,r6                    ; convert phys to virtual
            rlwinm  r3,r3,0,0,31                ; if 64-bit, zero upper half of virtual address
            b       saveRestore                 ; turn translation etc back on, SF off, and return r3

/*
 * *********************
 * * s a v e S e t u p *
 * *********************
 *
 * This routine is called at the start of all the save-area subroutines.
 * It turns off translation, disables interrupts, turns on 64-bit mode,
 * and sets up cr6 with the feature flags (especially pf64Bit).
 *
 * Note that most save-area routines cannot take _any_ interrupt (such as a
 * PTE miss) once the savearea anchor is locked, since that would result in
 * instant deadlock as we need a save-area to process any exception.
 * We set up:
 *      r10 = per-proc ptr
 *      r11 = MSR at entry
 *      cr5 = pfNoMSRir feature flag
 *      cr6 = pf64Bit feature flag
 *
 * We use r0, r3, r10, and r11.
 */
            .align  5

saveSetup:
            mfmsr   r11                         ; save the MSR at entry
            mfsprg  r3,2                        ; get feature flags
            li      r0,0                        ; get a 0
            mtcrf   0x2,r3                      ; copy pf64Bit to cr6
            ori     r0,r0,lo16(MASK(MSR_IR)+MASK(MSR_DR)+MASK(MSR_EE))
            mtcrf   0x4,r3                      ; copy pfNoMSRir to cr5
            andc    r3,r11,r0                   ; turn off IR, DR, and EE
            li      r0,1                        ; get a 1 in case it's a 64-bit machine
            bf--    pf64Bitb,saveSetup1         ; skip if not a 64-bit machine

            rldimi  r3,r0,63,MSR_SF_BIT         ; turn SF (bit 0) on
            mtmsrd  r3                          ; turn translation and interrupts off, 64-bit mode on
            isync                               ; wait for it to happen
            mfsprg  r10,0                       ; get per-proc ptr
            blr

saveSetup1:                                     ; here on 32-bit machines
            bt-     pfNoMSRirb,saveSetup2       ; skip if we cannot turn off IR with a mtmsr
            mtmsr   r3                          ; turn translation and interrupts off
            isync                               ; wait for it to happen
            mfsprg  r10,0                       ; get per-proc ptr
            blr

saveSetup2:                                     ; here if pfNoMSRir set for this machine
            li      r0,loadMSR                  ; we will "mtmsr r3" via system call
            sc                                  ; do it
            mfsprg  r10,0                       ; get per-proc ptr
            blr

/*
 * *************************
 * * s a v e R e s t o r e *
 * *************************
 *
 * Undoes the effect of calling "saveSetup", ie it turns relocation and interrupts back on,
 * and turns 64-bit mode back off.
 *      r11 = MSR to restore (saved by saveSetup)
 *      cr6 = pf64Bit feature flag
 */
            .align  5

saveRestore:
            bt++    pf64Bitb,saveRestore64      ; handle a 64-bit processor

saveRestore32:
            mtmsr   r11                         ; restore MSR
            isync                               ; wait for translation to start up
            blr

saveRestore64:                                  ; 64-bit processor
            mtmsrd  r11                         ; restore MSR
            isync                               ; wait for changes to happen
            blr