1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #define FPVECDBG 0
24
25 #include <assym.s>
26 #include <debug.h>
27 #include <cpus.h>
28 #include <db_machine_commands.h>
29 #include <mach_rt.h>
30
31 #include <mach_debug.h>
32 #include <ppc/asm.h>
33 #include <ppc/proc_reg.h>
34 #include <ppc/exception.h>
35 #include <ppc/Performance.h>
37 #include <ppc/savearea.h>
38 #include <mach/ppc/vm_param.h>
39
40 .text
41
42 /* Register usage conventions in this code:
43 * r9 = return address
44 * r10 = per-proc ptr
45 * r11 = MSR at entry
46 * cr6 = feature flags (ie, pf64Bit)
47 *
48 * Because much of this code deals with physical addresses,
49 * there are parallel paths for 32- and 64-bit machines.
50 */
51
52
53 /*
54 * ***********************
55 * * s a v e _ q u e u e *
56 * ***********************
57 *
58 * void save_queue(ppnum_t pagenum);
59 *
60 * This routine will add a savearea block to the free list.
61 * We also queue the block to the free pool list. This is a
62  * circular doubly linked list. Because this block has no free entries,
63  * it gets queued to the end of the list.
64 */
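/*
 * For reference, a rough C sketch of the logic implemented below (not from the
 * original source; the struct layout and the helpers savelock()/saveunlock() are
 * illustrative stand-ins, while SAVprev, SVfree, SVfreecnt, SAVsize, sac_cnt,
 * and SAVempty mirror the assembly symbols used in this file):
 *
 *	enum { SAVsize = 640, sac_cnt = 4096 / SAVsize };	// six 640-byte saveareas per 4K page
 *
 *	struct save_sk {
 *		struct save_sk	*SAVprev;		// free-chain link
 *		unsigned char	 flags;			// holds the SAVempty marker
 *	};
 *
 *	void save_queue_sketch(unsigned long page_phys)		// page_phys = pagenum << 12
 *	{
 *		struct save_sk *first = (struct save_sk *)page_phys;
 *		struct save_sk *sv = first;
 *
 *		for (int i = 0; i < sac_cnt; i++) {		// walk every slot in the page
 *			sv->flags = SAVempty;			// mark it free
 *			if (i == sac_cnt - 1)
 *				break;				// last slot is linked below
 *			sv->SAVprev = (struct save_sk *)((char *)sv + SAVsize);
 *			sv = sv->SAVprev;			// chain to the next slot
 *		}
 *
 *		savelock();					// take the saveanchor lock
 *		sv->SAVprev = saveanchor.SVfree;		// old head hangs off our last slot
 *		saveanchor.SVfree = first;			// new block becomes the list head
 *		saveanchor.SVfreecnt += sac_cnt;		// account for the new entries
 *		saveunlock();					// recompute SVadjust and drop the lock
 *	}
 */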
65 .align 5
66 .globl EXT(save_queue)
67
68 LEXT(save_queue)
69 mflr r9 ; get return address
70 mr r8,r3 ; move pagenum out of the way
71 bl saveSetup ; turn translation off, 64-bit on, load many regs
72 bf-- pf64Bitb,saveQueue32 ; skip if 32-bit processor
73
74 sldi r2,r8,12 ; r2 <-- phys address of page
75 li r8,sac_cnt ; Get the number of saveareas per page
76 mr r4,r2 ; Point to start of chain
77 li r0,SAVempty ; Get empty marker
78
79 saveQueue64a:
80 addic. r8,r8,-1 ; Keep track of how many we did
81 stb r0,SAVflags+2(r4) ; Set empty
82 addi r7,r4,SAVsize ; Point to the next slot
83 ble- saveQueue64b ; We are done with the chain
84 std r7,SAVprev(r4) ; Set this chain
85 mr r4,r7 ; Step to the next
86 b saveQueue64a ; Fill the whole block...
87
88 saveQueue64b:
89 bl savelock ; Go lock the save anchor
90
91 ld r7,SVfree(0) ; Get the free save area list anchor
92 lwz r6,SVfreecnt(0) ; Get the number of free saveareas
93
94 std r2,SVfree(0) ; Queue in the new one
95 addi r6,r6,sac_cnt ; Count the ones we are linking in
96 std r7,SAVprev(r4) ; Queue the old first one off of us
97 stw r6,SVfreecnt(0) ; Save the new count
98 b saveQueueExit
99
100 ; Handle 32-bit processor.
101
102 saveQueue32:
103 slwi r2,r8,12 ; r2 <-- phys address of page
104 li r8,sac_cnt ; Get the number of saveareas per page
105 mr r4,r2 ; Point to start of chain
106 li r0,SAVempty ; Get empty marker
107
108 saveQueue32a:
109 addic. r8,r8,-1 ; Keep track of how many we did
110 stb r0,SAVflags+2(r4) ; Set empty
111 addi r7,r4,SAVsize ; Point to the next slot
112 ble- saveQueue32b ; We are done with the chain
113 stw r7,SAVprev+4(r4) ; Set this chain
114 mr r4,r7 ; Step to the next
115 b saveQueue32a ; Fill the whole block...
116
117 saveQueue32b:
118 bl savelock ; Go lock the save anchor
119
120 lwz r7,SVfree+4(0) ; Get the free save area list anchor
121 lwz r6,SVfreecnt(0) ; Get the number of free saveareas
122
123 stw r2,SVfree+4(0) ; Queue in the new one
124 addi r6,r6,sac_cnt ; Count the ones we are linking in
125 stw r7,SAVprev+4(r4) ; Queue the old first one off of us
126 stw r6,SVfreecnt(0) ; Save the new count
127
128 saveQueueExit: ; join here from 64-bit path
129 bl saveunlock ; Unlock the list and set the adjust count
130 mtlr r9 ; Restore the return
131
132 #if FPVECDBG
133 mfsprg r2,1 ; (TEST/DEBUG)
134 mr. r2,r2 ; (TEST/DEBUG)
135 beq-- saveRestore ; (TEST/DEBUG)
136 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
137 li r2,0x2201 ; (TEST/DEBUG)
138 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
139 sc ; (TEST/DEBUG)
140 #endif
141 b saveRestore ; Restore interrupts and translation
142
143 /*
144 * *****************************
145 * * s a v e _ g e t _ i n i t *
146 * *****************************
147 *
148 * addr64_t save_get_init(void);
149 *
150 * Note that save_get_init is used in initial processor startup only. It
151 * is used because translation is on, but no tables exist yet and we have
152 * no V=R BAT registers that cover the entire physical memory.
153 */
154 .align 5
155 .globl EXT(save_get_init)
156
157 LEXT(save_get_init)
158 mflr r9 ; get return address
159 bl saveSetup ; turn translation off, 64-bit on, load many regs
160 bfl-- pf64Bitb,saveGet32 ; Get r3 <- savearea, r5 <- page address (with SAC)
161 btl++ pf64Bitb,saveGet64 ; get one on a 64-bit machine
162 bl saveRestore ; restore translation etc
163 mtlr r9
164
165 ; unpack the physaddr in r3 into a long long in (r3,r4)
166
167 mr r4,r3 ; copy low word of phys address to r4
168 li r3,0 ; assume upper word was 0
169 bflr-- pf64Bitb ; if 32-bit processor, return
170 srdi r3,r4,32 ; unpack reg64_t to addr64_t on 64-bit machine
171 rlwinm r4,r4,0,0,31
172 blr
173
174
175 /*
176 * *******************
177 * * s a v e _ g e t *
178 * *******************
179 *
180 * savearea *save_get(void);
181 *
182 * Allocate a savearea, returning a virtual address. NOTE: we must preserve
183 * r0, r2, and r12. Our callers in cswtch.s depend on this.
184 */
185 .align 5
186 .globl EXT(save_get)
187
188 LEXT(save_get)
189 mflr r9 ; get return address
190 mr r5,r0 ; copy regs before saveSetup nails them
191 bl saveSetup ; turn translation off, 64-bit on, load many regs
192 bf-- pf64Bitb,svgt1 ; skip if 32-bit processor
193
194 std r5,tempr0(r10) ; save r0 in per-proc across call to saveGet64
195 std r2,tempr2(r10) ; and r2
196 std r12,tempr4(r10) ; and r12
197 bl saveGet64 ; get r3 <- savearea, r5 <- page address (with SAC)
198 ld r0,tempr0(r10) ; restore callers regs
199 ld r2,tempr2(r10)
200 ld r12,tempr4(r10)
201 b svgt2
202
203 svgt1: ; handle 32-bit processor
204 stw r5,tempr0+4(r10) ; save r0 in per-proc across call to saveGet32
205 stw r2,tempr2+4(r10) ; and r2
206 stw r12,tempr4+4(r10) ; and r12
207 bl saveGet32 ; get r3 <- savearea, r5 <- page address (with SAC)
208 lwz r0,tempr0+4(r10) ; restore callers regs
209 lwz r2,tempr2+4(r10)
210 lwz r12,tempr4+4(r10)
211
212 svgt2:
213 lwz r5,SACvrswap+4(r5) ; Get the virtual to real translation (only need low word)
214 mtlr r9 ; restore return address
215 xor r3,r3,r5 ; convert physaddr to virtual
216 rlwinm r3,r3,0,0,31 ; 0 upper word if a 64-bit machine
217
218 #if FPVECDBG
219 mr r6,r0 ; (TEST/DEBUG)
220 mr r7,r2 ; (TEST/DEBUG)
221 mfsprg r2,1 ; (TEST/DEBUG)
222 mr. r2,r2 ; (TEST/DEBUG)
223 beq-- svgDBBypass ; (TEST/DEBUG)
224 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
225 li r2,0x2203 ; (TEST/DEBUG)
226 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
227 sc ; (TEST/DEBUG)
228 svgDBBypass: ; (TEST/DEBUG)
229 mr r0,r6 ; (TEST/DEBUG)
230 mr r2,r7 ; (TEST/DEBUG)
231 #endif
232 b saveRestore ; restore MSR and return to our caller
233
234
235 /*
236 * ***********************************
237 * * s a v e _ g e t _ p h y s _ 3 2 *
238 * ***********************************
239 *
240  * reg64_t save_get_phys_32(void);
241 *
242 * This is the entry normally called from lowmem_vectors.s with
243 * translation and interrupts already off.
244 * MUST NOT TOUCH CR7
245 */
246 .align 5
247 .globl EXT(save_get_phys_32)
248
249 LEXT(save_get_phys_32)
250 mfsprg r10,0 ; get the per-proc ptr
251 b saveGet32 ; Get r3 <- savearea, r5 <- page address (with SAC)
252
253
254 /*
255 * ***********************************
256 * * s a v e _ g e t _ p h y s _ 6 4 *
257 * ***********************************
258 *
259 * reg64_t save_get_phys_64(void);
260 *
261 * This is the entry normally called from lowmem_vectors.s with
262 * translation and interrupts already off, and in 64-bit mode.
263 * MUST NOT TOUCH CR7
264 */
265 .align 5
266 .globl EXT(save_get_phys_64)
267
268 LEXT(save_get_phys_64)
269 mfsprg r10,0 ; get the per-proc ptr
270 b saveGet64 ; Get r3 <- savearea, r5 <- page address (with SAC)
271
272
273 /*
274 * *********************
275 * * s a v e G e t 6 4 *
276 * *********************
277 *
278 * This is the internal routine to allocate a savearea on a 64-bit processor.
279 * Note that we must not take any exceptions of any kind, including PTE misses, as that
280 * would deadlock trying to reenter this routine. We pass back the 64-bit physical address.
281  * First we try the local list. If that is below a threshold, we replenish it from the
282  * global free list, which requires taking a lock. If there are no saveareas in either list,
283  * we will install the backpocket and choke. This routine assumes that the caller has
284 * turned translation off, masked interrupts, turned on 64-bit mode, and set up:
285 * r10 = per-proc ptr
286 *
287 * We return:
288 * r3 = 64-bit physical address of the savearea
289 * r5 = 64-bit physical address of the page the savearea is in, with SAC
290 *
291 * We destroy:
292 * r2-r8.
293 *
294 * MUST NOT TOUCH CR7
295 */
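/*
 * For reference, a rough C outline of the allocation policy implemented by
 * saveGet64 and saveGet32 (not from the original source; the helpers
 * move_global_to_local() and install_backpocket_and_choke(), the BUGBUG poison
 * constants, and the struct types are illustrative stand-ins, while lclfree,
 * lclfreecnt, SVfree, SVfreecnt, LocalSaveMin, and LocalSaveTarget mirror the
 * assembly symbols):
 *
 *	struct save_sk *saveGet_sketch(struct perproc_sk *pp)
 *	{
 *		while (pp->lclfreecnt <= LocalSaveMin) {	// local list is running low
 *			savelock();
 *			unsigned int want = LocalSaveTarget - pp->lclfreecnt;
 *			unsigned int take = (want < saveanchor.SVfreecnt) ? want : saveanchor.SVfreecnt;
 *			if (take != 0) {
 *				move_global_to_local(pp, take);	// splice entries over; fix SVfreecnt,
 *				saveunlock();			//   SVinuse, and lclfreecnt
 *				continue;			// recheck the local list
 *			}
 *			saveunlock();
 *			if (pp->lclfreecnt == 0)		// nothing anywhere: last resort
 *				install_backpocket_and_choke();	// panic with the reserve saveareas
 *			break;					// global list empty, use what is left locally
 *		}
 *		struct save_sk *sv = pp->lclfree;		// pop the head of the local list
 *		pp->lclfree = sv->SAVprev;
 *		pp->lclfreecnt--;
 *		sv->SAVprev = BUGBUG_PTR;			// 0x5555... poison, as below
 *		sv->SAVlevel = BUGBUG;				// likewise for level and activation
 *		sv->SAVflags = 0;
 *		return sv;
 *	}
 *
 * The point of the per-processor local list is that the common path never has
 * to touch the saveanchor lock; only refills and trims do.
 */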
296
297 saveGet64:
298 lwz r8,lclfreecnt(r10) ; Get the count
299 ld r3,lclfree(r10) ; Get the start of local savearea list
300 cmplwi r8,LocalSaveMin ; Are we too low?
301 ble-- saveGet64GetGlobal ; We are too low and need to grow list...
302
303 ; Get it from the per-processor local list.
304
305 saveGet64GetLocal:
306 li r2,0x5555 ; get r2 <-- 0x55555555 55555555, our bugbug constant
307 ld r4,SAVprev(r3) ; Chain to the next one
308 oris r2,r2,0x5555
309 subi r8,r8,1 ; Back down count
310 rldimi r2,r2,32,0
311
312 std r2,SAVprev(r3) ; bug next ptr
313 stw r2,SAVlevel(r3) ; bug context ID
314 li r6,0
315 std r4,lclfree(r10) ; Unchain first savearea
316 stw r2,SAVact(r3) ; bug activation ptr
317 rldicr r5,r3,0,51 ; r5 <-- page ptr, where SAC is kept
318 stw r8,lclfreecnt(r10) ; Set new count
319 stw r6,SAVflags(r3) ; clear the flags
320
321 blr
322
323 ; Local list was low so replenish from global list.
324 ; r7 = return address to caller of saveGet64
325 ; r8 = lclfreecnt
326 ; r10 = per-proc ptr
327
328 saveGet64GetGlobal:
329 mflr r7 ; save return address
330 subfic r5,r8,LocalSaveTarget ; Get the number of saveareas we need to grab to get to target
331 bl savelock ; Go lock up the anchor
332
333 lwz r2,SVfreecnt(0) ; Get the number on this list
334 ld r8,SVfree(0) ; Get the head of the save area list
335
336 sub r3,r2,r5 ; Get number left after we swipe enough for local list
337 sradi r3,r3,63 ; Get 0 if enough or -1 if not
338 andc r4,r5,r3 ; Get number to get if there are enough, 0 otherwise
339 and r5,r2,r3 ; Get 0 if there are enough, number on list otherwise
340 or. r5,r4,r5 ; r5 <- number we will move from global to local list
341 beq-- saveGet64NoFree ; There are none to get...
342
343 mtctr r5 ; Get loop count
344 mr r6,r8 ; Remember the first in the list
345
346 saveGet64c:
347 bdz saveGet64d ; Count down and branch when we hit 0...
348 ld r8,SAVprev(r8) ; Get the next
349 b saveGet64c ; Keep going...
350
351 saveGet64d:
352 ld r3,SAVprev(r8) ; Get the next one
353 lwz r4,SVinuse(0) ; Get the in use count
354 sub r2,r2,r5 ; Count down what we stole
355 std r3,SVfree(0) ; Set the new first in list
356 add r4,r4,r5 ; Count the ones we just put in the local list as "in use"
357 stw r2,SVfreecnt(0) ; Set the new count
358 stw r4,SVinuse(0) ; Set the new in use count
359
360 ld r4,lclfree(r10) ; Get the old head of list
361 lwz r3,lclfreecnt(r10) ; Get the old count
362 std r6,lclfree(r10) ; Set the new head of the list
363 add r3,r3,r5 ; Get the new count
364 std r4,SAVprev(r8) ; Point to the old head
365 stw r3,lclfreecnt(r10) ; Set the new count
366
367 bl saveunlock ; Update the adjust field and unlock
368 mtlr r7 ; restore return address
369 b saveGet64 ; Start over and finally allocate the savearea...
370
371 ; The local list is below the repopulate threshold and the global list is empty.
372 ; First we check if there are any left in the local list and if so, we allow
373 ; them to be allocated. If not, we release the backpocket list and choke.
374 ; There is nothing more that we can do at this point. Hopefully we stay alive
375 ; long enough to grab some much-needed panic information.
376 ; r7 = return address to caller of saveGet64
377 ; r10 = per-proc ptr
378
379 saveGet64NoFree:
380 lwz r8,lclfreecnt(r10) ; Get the count
381 mr. r8,r8 ; Are there any reserve to get?
382 beq-- saveGet64Choke ; No, go choke and die...
383 bl saveunlock ; Update the adjust field and unlock
384 ld r3,lclfree(r10) ; Get the start of local savearea list
385 lwz r8,lclfreecnt(r10) ; Get the count
386 mtlr r7 ; restore return address
387 b saveGet64GetLocal ; We have some left, dip on in...
388
389 ; We who are about to die salute you. The savearea chain is messed up or
390 ; empty. Add in a few so we have enough to take down the system.
391
392 saveGet64Choke:
393 lis r9,hi16(EXT(backpocket)) ; Get high order of back pocket
394 ori r9,r9,lo16(EXT(backpocket)) ; and low part
395
396 lwz r8,SVfreecnt-saveanchor(r9) ; Get the new number of free elements
397 ld r7,SVfree-saveanchor(r9) ; Get the head of the chain
398 lwz r6,SVinuse(0) ; Get total in the old list
399
400 stw r8,SVfreecnt(0) ; Set the new number of free elements
401 add r6,r6,r8 ; Add in the new ones
402 std r7,SVfree(0) ; Set the new head of the chain
403 stw r6,SVinuse(0) ; Set total in the new list
404
405 saveGetChokeJoin: ; join in the fun from 32-bit mode
406 lis r0,hi16(Choke) ; Set choke firmware call
407 li r7,0 ; Get a clear register to unlock
408 ori r0,r0,lo16(Choke) ; Set the rest of the choke call
409 li r3,failNoSavearea ; Set failure code
410
411 eieio ; Make sure all is committed
412 stw r7,SVlock(0) ; Unlock the free list
413 sc ; System ABEND
414
415
416 /*
417 * *********************
418 * * s a v e G e t 3 2 *
419 * *********************
420 *
421 * This is the internal routine to allocate a savearea on a 32-bit processor.
422 * Note that we must not take any exceptions of any kind, including PTE misses, as that
423 * would deadlock trying to reenter this routine. We pass back the 32-bit physical address.
424  * First we try the local list. If that is below a threshold, we replenish it from the
425  * global free list, which requires taking a lock. If there are no saveareas in either list,
426  * we will install the backpocket and choke. This routine assumes that the caller has
427 * turned translation off, masked interrupts, and set up:
428 * r10 = per-proc ptr
429 *
430 * We return:
431 * r3 = 32-bit physical address of the savearea
432 * r5 = 32-bit physical address of the page the savearea is in, with SAC
433 *
434 * We destroy:
435 * r2-r8.
436 */
437
438 saveGet32:
439 lwz r8,lclfreecnt(r10) ; Get the count
440 lwz r3,lclfree+4(r10) ; Get the start of local savearea list
441 cmplwi r8,LocalSaveMin ; Are we too low?
442 ble- saveGet32GetGlobal ; We are too low and need to grow list...
443
444 ; Get savearea from per-processor local list.
445
446 saveGet32GetLocal:
447 li r2,0x5555 ; get r2 <-- 0x55555555, our bugbug constant
448 lwz r4,SAVprev+4(r3) ; Chain to the next one
449 oris r2,r2,0x5555
450 subi r8,r8,1 ; Back down count
451
452 stw r2,SAVprev+4(r3) ; bug next ptr
453 stw r2,SAVlevel(r3) ; bug context ID
454 li r6,0
455 stw r4,lclfree+4(r10) ; Unchain first savearea
456 stw r2,SAVact(r3) ; bug activation ptr
457 rlwinm r5,r3,0,0,19 ; r5 <-- page ptr, where SAC is kept
458 stw r8,lclfreecnt(r10) ; Set new count
459 stw r6,SAVflags(r3) ; clear the flags
460
461 blr
462
463 ; Local list was low so replenish from global list.
464 ; r7 = return address to caller of saveGet32
465 ; r8 = lclfreecnt
466 ; r10 = per-proc ptr
467
468 saveGet32GetGlobal:
469 mflr r7 ; save return address
470 subfic r5,r8,LocalSaveTarget ; Get the number of saveareas we need to grab to get to target
471 bl savelock ; Go lock up the anchor
472
473 lwz r2,SVfreecnt(0) ; Get the number on this list
474 lwz r8,SVfree+4(0) ; Get the head of the save area list
475
476 sub r3,r2,r5 ; Get number left after we swipe enough for local list
477 srawi r3,r3,31 ; Get 0 if enough or -1 if not
478 andc r4,r5,r3 ; Get number to get if there are enough, 0 otherwise
479 and r5,r2,r3 ; Get 0 if there are enough, number on list otherwise
480 or. r5,r4,r5 ; r5 <- number we will move from global to local list
481 beq- saveGet32NoFree ; There are none to get...
482
483 mtctr r5 ; Get loop count
484 mr r6,r8 ; Remember the first in the list
485
486 saveGet32c:
487 bdz saveGet32d ; Count down and branch when we hit 0...
488 lwz r8,SAVprev+4(r8) ; Get the next
489 b saveGet32c ; Keep going...
490
491 saveGet32d:
492 lwz r3,SAVprev+4(r8) ; Get the next one
493 lwz r4,SVinuse(0) ; Get the in use count
494 sub r2,r2,r5 ; Count down what we stole
495 stw r3,SVfree+4(0) ; Set the new first in list
496 add r4,r4,r5 ; Count the ones we just put in the local list as "in use"
497 stw r2,SVfreecnt(0) ; Set the new count
498 stw r4,SVinuse(0) ; Set the new in use count
499
500 lwz r4,lclfree+4(r10) ; Get the old head of list
501 lwz r3,lclfreecnt(r10) ; Get the old count
502 stw r6,lclfree+4(r10) ; Set the new head of the list
503 add r3,r3,r5 ; Get the new count
504 stw r4,SAVprev+4(r8) ; Point to the old head
505 stw r3,lclfreecnt(r10) ; Set the new count
506
507 bl saveunlock ; Update the adjust field and unlock
508 mtlr r7 ; restore return address
509 b saveGet32 ; Start over and finally allocate the savearea...
510
511 ; The local list is below the repopulate threshold and the global list is empty.
512 ; First we check if there are any left in the local list and if so, we allow
513 ; them to be allocated. If not, we release the backpocket list and choke.
514 ; There is nothing more that we can do at this point. Hopefully we stay alive
515 ; long enough to grab some much-needed panic information.
516 ; r7 = return address to caller of saveGet32
517 ; r10 = per-proc ptr
518
519 saveGet32NoFree:
520 lwz r8,lclfreecnt(r10) ; Get the count
521 mr. r8,r8 ; Are there any reserve to get?
522 beq- saveGet32Choke ; No, go choke and die...
523 bl saveunlock ; Update the adjust field and unlock
524 lwz r3,lclfree+4(r10) ; Get the start of local savearea list
525 lwz r8,lclfreecnt(r10) ; Get the count
526 mtlr r7 ; restore return address
527 b saveGet32GetLocal ; We have some left, dip on in...
528
529 ; We who are about to die salute you. The savearea chain is messed up or
530 ; empty. Add in a few so we have enough to take down the system.
531
532 saveGet32Choke:
533 lis r9,hi16(EXT(backpocket)) ; Get high order of back pocket
534 ori r9,r9,lo16(EXT(backpocket)) ; and low part
535
536 lwz r8,SVfreecnt-saveanchor(r9) ; Get the new number of free elements
537 lwz r7,SVfree+4-saveanchor(r9) ; Get the head of the chain
538 lwz r6,SVinuse(0) ; Get total in the old list
539
540 stw r8,SVfreecnt(0) ; Set the new number of free elements
541 add r6,r6,r8 ; Add in the new ones (why?)
542 stw r7,SVfree+4(0) ; Set the new head of the chain
543 stw r6,SVinuse(0) ; Set total in the new list
544
545 b saveGetChokeJoin
546
547
548 /*
549 * *******************
550 * * s a v e _ r e t *
551 * *******************
552 *
553 * void save_ret(struct savearea *); // normal call
554 * void save_ret_wMSR(struct savearea *,reg64_t); // passes MSR to restore as 2nd arg
555 *
556 * Return a savearea passed by virtual address to the free list.
557 * Note really well: we can take NO exceptions of any kind,
558 * including a PTE miss once the savearea lock is held. That's
559 * a guaranteed deadlock. That means we must disable for interrutions
560  * a guaranteed deadlock. That means we must disable interruptions
561 */
562 .globl EXT(save_ret_wMSR) ; alternate entry pt w MSR to restore in r4
563
564 LEXT(save_ret_wMSR)
565 crset 31 ; set flag for save_ret_wMSR
566 b svrt1 ; join common code
567
568 .align 5
569 .globl EXT(save_ret)
570
571 LEXT(save_ret)
572 crclr 31 ; clear flag for save_ret_wMSR
573 svrt1: ; join from save_ret_wMSR
574 mflr r9 ; get return address
575 rlwinm r7,r3,0,0,19 ; get virtual address of SAC area at start of page
576 mr r8,r3 ; save virtual address
577 lwz r5,SACvrswap+0(r7) ; get 64-bit converter from V to R
578 lwz r6,SACvrswap+4(r7) ; both halves, though only bottom used on 32-bit machine
579 #if FPVECDBG
580 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
581 li r2,0x2204 ; (TEST/DEBUG)
582 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
583 sc ; (TEST/DEBUG)
584 #endif
585 bl saveSetup ; turn translation off, 64-bit on, load many regs
586 bf++ 31,svrt3 ; skip if not save_ret_wMSR
587 mr r11,r4 ; was save_ret_wMSR, so overwrite saved MSR
588 svrt3:
589 bf-- pf64Bitb,svrt4 ; skip if a 32-bit processor
590
591 ; Handle 64-bit processor.
592
593 rldimi r6,r5,32,0 ; merge upper and lower halves of SACvrswap together
594 xor r3,r8,r6 ; get r3 <- 64-bit physical address of this savearea
595 bl saveRet64 ; return it
596 mtlr r9 ; restore return address
597 b saveRestore64 ; restore MSR
598
599 ; Handle 32-bit processor.
600
601 svrt4:
602 xor r3,r8,r6 ; get r3 <- 32-bit physical address of this savearea
603 bl saveRet32 ; return it
604 mtlr r9 ; restore return address
605 b saveRestore32 ; restore MSR
606
607
608 /*
609 * *****************************
610 * * s a v e _ r e t _ p h y s *
611 * *****************************
612 *
613 * void save_ret_phys(reg64_t);
614 *
615 * Called from lowmem vectors to return (ie, free) a savearea by physical address.
616  * Translation and interrupts are already off, and 64-bit mode is set if the processor supports it.
617 * We can take _no_ exceptions of any kind in this code, including PTE miss, since
618 * that would result in a deadlock. We expect:
619 * r3 = phys addr of savearea
620 * msr = IR, DR, and EE off, SF on
621 * cr6 = pf64Bit flag
622 * We destroy:
623 * r0,r2-r10.
624 */
625 .align 5
626 .globl EXT(save_ret_phys)
627
628 LEXT(save_ret_phys)
629 mfsprg r10,0 ; get the per-proc ptr
630 bf-- pf64Bitb,saveRet32 ; handle 32-bit machine
631 b saveRet64 ; handle 64-bit machine
632
633
634 /*
635 * *********************
636 * * s a v e R e t 6 4 *
637 * *********************
638 *
639 * This is the internal routine to free a savearea, passed by 64-bit physical
640 * address. We assume that IR, DR, and EE are all off, that SF is on, and:
641 * r3 = phys address of the savearea
642 * r10 = per-proc ptr
643 * We destroy:
644 * r0,r2-r8.
645 */
646 .align 5
647 saveRet64:
648 li r0,SAVempty ; Get marker for free savearea
649 lwz r7,lclfreecnt(r10) ; Get the local count
650 ld r6,lclfree(r10) ; Get the old local header
651 addi r7,r7,1 ; Pop up the free count
652 std r6,SAVprev(r3) ; Plant free chain pointer
653 cmplwi r7,LocalSaveMax ; Has the list gotten too long?
654 stb r0,SAVflags+2(r3) ; Mark savearea free
655 std r3,lclfree(r10) ; Chain us on in
656 stw r7,lclfreecnt(r10) ; Bump up the count
657 bltlr++ ; List not too long, so done
658
659 /* The local savearea chain has gotten too long. Trim it down to the target.
660 * Here's a tricky bit, and important:
661 *
662 * When we trim the list, we NEVER trim the very first one. This is because that is
663 * the very last one released and the exception exit code will release the savearea
664 * BEFORE it is done using it. Wouldn't be too good if another processor started
665 * using it, eh? So for this case, we are safe so long as the savearea stays on
666 * the local list. (Note: the exit routine needs to do this because it is in the
667 * process of restoring all context and it needs to keep it until the last second.)
668 */
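/*
 * For reference, a rough C sketch of this free-and-trim logic (not from the
 * original source; the types are illustrative, while SAVprev, lclfree,
 * lclfreecnt, SVfree, SVfreecnt, SVinuse, LocalSaveMax, and LocalSaveTarget
 * mirror the assembly symbols):
 *
 *	void saveRet_sketch(struct perproc_sk *pp, struct save_sk *sv)
 *	{
 *		sv->flags = SAVempty;			// mark it free
 *		sv->SAVprev = pp->lclfree;		// push onto the local free list
 *		pp->lclfree = sv;
 *		if (++pp->lclfreecnt < LocalSaveMax)
 *			return;				// list still short enough
 *
 *		// Too long: hand everything beyond LocalSaveTarget back to the global
 *		// list, but never sv itself, which exception exit may still be using.
 *		unsigned int ntrim = pp->lclfreecnt - LocalSaveTarget;
 *		struct save_sk *first = sv->SAVprev;	// first savearea to trim
 *		struct save_sk *last = first;
 *		for (unsigned int i = 1; i < ntrim; i++)
 *			last = last->SAVprev;		// find the last one to trim
 *		sv->SAVprev = last->SAVprev;		// unhook the trimmed span locally
 *		pp->lclfreecnt = LocalSaveTarget;
 *
 *		savelock();
 *		last->SAVprev = saveanchor.SVfree;	// splice the span onto the global list
 *		saveanchor.SVfree = first;
 *		saveanchor.SVfreecnt += ntrim;
 *		saveanchor.SVinuse  -= ntrim;
 *		saveunlock();				// recompute SVadjust and drop the lock
 *	}
 */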
669
670 mflr r0 ; save return to caller of saveRet64
671 mr r2,r3 ; r2 <- 1st one on local list, which must not be trimmed
672 ld r3,SAVprev(r3) ; Skip over the first
673 subi r7,r7,LocalSaveTarget ; Figure out how much to trim
674 mr r6,r3 ; r6 <- first one to trim
675 mr r5,r7 ; Save the number we are trimming
676
677 saveRet64a:
678 addic. r7,r7,-1 ; Any left to do?
679 ble-- saveRet64b ; Nope...
680 ld r3,SAVprev(r3) ; Skip to the next one
681 b saveRet64a ; Keep going...
682
683 saveRet64b: ; r3 <- last one to trim
684 ld r7,SAVprev(r3) ; Point to the first one not to trim
685 li r4,LocalSaveTarget ; Set the target count
686 std r7,SAVprev(r2) ; Trim stuff leaving the one just released as first
687 stw r4,lclfreecnt(r10) ; Set the current count
688
689 bl savelock ; Lock up the anchor
690
691 ld r8,SVfree(0) ; Get the old head of the free list
692 lwz r4,SVfreecnt(0) ; Get the number of free ones
693 lwz r7,SVinuse(0) ; Get the number that are in use
694 std r6,SVfree(0) ; Point to the first trimmed savearea
695 add r4,r4,r5 ; Add number trimmed to free count
696 std r8,SAVprev(r3) ; Chain the old head to the tail of the trimmed guys
697 sub r7,r7,r5 ; Remove the trims from the in use count
698 stw r4,SVfreecnt(0) ; Set new free count
699 stw r7,SVinuse(0) ; Set new in use count
700
701 mtlr r0 ; Restore the return to our caller
702 b saveunlock ; Set adjust count, unlock the saveanchor, and return
703
704
705 /*
706 * *********************
707 * * s a v e R e t 3 2 *
708 * *********************
709 *
710 * This is the internal routine to free a savearea, passed by 32-bit physical
711 * address. We assume that IR, DR, and EE are all off, and:
712 * r3 = phys address of the savearea
713 * r10 = per-proc ptr
714 * We destroy:
715 * r0,r2-r8.
716 */
717 .align 5
718 saveRet32:
719 li r0,SAVempty ; Get marker for free savearea
720 lwz r7,lclfreecnt(r10) ; Get the local count
721 lwz r6,lclfree+4(r10) ; Get the old local header
722 addi r7,r7,1 ; Pop up the free count
723 stw r6,SAVprev+4(r3) ; Plant free chain pointer
724 cmplwi r7,LocalSaveMax ; Has the list gotten too long?
725 stb r0,SAVflags+2(r3) ; Mark savearea free
726 stw r3,lclfree+4(r10) ; Chain us on in
727 stw r7,lclfreecnt(r10) ; Bump up the count
728 bltlr+ ; List not too long, so done
729
730 /* The local savearea chain has gotten too long. Trim it down to the target.
731 * Here's a tricky bit, and important:
732 *
733 * When we trim the list, we NEVER trim the very first one. This is because that is
734 * the very last one released and the exception exit code will release the savearea
735 * BEFORE it is done using it. Wouldn't be too good if another processor started
736 * using it, eh? So for this case, we are safe so long as the savearea stays on
737 * the local list. (Note: the exit routine needs to do this because it is in the
738 * process of restoring all context and it needs to keep it until the last second.)
739 */
740
741 mflr r0 ; save return to caller of saveRet32
742 mr r2,r3 ; r2 <- 1st one on local list, which must not be trimmed
743 lwz r3,SAVprev+4(r3) ; Skip over the first
744 subi r7,r7,LocalSaveTarget ; Figure out how much to trim
745 mr r6,r3 ; r6 <- first one to trim
746 mr r5,r7 ; Save the number we are trimming
747
748 saveRet32a:
749 addic. r7,r7,-1 ; Any left to do?
750 ble- saveRet32b ; Nope...
751 lwz r3,SAVprev+4(r3) ; Skip to the next one
752 b saveRet32a ; Keep going...
753
754 saveRet32b: ; r3 <- last one to trim
755 lwz r7,SAVprev+4(r3) ; Point to the first one not to trim
756 li r4,LocalSaveTarget ; Set the target count
757 stw r7,SAVprev+4(r2) ; Trim stuff leaving the one just released as first
758 stw r4,lclfreecnt(r10) ; Set the current count
759
760 bl savelock ; Lock up the anchor
761
762 lwz r8,SVfree+4(0) ; Get the old head of the free list
763 lwz r4,SVfreecnt(0) ; Get the number of free ones
764 lwz r7,SVinuse(0) ; Get the number that are in use
765 stw r6,SVfree+4(0) ; Point to the first trimmed savearea
766 add r4,r4,r5 ; Add number trimmed to free count
767 stw r8,SAVprev+4(r3) ; Chain the old head to the tail of the trimmed guys
768 sub r7,r7,r5 ; Remove the trims from the in use count
769 stw r4,SVfreecnt(0) ; Set new free count
770 stw r7,SVinuse(0) ; Set new in use count
771
772 mtlr r0 ; Restore the return to our caller
773 b saveunlock ; Set adjust count, unlock the saveanchor, and return
774
775
776 /*
777 * *******************************
778 * * s a v e _ t r i m _ f r e e *
779 * *******************************
780 *
781 * struct savearea_comm *save_trim_free(void);
782 *
783 * Trim the free list down to the target count, ie by -(SVadjust) save areas.
784 * It trims the list and, if a pool page was fully allocated, puts that page on
785 * the start of the pool list.
786 *
787 * If the savearea being released is the last on a pool page (i.e., all entries
788 * are released), the page is dequeued from the pool and queued to any other
789 * found during this scan. Note that this queue is maintained virtually.
790 *
791 * When the scan is done, the saveanchor lock is released and the list of
792 * freed pool pages is returned to our caller.
793 *
794  * For latency's sake we may want to revisit this code. If we are trimming a
795 * large number of saveareas, we could be disabled and holding the savearea lock
796 * for quite a while. It may be that we want to break the trim down into parts.
797 * Possibly trimming the free list, then individually pushing them into the free pool.
798 *
799 * This function expects to be called with translation on and a valid stack.
800 * It uses the standard ABI, ie we destroy r2 and r3-r11, and return the ptr in r3.
801 */
802 .align 5
803 .globl EXT(save_trim_free)
804
805 LEXT(save_trim_free)
806
807 subi r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Make space for 4 registers on stack
808 mflr r9 ; save our return address
809 stw r28,FM_SIZE+0(r1) ; Save R28
810 stw r29,FM_SIZE+4(r1) ; Save R29
811 stw r30,FM_SIZE+8(r1) ; Save R30
812 stw r31,FM_SIZE+12(r1) ; Save R31
813
814 bl saveSetup ; turn off translation and interrupts, load many regs
815 bl savelock ; Go lock up the anchor
816
817 lwz r8,SVadjust(0) ; How many do we need to clear out?
818 li r3,0 ; Get a 0
819 neg. r8,r8 ; Get the actual we need to toss (adjust is neg if too many)
820 ble- save_trim_free1 ; skip if no trimming needed anymore
821 bf-- pf64Bitb,saveTrim32 ; handle 32-bit processors
822 b saveTrim64 ; handle 64-bit processors
823
824 save_trim_free1: ; by the time we were called, no need to trim anymore
825 stw r3,SVlock(0) ; Quick unlock (no need for sync or to set adjust, nothing changed)
826 mtlr r9 ; Restore return
827
828 #if FPVECDBG
829 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
830 li r2,0x2206 ; (TEST/DEBUG)
831 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
832 sc ; (TEST/DEBUG)
833 #endif
834 addi r1,r1,(FM_ALIGN(16)+FM_SIZE); Pop stack - have not trashed registers so no need to reload
835 b saveRestore ; restore translation and EE, turn SF off, return to our caller
836
837
838 /*
839 * ***********************
840 * * s a v e T r i m 3 2 *
841 * ***********************
842 *
843 * Handle "save_trim_free" on 32-bit processors. At this point, translation and interrupts
844 * are off, the savearea anchor is locked, and:
845  * r8 = #saveareas to trim (>0)
846 * r9 = return address
847 * r10 = per-proc ptr
848 * r11 = MSR at entry
849 */
850
851 saveTrim32:
852 lwz r7,SVfree+4(0) ; Get the first on the free list
853 mr r6,r7 ; Save the first one
854 mr r5,r8 ; Save the number we are trimming
855
856 sttrimming: addic. r5,r5,-1 ; Any left to do?
857 ble- sttrimmed ; Nope...
858 lwz r7,SAVprev+4(r7) ; Skip to the next one
859 b sttrimming ; Keep going...
860
861 sttrimmed: lwz r5,SAVprev+4(r7) ; Get the next one (for new head of free list)
862 lwz r4,SVfreecnt(0) ; Get the free count
863 stw r5,SVfree+4(0) ; Set new head
864 sub r4,r4,r8 ; Calculate the new free count
865 li r31,0 ; Show we have no free pool blocks yet
866 crclr cr1_eq ; don't exit loop before 1st iteration
867 stw r4,SVfreecnt(0) ; Set new free count
868 lis r30,hi16(sac_empty) ; Get what empty looks like
869
870 ; NOTE: The savearea size must be 640 (0x280). We are doing a divide by shifts and stuff
871 ; here.
872 ;
873 #if SAVsize != 640
874 #error Savearea size is not 640!!!!!!!!!!!!
875 #endif
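/*
 * What the shift-table code below computes, in plain C (illustrative): from a
 * savearea address it derives which of the page's slots the savearea occupies
 * and the corresponding bit in the page's SACalloc free-bit mask; in other
 * words it is a divide by 640 done with rotates instead of a divide instruction.
 *
 *	unsigned int index = (addr & 0xFFF) / 640;	// slot 0..5 within the 4K page
 *	unsigned int bit   = 0x80000000u >> index;	// that slot's bit in SACalloc
 *	SACalloc |= bit;				// mark the slot free again
 *
 * For example, a savearea at page offset 0x500 (1280 = 2 * 640) is slot 2,
 * so freeing it sets bit 0x20000000.
 */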
876
877 ; Loop over each savearea we are trimming.
878 ; r6 = next savearea to trim
879 ; r7 = last savearea to trim
880 ; r8 = #saveareas to trim (>0)
881 ; r9 = return address
882 ; r10 = per-proc ptr
883 ; r11 = MSR at entry
884 ; r30 = what SACalloc looks like when all saveareas are free
885 ; r31 = free pool block list
886 ; cr1 = beq set if we just trimmed the last, ie if we are done
887
888 sttoss: beq+ cr1,stdone ; All done now...
889
890 cmplw cr1,r6,r7 ; Have we finished the loop?
891
892 lis r0,0x0044 ; Get top of table
893 rlwinm r2,r6,0,0,19 ; Back down to the savearea control stuff
894 ori r0,r0,0x2200 ; Finish shift table
895 rlwinm r4,r6,25,27,30 ; Get (addr >> 7) & 0x1E (same as twice high nybble)
896 lwz r5,SACalloc(r2) ; Get the allocation bits
897 addi r4,r4,1 ; Shift 1 extra
898 rlwinm r3,r6,25,31,31 ; Get (addr >> 7) & 1
899 rlwnm r0,r0,r4,29,31 ; Get partial index
900 lis r4,lo16(0x8000) ; Get the bit mask
901 add r0,r0,r3 ; Make the real index
902 srw r4,r4,r0 ; Get the allocation mask
903 or r5,r5,r4 ; Free this entry
904 cmplw r5,r4 ; Is this the only free entry?
905 lwz r6,SAVprev+4(r6) ; Chain to the next trimmed savearea
906 cmplw cr7,r30,r5 ; Does this look empty?
907 stw r5,SACalloc(r2) ; Save back the allocation bits
908 beq- stputpool ; First free entry, go put it into the pool...
909 bne+ cr7,sttoss ; Not an empty block
910
911 ;
912 ; We have an empty block. Remove it from the pool list.
913 ;
914
915 lwz r29,SACflags(r2) ; Get the flags
916 cmplwi cr5,r31,0 ; Is the release list still empty?
917 lwz r28,SACnext+4(r2) ; Get the forward chain
918
919 rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area? (also leaves r0 = 0, needed below)
920 bne- sttoss ; This is permanent entry, do not try to release...
921
922 lwz r29,SACprev+4(r2) ; and the previous
923 beq- cr5,stnot1st ; Not first
924 lwz r0,SACvrswap+4(r31) ; Load the previous pool page vr conversion
925
926 stnot1st: stw r28,SACnext+4(r29) ; Previous guy points to my next
927 xor r0,r0,r31 ; Make the last guy virtual
928 stw r29,SACprev+4(r28) ; Next guy points back to my previous
929 stw r0,SAVprev+4(r2) ; Store the old top virtual as my back chain
930 mr r31,r2 ; My physical is now the head of the chain
931 b sttoss ; Get the next one...
932
933 ;
934 ; A pool block that had no free entries now has one. Stick it on the pool list.
935 ;
936
937 stputpool: lwz r28,SVpoolfwd+4(0) ; Get the first guy on the list
938 li r0,saveanchor ; Point to the saveanchor
939 stw r2,SVpoolfwd+4(0) ; Put us on the top of the list
940 stw r28,SACnext+4(r2) ; We point to the old top
941 stw r2,SACprev+4(r28) ; Old top guy points back to us
942 stw r0,SACprev+4(r2) ; Our back points to the anchor
943 b sttoss ; Go on to the next one...
944
945
946 /*
947 * ***********************
948 * * s a v e T r i m 6 4 *
949 * ***********************
950 *
951 * Handle "save_trim_free" on 64-bit processors. At this point, translation and interrupts
952 * are off, SF is on, the savearea anchor is locked, and:
953  * r8 = #saveareas to trim (>0)
954 * r9 = return address
955 * r10 = per-proc ptr
956 * r11 = MSR at entry
957 */
958
959 saveTrim64:
960 ld r7,SVfree(0) ; Get the first on the free list
961 mr r6,r7 ; Save the first one
962 mr r5,r8 ; Save the number we are trimming
963
964 sttrimming64:
965 addic. r5,r5,-1 ; Any left to do?
966 ble-- sttrimmed64 ; Nope...
967 ld r7,SAVprev(r7) ; Skip to the next one
968 b sttrimming64 ; Keep going...
969
970 sttrimmed64:
971 ld r5,SAVprev(r7) ; Get the next one (for new head of free list)
972 lwz r4,SVfreecnt(0) ; Get the free count
973 std r5,SVfree(0) ; Set new head
974 sub r4,r4,r8 ; Calculate the new free count
975 li r31,0 ; Show we have no free pool blocks yet
976 crclr cr1_eq ; don't exit loop before 1st iteration
977 stw r4,SVfreecnt(0) ; Set new free count
978 lis r30,hi16(sac_empty) ; Get what empty looks like
979
980
981 ; Loop over each savearea we are trimming.
982 ; r6 = next savearea to trim
983 ; r7 = last savearea to trim
984 ; r8 = #saveareas to trim (>0)
985 ; r9 = return address
986 ; r10 = per-proc ptr
987 ; r11 = MSR at entry
988 ; r30 = what SACalloc looks like when all saveareas are free
989 ; r31 = free pool block list
990 ; cr1 = beq set if we just trimmed the last, ie if we are done
991 ;
992 ; WARNING: as in the 32-bit path, this code is doing a divide by 640 (SAVsize).
993
994 sttoss64:
995 beq++ cr1,stdone ; All done now...
996
997 cmpld cr1,r6,r7 ; Have we finished the loop?
998
999 lis r0,0x0044 ; Get top of table
1000 rldicr r2,r6,0,51 ; r2 <- phys addr of savearea block (with control area)
1001 ori r0,r0,0x2200 ; Finish shift table
1002 rlwinm r4,r6,25,27,30 ; Get (addr >> 7) & 0x1E (same as twice high nybble)
1003 lwz r5,SACalloc(r2) ; Get the allocation bits
1004 addi r4,r4,1 ; Shift 1 extra
1005 rlwinm r3,r6,25,31,31 ; Get (addr >> 7) & 1
1006 rlwnm r0,r0,r4,29,31 ; Get partial index
1007 lis r4,lo16(0x8000) ; Get the bit mask
1008 add r0,r0,r3 ; Make the real index
1009 srw r4,r4,r0 ; Get the allocation mask
1010 or r5,r5,r4 ; Free this entry
1011 cmplw r5,r4 ; Is this the only free entry?
1012 ld r6,SAVprev(r6) ; Chain to the next trimmed savearea
1013 cmplw cr7,r30,r5 ; Does this look empty?
1014 stw r5,SACalloc(r2) ; Save back the allocation bits
1015 beq-- stputpool64 ; First free entry, go put it into the pool...
1016 bne++ cr7,sttoss64 ; Not an empty block
1017
1018 ; We have an empty block. Remove it from the pool list.
1019
1020 lwz r29,SACflags(r2) ; Get the flags
1021 cmpldi cr5,r31,0 ; Is the release list still empty?
1022 ld r28,SACnext(r2) ; Get the forward chain
1023
1024 rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area? (also leaves r0 = 0, needed below)
1025 bne-- sttoss64 ; This is permanent entry, do not try to release...
1026
1027 ld r29,SACprev(r2) ; and the previous
1028 beq-- cr5,stnot1st64 ; Not first
1029 ld r0,SACvrswap(r31) ; Load the previous pool page vr conversion
1030
1031 stnot1st64:
1032 std r28,SACnext(r29) ; Previous guy points to my next
1033 xor r0,r0,r31 ; Make the last guy virtual
1034 std r29,SACprev(r28) ; Next guy points back to my previous
1035 std r0,SAVprev(r2) ; Store the old top virtual as my back chain
1036 mr r31,r2 ; My physical is now the head of the chain
1037 b sttoss64 ; Get the next one...
1038
1039 ; A pool block that had no free entries now has one. Stick it on the pool list.
1040
1041 stputpool64:
1042 ld r28,SVpoolfwd(0) ; Get the first guy on the list
1043 li r0,saveanchor ; Point to the saveanchor
1044 std r2,SVpoolfwd(0) ; Put us on the top of the list
1045 std r28,SACnext(r2) ; We point to the old top
1046 std r2,SACprev(r28) ; Old top guy points back to us
1047 std r0,SACprev(r2) ; Our back points to the anchor
1048 b sttoss64 ; Go on to the next one...
1049
1050
1051 ; We are all done. Relocate pool release head, restore all, and go. This code
1052 ; is used both by the 32 and 64-bit paths.
1053 ; r9 = return address
1054 ; r10 = per-proc ptr
1055 ; r11 = MSR at entry
1056 ; r31 = free pool block list
1057
1058 stdone: bl saveunlock ; Unlock the saveanchor and set adjust field
1059
1060 mr. r3,r31 ; Move release chain and see if there are any
1061 li r5,0 ; Assume either V=R or no release chain
1062 beq- stnorel ; Nothing to release...
1063 lwz r5,SACvrswap+4(r31) ; Get the vr conversion (only need low half if 64-bit)
1064
1065 stnorel:
1066 bl saveRestore ; restore translation and exceptions, turn off SF
1067 mtlr r9 ; Restore the return
1068
1069 lwz r28,FM_SIZE+0(r1) ; Restore R28
1070 lwz r29,FM_SIZE+4(r1) ; Restore R29
1071 lwz r30,FM_SIZE+8(r1) ; Restore R30
1072 lwz r31,FM_SIZE+12(r1) ; Restore R31
1073 addi r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Pop the stack
1074 xor r3,r3,r5 ; Convert release chain address to virtual
1075 rlwinm r3,r3,0,0,31 ; if 64-bit, clear upper half of virtual address
1076
1077 #if FPVECDBG
1078 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1079 li r2,0x2207 ; (TEST/DEBUG)
1080 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1081 sc ; (TEST/DEBUG)
1082 #endif
1083 blr ; Return...
1084
1085
1086 /*
1087 * ***************************
1088 * * s a v e _ r e c o v e r *
1089 * ***************************
1090 *
1091 * int save_recover(void);
1092 *
1093 * Returns nonzero if we can get enough saveareas to hit the target. We scan the free
1094 * pool. If we empty a pool block, we remove it from the pool list.
1095 */
1096
1097 .align 5
1098 .globl EXT(save_recover)
1099
1100 LEXT(save_recover)
1101 mflr r9 ; save return address
1102 bl saveSetup ; turn translation and interrupts off, SF on, load many regs
1103 bl savelock ; lock the savearea anchor
1104
1105 lwz r8,SVadjust(0) ; How many do we need to get?
1106 li r3,0 ; Get a 0
1107 mr. r8,r8 ; Do we need any?
1108 ble-- save_recover1 ; not any more
1109 bf-- pf64Bitb,saveRecover32 ; handle 32-bit processor
1110 b saveRecover64 ; handle 64-bit processor
1111
1112 save_recover1: ; by the time we locked the anchor, no longer short
1113 mtlr r9 ; Restore return
1114 stw r3,SVlock(0) ; Quick unlock (no need for sync or to set adjust, nothing changed)
1115 #if FPVECDBG
1116 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1117 li r2,0x2208 ; (TEST/DEBUG)
1118 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1119 sc ; (TEST/DEBUG)
1120 #endif
1121 b saveRestore ; turn translation etc back on, return to our caller
1122
1123
1124 /*
1125 * *****************************
1126 * * s a v e R e c o v e r 3 2 *
1127 * *****************************
1128 *
1129 * Handle "save_recover" on 32-bit processors. At this point, translation and interrupts
1130 * are off, the savearea anchor is locked, and:
1131  * r8 = #saveareas to recover
1132 * r9 = return address
1133 * r10 = per-proc ptr
1134 * r11 = MSR at entry
1135 */
1136
1137 saveRecover32:
1138 li r6,saveanchor ; Start at pool anchor
1139 crclr cr1_eq ; initialize the loop test
1140 lwz r7,SVfreecnt(0) ; Get the current free count
1141
1142
1143 ; Loop over next block in free pool. r6 is the ptr to the last block we looked at.
1144
1145 srcnpool: lwz r6,SACnext+4(r6) ; Point to the next one
1146 cmplwi r6,saveanchor ; Have we wrapped?
1147 beq- srcdone ; Yes, did not have enough...
1148
1149 lwz r5,SACalloc(r6) ; Pick up the allocation for this pool block
1150
1151 ;
1152 ; NOTE: The savearea size must be 640 (0x280). We are doing a multiply by shifts and add.
1153 ; offset = (index << 9) + (index << 7)
1154 ;
1155 #if SAVsize != 640
1156 #error Savearea size is not 640!!!!!!!!!!!!
1157 #endif
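/*
 * In plain C, the inner loop below does roughly the following for each free
 * slot it recovers (illustrative; block_addr and push_on_free_list() are
 * stand-ins, while SACalloc and SVfree mirror the assembly symbols):
 *
 *	while (need != 0 && alloc_bits != 0) {
 *		unsigned int index  = __builtin_clz(alloc_bits);	// cntlzw: first free slot
 *		unsigned int offset = (index << 9) + (index << 7);	// index * 640, by shifts
 *		alloc_bits &= ~(0x80000000u >> index);			// slot is no longer free here
 *		push_on_free_list(block_addr + offset);			// onto the global SVfree chain
 *		free_count++;
 *		need--;
 *	}
 */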
1158
1159 ; Loop over free savearea in current block.
1160 ; r5 = bitmap of free saveareas in block at r6 (ie, SACalloc)
1161 ; r6 = ptr to current free pool block
1162 ; r7 = free count
1163 ; r8 = #saveareas more we still need to recover
1164 ; r9 = return address
1165 ; r10 = per-proc ptr
1166 ; r11 = MSR at entry
1167 ; cr1 = beq if (r8==0)
1168
1169 srcnext: beq- cr1,srcdone ; We have no more to get...
1170
1171 lis r3,0x8000 ; Get the top bit on
1172 cntlzw r4,r5 ; Find a free slot
1173 addi r7,r7,1 ; Bump up the free count
1174 srw r3,r3,r4 ; Make a mask
1175 slwi r0,r4,7 ; First multiply by 128
1176 subi r8,r8,1 ; Decrement the need count
1177 slwi r2,r4,9 ; Then multiply by 512
1178 andc. r5,r5,r3 ; Clear out the "free" bit
1179 add r2,r2,r0 ; Sum to multiply by 640
1180
1181 stw r5,SACalloc(r6) ; Set new allocation bits
1182
1183 add r2,r2,r6 ; Get the actual address of the savearea
1184 lwz r3,SVfree+4(0) ; Get the head of the chain
1185 cmplwi cr1,r8,0 ; Do we actually need any more?
1186 stw r2,SVfree+4(0) ; Push ourselves in the front
1187 stw r3,SAVprev+4(r2) ; Chain the rest of the list behind
1188
1189 bne+ srcnext ; The pool block is not empty yet, try for another...
1190
1191 lwz r2,SACnext+4(r6) ; Get the next pointer
1192 lwz r3,SACprev+4(r6) ; Get the previous pointer
1193 stw r3,SACprev+4(r2) ; The previous of my next points to my previous
1194 stw r2,SACnext+4(r3) ; The next of my previous points to my next
1195 bne+ cr1,srcnpool ; We still have more to do...
1196
1197
1198 ; Join here from 64-bit path when we have recovered all the saveareas we need to.
1199
1200 srcdone: stw r7,SVfreecnt(0) ; Set the new free count
1201 bl saveunlock ; Unlock the saveanchor and set adjust field
1202
1203 mtlr r9 ; Restore the return
1204 #if FPVECDBG
1205 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1206 li r2,0x2209 ; (TEST/DEBUG)
1207 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1208 sc ; (TEST/DEBUG)
1209 #endif
1210 b saveRestore ; turn xlate and EE back on, SF off, and return to our caller
1211
1212
1213 /*
1214 * *****************************
1215 * * s a v e R e c o v e r 6 4 *
1216 * *****************************
1217 *
1218 * Handle "save_recover" on 64-bit processors. At this point, translation and interrupts
1219 * are off, the savearea anchor is locked, and:
1220  * r8 = #saveareas to recover
1221 * r9 = return address
1222 * r10 = per-proc ptr
1223 * r11 = MSR at entry
1224 */
1225
1226 saveRecover64:
1227 li r6,saveanchor ; Start at pool anchor
1228 crclr cr1_eq ; initialize the loop test
1229 lwz r7,SVfreecnt(0) ; Get the current free count
1230
1231
1232 ; Loop over next block in free pool. r6 is the ptr to the last block we looked at.
1233
1234 srcnpool64:
1235 ld r6,SACnext(r6) ; Point to the next one
1236 cmpldi r6,saveanchor ; Have we wrapped?
1237 beq-- srcdone ; Yes, did not have enough...
1238
1239 lwz r5,SACalloc(r6) ; Pick up the allocation for this pool block
1240
1241
1242 ; Loop over free savearea in current block.
1243 ; r5 = bitmap of free saveareas in block at r6 (ie, SACalloc)
1244 ; r6 = ptr to current free pool block
1245 ; r7 = free count
1246 ; r8 = #saveareas more we still need to recover
1247 ; r9 = return address
1248 ; r10 = per-proc ptr
1249 ; r11 = MSR at entry
1250 ; cr1 = beq if (r8==0)
1251 ;
1252 ; WARNING: as in the 32-bit path, we depend on (SAVsize==640)
1253
1254 srcnext64:
1255 beq-- cr1,srcdone ; We have no more to get...
1256
1257 lis r3,0x8000 ; Get the top bit on
1258 cntlzw r4,r5 ; Find a free slot
1259 addi r7,r7,1 ; Bump up the free count
1260 srw r3,r3,r4 ; Make a mask
1261 slwi r0,r4,7 ; First multiply by 128
1262 subi r8,r8,1 ; Decrement the need count
1263 slwi r2,r4,9 ; Then multiply by 512
1264 andc. r5,r5,r3 ; Clear out the "free" bit
1265 add r2,r2,r0 ; Sum to multiply by 640
1266
1267 stw r5,SACalloc(r6) ; Set new allocation bits
1268
1269 add r2,r2,r6 ; Get the actual address of the savearea
1270 ld r3,SVfree(0) ; Get the head of the chain
1271 cmplwi cr1,r8,0 ; Do we actually need any more?
1272 std r2,SVfree(0) ; Push ourselves in the front
1273 std r3,SAVprev(r2) ; Chain the rest of the list behind
1274
1275 bne++ srcnext64 ; The pool block is not empty yet, try for another...
1276
1277 ld r2,SACnext(r6) ; Get the next pointer
1278 ld r3,SACprev(r6) ; Get the previous pointer
1279 std r3,SACprev(r2) ; The previous of my next points to my previous
1280 std r2,SACnext(r3) ; The next of my previous points to my next
1281 bne++ cr1,srcnpool64 ; We still have more to do...
1282
1283 b srcdone
1284
1285
1286 /*
1287 * *******************
1288 * * s a v e l o c k *
1289 * *******************
1290 *
1291 * Lock the savearea anchor, so we can manipulate the free list.
1292 * msr = interrupts and translation off
1293 * We destroy:
1294 * r8, r3, r12
1295 */
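/*
 * The lock below is a simple test-and-test-and-set spinlock on SVlock.  A
 * rough C equivalent using a compare-and-swap builtin in place of the
 * lwarx/stwcx. pair (illustrative; the reservation-killing store to
 * lgKillResv has no direct C analogue):
 *
 *	static void savelock_sketch(volatile unsigned int *lock)
 *	{
 *		for (;;) {
 *			while (*lock != 0)
 *				;				// spin without taking a reservation
 *			if (__sync_bool_compare_and_swap(lock, 0, 1))
 *				return;				// got it (the builtin is a full barrier;
 *		}						//  the asm uses isync for acquire)
 *	}
 */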
1296 .align 5
1297
1298 savelock: lwz r8,SVlock(0) ; See if lock is held
1299 cmpwi r8,0
1300 li r12,saveanchor ; Point to the saveanchor
1301 bne-- savelock ; loop until lock released...
1302
1303 savelock0: lwarx r8,0,r12 ; Grab the lock value
1304 cmpwi r8,0 ; taken?
1305 li r8,1 ; get nonzero to lock it with
1306 bne-- savelock1 ; already locked, wait for it to clear...
1307 stwcx. r8,0,r12 ; Try to seize that there durn lock
1308 isync ; assume we got it
1309 beqlr++ ; reservation not lost, so we have the lock
1310 b savelock0 ; Try again...
1311
1312 savelock1: li r8,lgKillResv ; Point to killing field
1313 stwcx. r8,0,r8 ; Kill reservation
1314 b savelock ; Start over....
1315
1316
1317 /*
1318 * ***********************
1319 * * s a v e u n l o c k *
1320 * ***********************
1321 *
1322 *
1323 * This is the common routine that sets the saveadjust field and unlocks the savearea
1324 * anchor.
1325 * msr = interrupts and translation off
1326 * We destroy:
1327 * r2, r5, r6, r8.
1328 */
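/*
 * The sign convention for SVadjust, which this routine recomputes before
 * dropping the lock: a positive value means the global free list is short by
 * that many saveareas (save_recover should add some), a negative value means
 * there are that many too many (save_trim_free should trim), and zero means
 * leave things alone.  A simplified C rendering of the idea, ignoring the
 * branch-free hysteresis refinement done below (illustrative):
 *
 *	int adjust;
 *	if (SVfreecnt < FreeListMin)
 *		adjust = FreeListMin - SVfreecnt;		// definitely need more right now
 *	else
 *		adjust = SVtarget - (SVfreecnt + SVinuse);	// head toward the overall target
 *	SVadjust = adjust;
 *	// eieio, then clear SVlock to release the anchor
 */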
1329 .align 5
1330 saveunlock:
1331 lwz r6,SVfreecnt(0) ; Get the number on the free list
1332 lwz r5,SVinuse(0) ; Pick up the in use count
1333 subic. r8,r6,FreeListMin ; do we have at least the minimum?
1334 lwz r2,SVtarget(0) ; Get the target
1335 neg r8,r8 ; assuming we are short, get r8 <- shortfall
1336 blt-- saveunlock1 ; skip if fewer than minimum on free list
1337
1338 add r6,r6,r5 ; Get the total number of saveareas
1339 addi r5,r2,-SaveLowHysteresis ; Find low end of acceptable range
1340 sub r5,r6,r5 ; Make everything below hysteresis negative
1341 sub r2,r2,r6 ; Get the distance from the target
1342 addi r5,r5,-(SaveLowHysteresis + SaveHighHysteresis + 1) ; Subtract full hysteresis range
1343 srawi r5,r5,31 ; Get 0xFFFFFFFF if outside range or 0 if inside
1344 and r8,r2,r5 ; r8 <- 0 if in range or distance to target if not
1345
1346 saveunlock1:
1347 li r5,0 ; Set a clear value
1348 stw r8,SVadjust(0) ; Set the adjustment value
1349 eieio ; Make sure everything is done
1350 stw r5,SVlock(0) ; Unlock the savearea chain
1351 blr
1352
1353
1354 /*
1355 * *******************
1356 * * s a v e _ c p v *
1357 * *******************
1358 *
1359 * struct savearea *save_cpv(addr64_t saveAreaPhysAddr);
1360 *
1361 * Converts a physical savearea address to virtual. Called with translation on
1362 * and in 32-bit mode. Note that the argument is passed as a long long in (r3,r4).
1363 */
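/*
 * The conversion trick used here (and throughout this file): the control area
 * of every savearea page stores SACvrswap, the XOR of the page's virtual and
 * physical addresses, so XORing any address inside the page with it converts
 * between the two.  Roughly, in C (illustrative types; SACvrswap_low_word_of()
 * is a stand-in, and the assembly does the load with translation off):
 *
 *	struct savearea *save_cpv_sketch(unsigned long long pa)
 *	{
 *		unsigned long long page = pa & ~0xFFFULL;	// start of the savearea block
 *		unsigned int vrswap = SACvrswap_low_word_of(page);
 *		return (struct savearea *)((unsigned int)pa ^ vrswap);	// phys -> virt
 *	}
 */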
1364
1365 .align 5
1366 .globl EXT(save_cpv)
1367
1368 LEXT(save_cpv)
1369 mflr r9 ; save return address
1370 mr r8,r3 ; save upper half of phys address here
1371 bl saveSetup ; turn off translation and interrupts, turn SF on
1372 rlwinm r5,r4,0,0,19 ; Round back to the start of the physical savearea block
1373 bf-- pf64Bitb,save_cpv1 ; skip if 32-bit processor
1374 rldimi r5,r8,32,0 ; r5 <- 64-bit phys address of block
1375 save_cpv1:
1376 lwz r6,SACvrswap+4(r5) ; Get the conversion to virtual (only need low half if 64-bit)
1377 mtlr r9 ; restore return address
1378 xor r3,r4,r6 ; convert phys to virtual
1379 rlwinm r3,r3,0,0,31 ; if 64-bit, zero upper half of virtual address
1380 b saveRestore ; turn translation etc back on, SF off, and return r3
1381
1382
1383 /*
1384 * *********************
1385 * * s a v e S e t u p *
1386 * *********************
1387 *
1388 * This routine is called at the start of all the save-area subroutines.
1389  * It turns off translation, disables interrupts, turns on 64-bit mode,
1390 * and sets up cr6 with the feature flags (especially pf64Bit).
1391 *
1392 * Note that most save-area routines cannot take _any_ interrupt (such as a
1393 * PTE miss) once the savearea anchor is locked, since that would result in
1394 * instant deadlock as we need a save-area to process any exception.
1395 * We set up:
1396 * r10 = per-proc ptr
1397 * r11 = old MSR
1398 * cr5 = pfNoMSRir feature flag
1399 * cr6 = pf64Bit feature flag
1400 *
1401 * We use r0, r3, r10, and r11.
1402 */
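/*
 * In effect (illustrative; MSR_IR, MSR_DR, MSR_EE, and MSR_SF stand for the
 * corresponding MSR mask bits defined in proc_reg.h):
 *
 *	old_msr = mfmsr();
 *	new_msr = old_msr & ~(MSR_IR | MSR_DR | MSR_EE);	// translation and interrupts off
 *	if (processor_is_64bit)
 *		new_msr |= MSR_SF;				// 64-bit addressing on
 *	write_msr(new_msr);	// via mtmsrd, mtmsr, or the loadMSR firmware call below
 *	// old_msr is left in r11 so saveRestore can undo all of this
 */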
1403
1404 saveSetup:
1405 mfmsr r11 ; get msr
1406 mfsprg r3,2 ; get feature flags
1407 li r0,0
1408 mtcrf 0x2,r3 ; copy pf64Bit to cr6
1409 ori r0,r0,lo16(MASK(MSR_IR)+MASK(MSR_DR)+MASK(MSR_EE))
1410 mtcrf 0x4,r3 ; copy pfNoMSRir to cr5
1411 andc r3,r11,r0 ; turn off IR, DR, and EE
1412 li r0,1 ; get a 1 in case it's a 64-bit machine
1413 bf-- pf64Bitb,saveSetup1 ; skip if not a 64-bit machine
1414 rldimi r3,r0,63,MSR_SF_BIT ; turn SF (bit 0) on
1415 mtmsrd r3 ; turn translation and interrupts off, 64-bit mode on
1416 isync ; wait for it to happen
1417 mfsprg r10,0 ; get per-proc ptr
1418 blr
1419 saveSetup1: ; here on 32-bit machines
1420 bt- pfNoMSRirb,saveSetup2 ; skip if cannot turn off IR with a mtmsr
1421 mtmsr r3 ; turn translation and interrupts off
1422 isync ; wait for it to happen
1423 mfsprg r10,0 ; get per-proc ptr
1424 blr
1425 saveSetup2: ; here if pfNoMSRir set for this machine
1426 li r0,loadMSR ; we will "mtmsr r3" via system call
1427 sc
1428 mfsprg r10,0 ; get per-proc ptr
1429 blr
1430
1431
1432 /*
1433 * *************************
1434 * * s a v e R e s t o r e *
1435 * *************************
1436 *
1437 * Undoes the effect of calling "saveSetup", ie it turns relocation and interrupts back on,
1438 * and turns 64-bit mode back off.
1439 * r11 = old MSR
1440 * cr6 = pf64Bit feature flag
1441 */
1442
1443 saveRestore:
1444 bt++ pf64Bitb,saveRestore64 ; handle a 64-bit processor
1445 saveRestore32:
1446 mtmsr r11 ; restore MSR
1447 isync ; wait for translation to start up
1448 blr
1449 saveRestore64: ; 64-bit processor
1450 mtmsrd r11 ; restore MSR
1451 isync ; wait for changes to happen
1452 blr
1453