/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
22 | ||
23 | #define FPVECDBG 0 | |
24 | ||
25 | #include <assym.s> | |
26 | #include <debug.h> | |
27 | #include <db_machine_commands.h> | |
28 | #include <mach_rt.h> | |
29 | ||
30 | #include <mach_debug.h> | |
31 | #include <ppc/asm.h> | |
32 | #include <ppc/proc_reg.h> | |
33 | #include <ppc/exception.h> | |
34 | #include <ppc/Performance.h> | |
35 | #include <ppc/exception.h> | |
36 | #include <ppc/savearea.h> | |
37 | #include <mach/ppc/vm_param.h> | |
38 | ||
39 | .text | |
40 | ||
/* Register usage conventions in this code:
 *      r9 = return address
 *      r10 = per-proc ptr
 *      r11 = MSR at entry
 *      cr6 = feature flags (ie, pf64Bit)
 *
 * Because much of this code deals with physical addresses,
 * there are parallel paths for 32- and 64-bit machines.
 */


/*
 * *****************************
 * * s a v e _ s n a p s h o t *
 * *****************************
 *
 * void save_snapshot();
 *
 * Link the current free list & processor local list on an independent list.
 */
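; A sketch of what the two loops below do (illustrative C-style pseudocode,
; not part of the original source; names follow the assembly symbols):
;
;   snap = next_savearea;                  // current savearea anchors the list
;   for (s = SVfree;  s; s = s->SAVprev) { snap->savemisc1 = s; snap = s; }
;   inuse = SVinuse - 1;                   // count the anchor savearea as free
;   for (s = lclfree; s; s = s->SAVprev) { snap->savemisc1 = s; snap = s; inuse--; }
;   SVsaveinusesnapshot = inuse;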
        .align  5
        .globl  EXT(save_snapshot)

LEXT(save_snapshot)
        mflr    r9                          ; get return address
        bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
        bf--    pf64Bitb,save_snapshot32    ; skip if 32-bit processor

        ; Handle 64-bit processor.

save_snapshot64:

        ld      r8,next_savearea(r10)       ; Start with the current savearea
        std     r8,SVsavefreesnapshot(0)    ; Make it the restore list anchor
        ld      r5,SVfree(0)                ; Get free save area list anchor

save_snapshot64nextfree:
        mr      r7,r5
        std     r7,savemisc1(r8)            ; Link this one
        ld      r5,SAVprev(r7)              ; Get the next
        mr      r8,r7
        mr.     r0,r5
        bne     save_snapshot64nextfree

        lwz     r6,SVinuse(0)               ; Get inuse count
        ld      r5,lclfree(r10)             ; Get the local savearea list
        subi    r6,r6,1                     ; Count the first as free

save_snapshot64nextlocalfree:
        subi    r6,r6,1                     ; Count as free
        mr      r7,r5
        std     r7,savemisc1(r8)            ; Link this one
        ld      r5,SAVprev(r7)              ; Get the next
        mr      r8,r7
        mr.     r0,r5
        bne     save_snapshot64nextlocalfree

        std     r5,savemisc1(r8)            ; End the list
        stw     r6,SVsaveinusesnapshot(0)   ; Save the new number of inuse saveareas

        mtlr    r9                          ; Restore the return
        b       saveRestore64               ; Restore interrupts and translation

        ; Handle 32-bit processor.

save_snapshot32:
        lwz     r8,next_savearea+4(r10)     ; Start with the current savearea
        stw     r8,SVsavefreesnapshot+4(0)  ; Make it the restore list anchor
        lwz     r5,SVfree+4(0)              ; Get free save area list anchor

save_snapshot32nextfree:
        mr      r7,r5
        stw     r7,savemisc1+4(r8)          ; Link this one
        lwz     r5,SAVprev+4(r7)            ; Get the next
        mr      r8,r7
        mr.     r0,r5
        bne     save_snapshot32nextfree

        lwz     r6,SVinuse(0)               ; Get inuse count
        lwz     r5,lclfree+4(r10)           ; Get the local savearea list
        subi    r6,r6,1                     ; Count the first as free

save_snapshot32nextlocalfree:
        subi    r6,r6,1                     ; Count as free
        mr      r7,r5
        stw     r7,savemisc1+4(r8)          ; Link this one
        lwz     r5,SAVprev+4(r7)            ; Get the next
        mr      r8,r7
        mr.     r0,r5
        bne     save_snapshot32nextlocalfree

        stw     r5,savemisc1+4(r8)          ; End the list
        stw     r6,SVsaveinusesnapshot(0)   ; Save the new number of inuse saveareas

        mtlr    r9                          ; Restore the return
        b       saveRestore32               ; Restore interrupts and translation

/*
 * *********************************************
 * * s a v e _ s n a p s h o t _ r e s t o r e *
 * *********************************************
 *
 * void save_snapshot_restore();
 *
 * Restore the free list from the snapshot list, and reset the processor's next savearea.
 */
        .align  5
        .globl  EXT(save_snapshot_restore)

LEXT(save_snapshot_restore)
        mflr    r9                          ; get return address
        bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
        bf--    pf64Bitb,save_snapshot_restore32 ; skip if 32-bit processor

        ; Handle 64-bit processor.

save_snapshot_restore64:
        lwz     r7,SVsaveinusesnapshot(0)
        stw     r7,SVinuse(0)               ; Set the new inuse count

        li      r6,0
        stw     r6,lclfreecnt(r10)          ; None local now
        std     r6,lclfree(r10)             ; None local now

        ld      r8,SVsavefreesnapshot(0)    ; Get the restore list anchor
        std     r8,SVfree(0)                ; Make it the free list anchor
        li      r5,SAVempty                 ; Get marker for free savearea

save_snapshot_restore64nextfree:
        addi    r6,r6,1                     ; Count as free
        stb     r5,SAVflags+2(r8)           ; Mark savearea free
        ld      r7,savemisc1(r8)            ; Get the next
        std     r7,SAVprev(r8)              ; Set the next in free list
        mr.     r8,r7
        bne     save_snapshot_restore64nextfree

        stw     r6,SVfreecnt(0)             ; Set the new free count

        bl      saveGet64
        std     r3,next_savearea(r10)       ; Set the next savearea

        mtlr    r9                          ; Restore the return
        b       saveRestore64               ; Restore interrupts and translation

        ; Handle 32-bit processor.

save_snapshot_restore32:
        lwz     r7,SVsaveinusesnapshot(0)
        stw     r7,SVinuse(0)               ; Set the new inuse count

        li      r6,0
        stw     r6,lclfreecnt(r10)          ; None local now
        stw     r6,lclfree+4(r10)           ; None local now

        lwz     r8,SVsavefreesnapshot+4(0)  ; Get the restore list anchor
        stw     r8,SVfree+4(0)              ; Make it the free list anchor
        li      r5,SAVempty                 ; Get marker for free savearea

save_snapshot_restore32nextfree:
        addi    r6,r6,1                     ; Count as free
        stb     r5,SAVflags+2(r8)           ; Mark savearea free
        lwz     r7,savemisc1+4(r8)          ; Get the next
        stw     r7,SAVprev+4(r8)            ; Set the next in free list
        mr.     r8,r7
        bne     save_snapshot_restore32nextfree

        stw     r6,SVfreecnt(0)             ; Set the new free count

        bl      saveGet32
        stw     r3,next_savearea+4(r10)     ; Set the next savearea

        mtlr    r9                          ; Restore the return
        b       saveRestore32               ; Restore interrupts and translation

/*
 * ***********************
 * * s a v e _ q u e u e *
 * ***********************
 *
 * void save_queue(ppnum_t pagenum);
 *
 * This routine will add a savearea block to the free list.
 * We also queue the block to the free pool list. This is a
 * circular double linked list. Because this block has no free entries,
 * it gets queued to the end of the list.
 */
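; Each physical page passed in holds sac_cnt saveareas of SAVsize bytes.
; The loops below thread all of them together through their SAVprev fields,
; then splice the whole chain onto the front of the global free list under
; the savearea anchor lock.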
        .align  5
        .globl  EXT(save_queue)

LEXT(save_queue)
        mflr    r9                          ; get return address
        mr      r8,r3                       ; move pagenum out of the way
        bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
        bf--    pf64Bitb,saveQueue32        ; skip if 32-bit processor

        sldi    r2,r8,12                    ; r2 <-- phys address of page
        li      r8,sac_cnt                  ; Get the number of saveareas per page
        mr      r4,r2                       ; Point to start of chain
        li      r0,SAVempty                 ; Get empty marker

saveQueue64a:
        addic.  r8,r8,-1                    ; Keep track of how many we did
        stb     r0,SAVflags+2(r4)           ; Set empty
        addi    r7,r4,SAVsize               ; Point to the next slot
        ble-    saveQueue64b                ; We are done with the chain
        std     r7,SAVprev(r4)              ; Set this chain
        mr      r4,r7                       ; Step to the next
        b       saveQueue64a                ; Fill the whole block...

saveQueue64b:
        bl      savelock                    ; Go lock the save anchor

        ld      r7,SVfree(0)                ; Get the free save area list anchor
        lwz     r6,SVfreecnt(0)             ; Get the number of free saveareas

        std     r2,SVfree(0)                ; Queue in the new one
        addi    r6,r6,sac_cnt               ; Count the ones we are linking in
        std     r7,SAVprev(r4)              ; Queue the old first one off of us
        stw     r6,SVfreecnt(0)             ; Save the new count
        b       saveQueueExit

        ; Handle 32-bit processor.

saveQueue32:
        slwi    r2,r8,12                    ; r2 <-- phys address of page
        li      r8,sac_cnt                  ; Get the number of saveareas per page
        mr      r4,r2                       ; Point to start of chain
        li      r0,SAVempty                 ; Get empty marker

saveQueue32a:
        addic.  r8,r8,-1                    ; Keep track of how many we did
        stb     r0,SAVflags+2(r4)           ; Set empty
        addi    r7,r4,SAVsize               ; Point to the next slot
        ble-    saveQueue32b                ; We are done with the chain
        stw     r7,SAVprev+4(r4)            ; Set this chain
        mr      r4,r7                       ; Step to the next
        b       saveQueue32a                ; Fill the whole block...

saveQueue32b:
        bl      savelock                    ; Go lock the save anchor

        lwz     r7,SVfree+4(0)              ; Get the free save area list anchor
        lwz     r6,SVfreecnt(0)             ; Get the number of free saveareas

        stw     r2,SVfree+4(0)              ; Queue in the new one
        addi    r6,r6,sac_cnt               ; Count the ones we are linking in
        stw     r7,SAVprev+4(r4)            ; Queue the old first one off of us
        stw     r6,SVfreecnt(0)             ; Save the new count

saveQueueExit:                              ; join here from 64-bit path
        bl      saveunlock                  ; Unlock the list and set the adjust count
        mtlr    r9                          ; Restore the return

#if FPVECDBG
        mfsprg  r2,1                        ; (TEST/DEBUG)
        mr.     r2,r2                       ; (TEST/DEBUG)
        beq--   saveRestore                 ; (TEST/DEBUG)
        lis     r0,hi16(CutTrace)           ; (TEST/DEBUG)
        li      r2,0x2201                   ; (TEST/DEBUG)
        oris    r0,r0,lo16(CutTrace)        ; (TEST/DEBUG)
        sc                                  ; (TEST/DEBUG)
#endif
        b       saveRestore                 ; Restore interrupts and translation

/*
 * *****************************
 * * s a v e _ g e t _ i n i t *
 * *****************************
 *
 * addr64_t save_get_init(void);
 *
 * Note that save_get_init is used in initial processor startup only. It
 * is used because translation is on, but no tables exist yet and we have
 * no V=R BAT registers that cover the entire physical memory.
 */
        .align  5
        .globl  EXT(save_get_init)

LEXT(save_get_init)
        mflr    r9                          ; get return address
        bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
        bfl--   pf64Bitb,saveGet32          ; Get r3 <- savearea, r5 <- page address (with SAC)
        btl++   pf64Bitb,saveGet64          ; get one on a 64-bit machine
        bl      saveRestore                 ; restore translation etc
        mtlr    r9

        ; unpack the physaddr in r3 into a long long in (r3,r4)

        mr      r4,r3                       ; copy low word of phys address to r4
        li      r3,0                        ; assume upper word was 0
        bflr--  pf64Bitb                    ; if 32-bit processor, return
        srdi    r3,r4,32                    ; unpack reg64_t to addr64_t on 64-bit machine
        rlwinm  r4,r4,0,0,31
        blr
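
; Note: the (r3,r4) split above follows the 32-bit PowerPC convention of
; returning a 64-bit integer (addr64_t here) in the r3:r4 register pair,
; with r3 holding the high-order word.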
335 | ||
336 | ||
337 | /* | |
338 | * ******************* | |
339 | * * s a v e _ g e t * | |
340 | * ******************* | |
341 | * | |
342 | * savearea *save_get(void); | |
343 | * | |
344 | * Allocate a savearea, returning a virtual address. NOTE: we must preserve | |
345 | * r0, r2, and r12. Our callers in cswtch.s depend on this. | |
346 | */ | |
347 | .align 5 | |
348 | .globl EXT(save_get) | |
349 | ||
350 | LEXT(save_get) | |
351 | mflr r9 ; get return address | |
352 | mr r5,r0 ; copy regs before saveSetup nails them | |
353 | bl saveSetup ; turn translation off, 64-bit on, load many regs | |
354 | bf-- pf64Bitb,svgt1 ; skip if 32-bit processor | |
355 | ||
356 | std r5,tempr0(r10) ; save r0 in per-proc across call to saveGet64 | |
357 | std r2,tempr2(r10) ; and r2 | |
358 | std r12,tempr4(r10) ; and r12 | |
359 | bl saveGet64 ; get r3 <- savearea, r5 <- page address (with SAC) | |
360 | ld r0,tempr0(r10) ; restore callers regs | |
361 | ld r2,tempr2(r10) | |
362 | ld r12,tempr4(r10) | |
363 | b svgt2 | |
364 | ||
365 | svgt1: ; handle 32-bit processor | |
366 | stw r5,tempr0+4(r10) ; save r0 in per-proc across call to saveGet32 | |
367 | stw r2,tempr2+4(r10) ; and r2 | |
368 | stw r12,tempr4+4(r10) ; and r12 | |
369 | bl saveGet32 ; get r3 <- savearea, r5 <- page address (with SAC) | |
370 | lwz r0,tempr0+4(r10) ; restore callers regs | |
371 | lwz r2,tempr2+4(r10) | |
372 | lwz r12,tempr4+4(r10) | |
373 | ||
374 | svgt2: | |
375 | lwz r5,SACvrswap+4(r5) ; Get the virtual to real translation (only need low word) | |
376 | mtlr r9 ; restore return address | |
377 | xor r3,r3,r5 ; convert physaddr to virtual | |
378 | rlwinm r3,r3,0,0,31 ; 0 upper word if a 64-bit machine | |
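        ; SACvrswap is the per-page XOR difference between a savearea page's
        ; virtual and real addresses, so the same xor converts in either
        ; direction; here it turns the physical address from saveGetXX back
        ; into a virtual one.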
379 | ||
380 | #if FPVECDBG | |
381 | mr r6,r0 ; (TEST/DEBUG) | |
382 | mr r7,r2 ; (TEST/DEBUG) | |
383 | mfsprg r2,1 ; (TEST/DEBUG) | |
384 | mr. r2,r2 ; (TEST/DEBUG) | |
385 | beq-- svgDBBypass ; (TEST/DEBUG) | |
386 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
387 | li r2,0x2203 ; (TEST/DEBUG) | |
388 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
389 | sc ; (TEST/DEBUG) | |
390 | svgDBBypass: ; (TEST/DEBUG) | |
391 | mr r0,r6 ; (TEST/DEBUG) | |
392 | mr r2,r7 ; (TEST/DEBUG) | |
393 | #endif | |
394 | b saveRestore ; restore MSR and return to our caller | |
395 | ||
396 | ||
397 | /* | |
398 | * *********************************** | |
399 | * * s a v e _ g e t _ p h y s _ 3 2 * | |
400 | * *********************************** | |
401 | * | |
402 | * reg64_t save_get_phys(void); | |
403 | * | |
404 | * This is the entry normally called from lowmem_vectors.s with | |
405 | * translation and interrupts already off. | |
406 | * MUST NOT TOUCH CR7 | |
407 | */ | |
408 | .align 5 | |
409 | .globl EXT(save_get_phys_32) | |
410 | ||
411 | LEXT(save_get_phys_32) | |
412 | mfsprg r10,0 ; get the per-proc ptr | |
413 | b saveGet32 ; Get r3 <- savearea, r5 <- page address (with SAC) | |
414 | ||
415 | ||
416 | /* | |
417 | * *********************************** | |
418 | * * s a v e _ g e t _ p h y s _ 6 4 * | |
419 | * *********************************** | |
420 | * | |
421 | * reg64_t save_get_phys_64(void); | |
422 | * | |
423 | * This is the entry normally called from lowmem_vectors.s with | |
424 | * translation and interrupts already off, and in 64-bit mode. | |
425 | * MUST NOT TOUCH CR7 | |
426 | */ | |
427 | .align 5 | |
428 | .globl EXT(save_get_phys_64) | |
429 | ||
430 | LEXT(save_get_phys_64) | |
431 | mfsprg r10,0 ; get the per-proc ptr | |
432 | b saveGet64 ; Get r3 <- savearea, r5 <- page address (with SAC) | |
433 | ||
434 | ||
435 | /* | |
436 | * ********************* | |
437 | * * s a v e G e t 6 4 * | |
438 | * ********************* | |
439 | * | |
440 | * This is the internal routine to allocate a savearea on a 64-bit processor. | |
441 | * Note that we must not take any exceptions of any kind, including PTE misses, as that | |
442 | * would deadlock trying to reenter this routine. We pass back the 64-bit physical address. | |
443 | * First we try the local list. If that is below a threshold, we try the global free list, | |
444 | * which requires taking a lock, and replenish. If there are no saveareas in either list, | |
445 | * we will install the backpocket and choke. This routine assumes that the caller has | |
446 | * turned translation off, masked interrupts, turned on 64-bit mode, and set up: | |
447 | * r10 = per-proc ptr | |
448 | * | |
449 | * We return: | |
450 | * r3 = 64-bit physical address of the savearea | |
451 | * r5 = 64-bit physical address of the page the savearea is in, with SAC | |
452 | * | |
453 | * We destroy: | |
454 | * r2-r8. | |
455 | * | |
456 | * MUST NOT TOUCH CR7 | |
457 | */ | |
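; In outline (an illustrative C-style sketch of the strategy described above,
; not part of the original source):
;
;   if (lclfreecnt > LocalSaveMin)              // fast path: no lock needed
;       return pop(lclfree);
;   lock(saveanchor);
;   n = min(LocalSaveTarget - lclfreecnt, SVfreecnt);
;   move n saveareas from SVfree to lclfree, counting them as "in use";
;   unlock and retry; if both lists turn out empty, splice in the backpocket
;   reserve and Choke (system panic).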
458 | ||
459 | saveGet64: | |
460 | lwz r8,lclfreecnt(r10) ; Get the count | |
461 | ld r3,lclfree(r10) ; Get the start of local savearea list | |
462 | cmplwi r8,LocalSaveMin ; Are we too low? | |
463 | ble-- saveGet64GetGlobal ; We are too low and need to grow list... | |
464 | ||
465 | ; Get it from the per-processor local list. | |
466 | ||
467 | saveGet64GetLocal: | |
468 | li r2,0x5555 ; get r2 <-- 0x55555555 55555555, our bugbug constant | |
469 | ld r4,SAVprev(r3) ; Chain to the next one | |
470 | oris r2,r2,0x5555 | |
471 | subi r8,r8,1 ; Back down count | |
472 | rldimi r2,r2,32,0 | |
473 | ||
474 | std r2,SAVprev(r3) ; bug next ptr | |
475 | stw r2,SAVlevel(r3) ; bug context ID | |
476 | li r6,0 | |
477 | std r4,lclfree(r10) ; Unchain first savearea | |
478 | stw r2,SAVact(r3) ; bug activation ptr | |
479 | rldicr r5,r3,0,51 ; r5 <-- page ptr, where SAC is kept | |
480 | stw r8,lclfreecnt(r10) ; Set new count | |
481 | stw r6,SAVflags(r3) ; clear the flags | |
482 | ||
483 | blr | |
484 | ||
485 | ; Local list was low so replenish from global list. | |
486 | ; r7 = return address to caller of saveGet64 | |
487 | ; r8 = lclfreecnt | |
488 | ; r10 = per-proc ptr | |
489 | ||
490 | saveGet64GetGlobal: | |
491 | mflr r7 ; save return adress | |
492 | subfic r5,r8,LocalSaveTarget ; Get the number of saveareas we need to grab to get to target | |
493 | bl savelock ; Go lock up the anchor | |
494 | ||
495 | lwz r2,SVfreecnt(0) ; Get the number on this list | |
496 | ld r8,SVfree(0) ; Get the head of the save area list | |
497 | ||
498 | sub r3,r2,r5 ; Get number left after we swipe enough for local list | |
499 | sradi r3,r3,63 ; Get 0 if enough or -1 if not | |
500 | andc r4,r5,r3 ; Get number to get if there are enough, 0 otherwise | |
501 | and r5,r2,r3 ; Get 0 if there are enough, number on list otherwise | |
502 | or. r5,r4,r5 ; r5 <- number we will move from global to local list | |
503 | beq-- saveGet64NoFree ; There are none to get... | |
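        ; The four instructions above are a branchless min. In C terms
        ; (illustrative only):
        ;   mask = (avail - want) >> 63;              // arithmetic shift: -1 if short, else 0
        ;   take = (want & ~mask) | (avail & mask);   // min(want, avail)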
504 | ||
505 | mtctr r5 ; Get loop count | |
506 | mr r6,r8 ; Remember the first in the list | |
507 | ||
508 | saveGet64c: | |
509 | bdz saveGet64d ; Count down and branch when we hit 0... | |
510 | ld r8,SAVprev(r8) ; Get the next | |
511 | b saveGet64c ; Keep going... | |
512 | ||
513 | saveGet64d: | |
514 | ld r3,SAVprev(r8) ; Get the next one | |
515 | lwz r4,SVinuse(0) ; Get the in use count | |
516 | sub r2,r2,r5 ; Count down what we stole | |
517 | std r3,SVfree(0) ; Set the new first in list | |
518 | add r4,r4,r5 ; Count the ones we just put in the local list as "in use" | |
519 | stw r2,SVfreecnt(0) ; Set the new count | |
520 | stw r4,SVinuse(0) ; Set the new in use count | |
521 | ||
522 | ld r4,lclfree(r10) ; Get the old head of list | |
523 | lwz r3,lclfreecnt(r10) ; Get the old count | |
524 | std r6,lclfree(r10) ; Set the new head of the list | |
525 | add r3,r3,r5 ; Get the new count | |
526 | std r4,SAVprev(r8) ; Point to the old head | |
527 | stw r3,lclfreecnt(r10) ; Set the new count | |
528 | ||
529 | bl saveunlock ; Update the adjust field and unlock | |
530 | mtlr r7 ; restore return address | |
531 | b saveGet64 ; Start over and finally allocate the savearea... | |
532 | ||
533 | ; The local list is below the repopulate threshold and the global list is empty. | |
534 | ; First we check if there are any left in the local list and if so, we allow | |
535 | ; them to be allocated. If not, we release the backpocket list and choke. | |
536 | ; There is nothing more that we can do at this point. Hopefully we stay alive | |
537 | ; long enough to grab some much-needed panic information. | |
538 | ; r7 = return address to caller of saveGet64 | |
539 | ; r10 = per-proc ptr | |
540 | ||
541 | saveGet64NoFree: | |
542 | lwz r8,lclfreecnt(r10) ; Get the count | |
543 | mr. r8,r8 ; Are there any reserve to get? | |
544 | beq-- saveGet64Choke ; No, go choke and die... | |
545 | bl saveunlock ; Update the adjust field and unlock | |
546 | ld r3,lclfree(r10) ; Get the start of local savearea list | |
547 | lwz r8,lclfreecnt(r10) ; Get the count | |
548 | mtlr r7 ; restore return address | |
549 | b saveGet64GetLocal ; We have some left, dip on in... | |
550 | ||
551 | ; We who are about to die salute you. The savearea chain is messed up or | |
552 | ; empty. Add in a few so we have enough to take down the system. | |
553 | ||
554 | saveGet64Choke: | |
555 | lis r9,hi16(EXT(backpocket)) ; Get high order of back pocket | |
556 | ori r9,r9,lo16(EXT(backpocket)) ; and low part | |
557 | ||
558 | lwz r8,SVfreecnt-saveanchor(r9) ; Get the new number of free elements | |
559 | ld r7,SVfree-saveanchor(r9) ; Get the head of the chain | |
560 | lwz r6,SVinuse(0) ; Get total in the old list | |
561 | ||
562 | stw r8,SVfreecnt(0) ; Set the new number of free elements | |
563 | add r6,r6,r8 ; Add in the new ones | |
564 | std r7,SVfree(0) ; Set the new head of the chain | |
565 | stw r6,SVinuse(0) ; Set total in the new list | |
566 | ||
567 | saveGetChokeJoin: ; join in the fun from 32-bit mode | |
568 | lis r0,hi16(Choke) ; Set choke firmware call | |
569 | li r7,0 ; Get a clear register to unlock | |
570 | ori r0,r0,lo16(Choke) ; Set the rest of the choke call | |
571 | li r3,failNoSavearea ; Set failure code | |
572 | ||
573 | eieio ; Make sure all is committed | |
574 | stw r7,SVlock(0) ; Unlock the free list | |
575 | sc ; System ABEND | |
576 | ||
577 | ||
578 | /* | |
579 | * ********************* | |
580 | * * s a v e G e t 3 2 * | |
581 | * ********************* | |
582 | * | |
583 | * This is the internal routine to allocate a savearea on a 32-bit processor. | |
584 | * Note that we must not take any exceptions of any kind, including PTE misses, as that | |
585 | * would deadlock trying to reenter this routine. We pass back the 32-bit physical address. | |
586 | * First we try the local list. If that is below a threshold, we try the global free list, | |
587 | * which requires taking a lock, and replenish. If there are no saveareas in either list, | |
588 | * we will install the backpocket and choke. This routine assumes that the caller has | |
589 | * turned translation off, masked interrupts, and set up: | |
590 | * r10 = per-proc ptr | |
591 | * | |
592 | * We return: | |
593 | * r3 = 32-bit physical address of the savearea | |
594 | * r5 = 32-bit physical address of the page the savearea is in, with SAC | |
595 | * | |
596 | * We destroy: | |
597 | * r2-r8. | |
598 | */ | |
599 | ||
600 | saveGet32: | |
601 | lwz r8,lclfreecnt(r10) ; Get the count | |
602 | lwz r3,lclfree+4(r10) ; Get the start of local savearea list | |
603 | cmplwi r8,LocalSaveMin ; Are we too low? | |
604 | ble- saveGet32GetGlobal ; We are too low and need to grow list... | |
605 | ||
606 | ; Get savearea from per-processor local list. | |
607 | ||
608 | saveGet32GetLocal: | |
609 | li r2,0x5555 ; get r2 <-- 0x55555555, our bugbug constant | |
610 | lwz r4,SAVprev+4(r3) ; Chain to the next one | |
611 | oris r2,r2,0x5555 | |
612 | subi r8,r8,1 ; Back down count | |
613 | ||
614 | stw r2,SAVprev+4(r3) ; bug next ptr | |
615 | stw r2,SAVlevel(r3) ; bug context ID | |
616 | li r6,0 | |
617 | stw r4,lclfree+4(r10) ; Unchain first savearea | |
618 | stw r2,SAVact(r3) ; bug activation ptr | |
619 | rlwinm r5,r3,0,0,19 ; r5 <-- page ptr, where SAC is kept | |
620 | stw r8,lclfreecnt(r10) ; Set new count | |
621 | stw r6,SAVflags(r3) ; clear the flags | |
622 | ||
623 | blr | |
624 | ||
625 | ; Local list was low so replenish from global list. | |
626 | ; r7 = return address to caller of saveGet32 | |
627 | ; r8 = lclfreecnt | |
628 | ; r10 = per-proc ptr | |
629 | ||
630 | saveGet32GetGlobal: | |
631 | mflr r7 ; save return adress | |
632 | subfic r5,r8,LocalSaveTarget ; Get the number of saveareas we need to grab to get to target | |
633 | bl savelock ; Go lock up the anchor | |
634 | ||
635 | lwz r2,SVfreecnt(0) ; Get the number on this list | |
636 | lwz r8,SVfree+4(0) ; Get the head of the save area list | |
637 | ||
638 | sub r3,r2,r5 ; Get number left after we swipe enough for local list | |
639 | srawi r3,r3,31 ; Get 0 if enough or -1 if not | |
640 | andc r4,r5,r3 ; Get number to get if there are enough, 0 otherwise | |
641 | and r5,r2,r3 ; Get 0 if there are enough, number on list otherwise | |
642 | or. r5,r4,r5 ; r5 <- number we will move from global to local list | |
643 | beq- saveGet32NoFree ; There are none to get... | |
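        ; Same branchless min as in saveGet64, using a 31-bit arithmetic
        ; shift since the counts are 32-bit quantities here.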
644 | ||
645 | mtctr r5 ; Get loop count | |
646 | mr r6,r8 ; Remember the first in the list | |
647 | ||
648 | saveGet32c: | |
649 | bdz saveGet32d ; Count down and branch when we hit 0... | |
650 | lwz r8,SAVprev+4(r8) ; Get the next | |
651 | b saveGet32c ; Keep going... | |
652 | ||
653 | saveGet32d: | |
654 | lwz r3,SAVprev+4(r8) ; Get the next one | |
655 | lwz r4,SVinuse(0) ; Get the in use count | |
656 | sub r2,r2,r5 ; Count down what we stole | |
657 | stw r3,SVfree+4(0) ; Set the new first in list | |
658 | add r4,r4,r5 ; Count the ones we just put in the local list as "in use" | |
659 | stw r2,SVfreecnt(0) ; Set the new count | |
660 | stw r4,SVinuse(0) ; Set the new in use count | |
661 | ||
662 | lwz r4,lclfree+4(r10) ; Get the old head of list | |
663 | lwz r3,lclfreecnt(r10) ; Get the old count | |
664 | stw r6,lclfree+4(r10) ; Set the new head of the list | |
665 | add r3,r3,r5 ; Get the new count | |
666 | stw r4,SAVprev+4(r8) ; Point to the old head | |
667 | stw r3,lclfreecnt(r10) ; Set the new count | |
668 | ||
669 | bl saveunlock ; Update the adjust field and unlock | |
670 | mtlr r7 ; restore return address | |
671 | b saveGet32 ; Start over and finally allocate the savearea... | |
672 | ||
673 | ; The local list is below the repopulate threshold and the global list is empty. | |
674 | ; First we check if there are any left in the local list and if so, we allow | |
675 | ; them to be allocated. If not, we release the backpocket list and choke. | |
676 | ; There is nothing more that we can do at this point. Hopefully we stay alive | |
677 | ; long enough to grab some much-needed panic information. | |
678 | ; r7 = return address to caller of saveGet32 | |
679 | ; r10 = per-proc ptr | |
680 | ||
681 | saveGet32NoFree: | |
682 | lwz r8,lclfreecnt(r10) ; Get the count | |
683 | mr. r8,r8 ; Are there any reserve to get? | |
684 | beq- saveGet32Choke ; No, go choke and die... | |
685 | bl saveunlock ; Update the adjust field and unlock | |
686 | lwz r3,lclfree+4(r10) ; Get the start of local savearea list | |
687 | lwz r8,lclfreecnt(r10) ; Get the count | |
688 | mtlr r7 ; restore return address | |
689 | b saveGet32GetLocal ; We have some left, dip on in... | |
690 | ||
691 | ; We who are about to die salute you. The savearea chain is messed up or | |
692 | ; empty. Add in a few so we have enough to take down the system. | |
693 | ||
694 | saveGet32Choke: | |
695 | lis r9,hi16(EXT(backpocket)) ; Get high order of back pocket | |
696 | ori r9,r9,lo16(EXT(backpocket)) ; and low part | |
697 | ||
698 | lwz r8,SVfreecnt-saveanchor(r9) ; Get the new number of free elements | |
699 | lwz r7,SVfree+4-saveanchor(r9) ; Get the head of the chain | |
700 | lwz r6,SVinuse(0) ; Get total in the old list | |
701 | ||
702 | stw r8,SVfreecnt(0) ; Set the new number of free elements | |
703 | add r6,r6,r8 ; Add in the new ones (why?) | |
704 | stw r7,SVfree+4(0) ; Set the new head of the chain | |
705 | stw r6,SVinuse(0) ; Set total in the new list | |
706 | ||
707 | b saveGetChokeJoin | |
708 | ||
709 | ||
710 | /* | |
711 | * ******************* | |
712 | * * s a v e _ r e t * | |
713 | * ******************* | |
714 | * | |
715 | * void save_ret(struct savearea *); // normal call | |
716 | * void save_ret_wMSR(struct savearea *,reg64_t); // passes MSR to restore as 2nd arg | |
717 | * | |
718 | * Return a savearea passed by virtual address to the free list. | |
719 | * Note really well: we can take NO exceptions of any kind, | |
720 | * including a PTE miss once the savearea lock is held. That's | |
721 | * a guaranteed deadlock. That means we must disable for interrutions | |
722 | * and turn all translation off. | |
723 | */ | |
724 | .globl EXT(save_ret_wMSR) ; alternate entry pt w MSR to restore in r4 | |
725 | ||
726 | LEXT(save_ret_wMSR) | |
727 | crset 31 ; set flag for save_ret_wMSR | |
728 | b svrt1 ; join common code | |
729 | ||
730 | .align 5 | |
731 | .globl EXT(save_ret) | |
732 | ||
733 | LEXT(save_ret) | |
734 | crclr 31 ; clear flag for save_ret_wMSR | |
735 | svrt1: ; join from save_ret_wMSR | |
736 | mflr r9 ; get return address | |
737 | rlwinm r7,r3,0,0,19 ; get virtual address of SAC area at start of page | |
738 | mr r8,r3 ; save virtual address | |
739 | lwz r5,SACvrswap+0(r7) ; get 64-bit converter from V to R | |
740 | lwz r6,SACvrswap+4(r7) ; both halves, though only bottom used on 32-bit machine | |
741 | #if FPVECDBG | |
742 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
743 | li r2,0x2204 ; (TEST/DEBUG) | |
744 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
745 | sc ; (TEST/DEBUG) | |
746 | #endif | |
747 | bl saveSetup ; turn translation off, 64-bit on, load many regs | |
748 | bf++ 31,svrt3 ; skip if not save_ret_wMSR | |
749 | mr r11,r4 ; was save_ret_wMSR, so overwrite saved MSR | |
750 | svrt3: | |
751 | bf-- pf64Bitb,svrt4 ; skip if a 32-bit processor | |
752 | ||
753 | ; Handle 64-bit processor. | |
754 | ||
755 | rldimi r6,r5,32,0 ; merge upper and lower halves of SACvrswap together | |
756 | xor r3,r8,r6 ; get r3 <- 64-bit physical address of this savearea | |
757 | bl saveRet64 ; return it | |
758 | mtlr r9 ; restore return address | |
759 | b saveRestore64 ; restore MSR | |
760 | ||
761 | ; Handle 32-bit processor. | |
762 | ||
763 | svrt4: | |
764 | xor r3,r8,r6 ; get r3 <- 32-bit physical address of this savearea | |
765 | bl saveRet32 ; return it | |
766 | mtlr r9 ; restore return address | |
767 | b saveRestore32 ; restore MSR | |
768 | ||
769 | ||
770 | /* | |
771 | * ***************************** | |
772 | * * s a v e _ r e t _ p h y s * | |
773 | * ***************************** | |
774 | * | |
775 | * void save_ret_phys(reg64_t); | |
776 | * | |
777 | * Called from lowmem vectors to return (ie, free) a savearea by physical address. | |
778 | * Translation and interrupts are already off, and 64-bit mode is set if defined. | |
779 | * We can take _no_ exceptions of any kind in this code, including PTE miss, since | |
780 | * that would result in a deadlock. We expect: | |
781 | * r3 = phys addr of savearea | |
782 | * msr = IR, DR, and EE off, SF on | |
783 | * cr6 = pf64Bit flag | |
784 | * We destroy: | |
785 | * r0,r2-r10. | |
786 | */ | |
787 | .align 5 | |
788 | .globl EXT(save_ret_phys) | |
789 | ||
790 | LEXT(save_ret_phys) | |
791 | mfsprg r10,0 ; get the per-proc ptr | |
792 | bf-- pf64Bitb,saveRet32 ; handle 32-bit machine | |
793 | b saveRet64 ; handle 64-bit machine | |
794 | ||
795 | ||
796 | /* | |
797 | * ********************* | |
798 | * * s a v e R e t 6 4 * | |
799 | * ********************* | |
800 | * | |
801 | * This is the internal routine to free a savearea, passed by 64-bit physical | |
802 | * address. We assume that IR, DR, and EE are all off, that SF is on, and: | |
803 | * r3 = phys address of the savearea | |
804 | * r10 = per-proc ptr | |
805 | * We destroy: | |
806 | * r0,r2-r8. | |
807 | */ | |
808 | .align 5 | |
809 | saveRet64: | |
810 | li r0,SAVempty ; Get marker for free savearea | |
811 | lwz r7,lclfreecnt(r10) ; Get the local count | |
812 | ld r6,lclfree(r10) ; Get the old local header | |
813 | addi r7,r7,1 ; Pop up the free count | |
814 | std r6,SAVprev(r3) ; Plant free chain pointer | |
815 | cmplwi r7,LocalSaveMax ; Has the list gotten too long? | |
816 | stb r0,SAVflags+2(r3) ; Mark savearea free | |
817 | std r3,lclfree(r10) ; Chain us on in | |
818 | stw r7,lclfreecnt(r10) ; Bump up the count | |
819 | bltlr++ ; List not too long, so done | |
820 | ||
821 | /* The local savearea chain has gotten too long. Trim it down to the target. | |
822 | * Here's a tricky bit, and important: | |
823 | * | |
824 | * When we trim the list, we NEVER trim the very first one. This is because that is | |
825 | * the very last one released and the exception exit code will release the savearea | |
826 | * BEFORE it is done using it. Wouldn't be too good if another processor started | |
827 | * using it, eh? So for this case, we are safe so long as the savearea stays on | |
828 | * the local list. (Note: the exit routine needs to do this because it is in the | |
829 | * process of restoring all context and it needs to keep it until the last second.) | |
830 | */ | |
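/* In outline (an illustrative sketch, not part of the original source): keep
 * the just-released head H; walk off the (lclfreecnt - LocalSaveTarget)
 * saveareas that follow it; relink H past them; then, under the anchor lock,
 * push the detached sublist onto the global free list and move its count
 * from "in use" to "free".
 */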
831 | ||
832 | mflr r0 ; save return to caller of saveRet64 | |
833 | mr r2,r3 ; r2 <- 1st one on local list, which must not be trimmed | |
834 | ld r3,SAVprev(r3) ; Skip over the first | |
835 | subi r7,r7,LocalSaveTarget ; Figure out how much to trim | |
836 | mr r6,r3 ; r6 <- first one to trim | |
837 | mr r5,r7 ; Save the number we are trimming | |
838 | ||
839 | saveRet64a: | |
840 | addic. r7,r7,-1 ; Any left to do? | |
841 | ble-- saveRet64b ; Nope... | |
842 | ld r3,SAVprev(r3) ; Skip to the next one | |
843 | b saveRet64a ; Keep going... | |
844 | ||
845 | saveRet64b: ; r3 <- last one to trim | |
846 | ld r7,SAVprev(r3) ; Point to the first one not to trim | |
847 | li r4,LocalSaveTarget ; Set the target count | |
848 | std r7,SAVprev(r2) ; Trim stuff leaving the one just released as first | |
849 | stw r4,lclfreecnt(r10) ; Set the current count | |
850 | ||
851 | bl savelock ; Lock up the anchor | |
852 | ||
853 | ld r8,SVfree(0) ; Get the old head of the free list | |
854 | lwz r4,SVfreecnt(0) ; Get the number of free ones | |
855 | lwz r7,SVinuse(0) ; Get the number that are in use | |
856 | std r6,SVfree(0) ; Point to the first trimmed savearea | |
857 | add r4,r4,r5 ; Add number trimmed to free count | |
858 | std r8,SAVprev(r3) ; Chain the old head to the tail of the trimmed guys | |
859 | sub r7,r7,r5 ; Remove the trims from the in use count | |
860 | stw r4,SVfreecnt(0) ; Set new free count | |
861 | stw r7,SVinuse(0) ; Set new in use count | |
862 | ||
863 | mtlr r0 ; Restore the return to our caller | |
864 | b saveunlock ; Set adjust count, unlock the saveanchor, and return | |
865 | ||
866 | ||
867 | /* | |
868 | * ********************* | |
869 | * * s a v e R e t 3 2 * | |
870 | * ********************* | |
871 | * | |
872 | * This is the internal routine to free a savearea, passed by 32-bit physical | |
873 | * address. We assume that IR, DR, and EE are all off, and: | |
874 | * r3 = phys address of the savearea | |
875 | * r10 = per-proc ptr | |
876 | * We destroy: | |
877 | * r0,r2-r8. | |
878 | */ | |
879 | .align 5 | |
880 | saveRet32: | |
881 | li r0,SAVempty ; Get marker for free savearea | |
882 | lwz r7,lclfreecnt(r10) ; Get the local count | |
883 | lwz r6,lclfree+4(r10) ; Get the old local header | |
884 | addi r7,r7,1 ; Pop up the free count | |
885 | stw r6,SAVprev+4(r3) ; Plant free chain pointer | |
886 | cmplwi r7,LocalSaveMax ; Has the list gotten too long? | |
887 | stb r0,SAVflags+2(r3) ; Mark savearea free | |
888 | stw r3,lclfree+4(r10) ; Chain us on in | |
889 | stw r7,lclfreecnt(r10) ; Bump up the count | |
890 | bltlr+ ; List not too long, so done | |
891 | ||
892 | /* The local savearea chain has gotten too long. Trim it down to the target. | |
893 | * Here's a tricky bit, and important: | |
894 | * | |
895 | * When we trim the list, we NEVER trim the very first one. This is because that is | |
896 | * the very last one released and the exception exit code will release the savearea | |
897 | * BEFORE it is done using it. Wouldn't be too good if another processor started | |
898 | * using it, eh? So for this case, we are safe so long as the savearea stays on | |
899 | * the local list. (Note: the exit routine needs to do this because it is in the | |
900 | * process of restoring all context and it needs to keep it until the last second.) | |
901 | */ | |
902 | ||
903 | mflr r0 ; save return to caller of saveRet32 | |
904 | mr r2,r3 ; r2 <- 1st one on local list, which must not be trimmed | |
905 | lwz r3,SAVprev+4(r3) ; Skip over the first | |
906 | subi r7,r7,LocalSaveTarget ; Figure out how much to trim | |
907 | mr r6,r3 ; r6 <- first one to trim | |
908 | mr r5,r7 ; Save the number we are trimming | |
909 | ||
910 | saveRet32a: | |
911 | addic. r7,r7,-1 ; Any left to do? | |
912 | ble- saveRet32b ; Nope... | |
913 | lwz r3,SAVprev+4(r3) ; Skip to the next one | |
914 | b saveRet32a ; Keep going... | |
915 | ||
916 | saveRet32b: ; r3 <- last one to trim | |
917 | lwz r7,SAVprev+4(r3) ; Point to the first one not to trim | |
918 | li r4,LocalSaveTarget ; Set the target count | |
919 | stw r7,SAVprev+4(r2) ; Trim stuff leaving the one just released as first | |
920 | stw r4,lclfreecnt(r10) ; Set the current count | |
921 | ||
922 | bl savelock ; Lock up the anchor | |
923 | ||
924 | lwz r8,SVfree+4(0) ; Get the old head of the free list | |
925 | lwz r4,SVfreecnt(0) ; Get the number of free ones | |
926 | lwz r7,SVinuse(0) ; Get the number that are in use | |
927 | stw r6,SVfree+4(0) ; Point to the first trimmed savearea | |
928 | add r4,r4,r5 ; Add number trimmed to free count | |
929 | stw r8,SAVprev+4(r3) ; Chain the old head to the tail of the trimmed guys | |
930 | sub r7,r7,r5 ; Remove the trims from the in use count | |
931 | stw r4,SVfreecnt(0) ; Set new free count | |
932 | stw r7,SVinuse(0) ; Set new in use count | |
933 | ||
934 | mtlr r0 ; Restore the return to our caller | |
935 | b saveunlock ; Set adjust count, unlock the saveanchor, and return | |
936 | ||
937 | ||
938 | /* | |
939 | * ******************************* | |
940 | * * s a v e _ t r i m _ f r e e * | |
941 | * ******************************* | |
942 | * | |
943 | * struct savearea_comm *save_trim_free(void); | |
944 | * | |
945 | * Trim the free list down to the target count, ie by -(SVadjust) save areas. | |
946 | * It trims the list and, if a pool page was fully allocated, puts that page on | |
947 | * the start of the pool list. | |
948 | * | |
949 | * If the savearea being released is the last on a pool page (i.e., all entries | |
950 | * are released), the page is dequeued from the pool and queued to any other | |
951 | * found during this scan. Note that this queue is maintained virtually. | |
952 | * | |
953 | * When the scan is done, the saveanchor lock is released and the list of | |
954 | * freed pool pages is returned to our caller. | |
955 | * | |
956 | * For latency sake we may want to revisit this code. If we are trimming a | |
957 | * large number of saveareas, we could be disabled and holding the savearea lock | |
958 | * for quite a while. It may be that we want to break the trim down into parts. | |
959 | * Possibly trimming the free list, then individually pushing them into the free pool. | |
960 | * | |
961 | * This function expects to be called with translation on and a valid stack. | |
962 | * It uses the standard ABI, ie we destroy r2 and r3-r11, and return the ptr in r3. | |
963 | */ | |
964 | .align 5 | |
965 | .globl EXT(save_trim_free) | |
966 | ||
967 | LEXT(save_trim_free) | |
968 | ||
969 | subi r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Make space for 4 registers on stack | |
970 | mflr r9 ; save our return address | |
971 | stw r28,FM_SIZE+0(r1) ; Save R28 | |
972 | stw r29,FM_SIZE+4(r1) ; Save R29 | |
973 | stw r30,FM_SIZE+8(r1) ; Save R30 | |
974 | stw r31,FM_SIZE+12(r1) ; Save R31 | |
975 | ||
976 | bl saveSetup ; turn off translation and interrupts, load many regs | |
977 | bl savelock ; Go lock up the anchor | |
978 | ||
979 | lwz r8,SVadjust(0) ; How many do we need to clear out? | |
980 | li r3,0 ; Get a 0 | |
981 | neg. r8,r8 ; Get the actual we need to toss (adjust is neg if too many) | |
982 | ble- save_trim_free1 ; skip if no trimming needed anymore | |
983 | bf-- pf64Bitb,saveTrim32 ; handle 32-bit processors | |
984 | b saveTrim64 ; handle 64-bit processors | |
985 | ||
986 | save_trim_free1: ; by the time we were called, no need to trim anymore | |
987 | stw r3,SVlock(0) ; Quick unlock (no need for sync or to set adjust, nothing changed) | |
988 | mtlr r9 ; Restore return | |
989 | ||
990 | #if FPVECDBG | |
991 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
992 | li r2,0x2206 ; (TEST/DEBUG) | |
993 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
994 | sc ; (TEST/DEBUG) | |
995 | #endif | |
996 | addi r1,r1,(FM_ALIGN(16)+FM_SIZE); Pop stack - have not trashed register so no need to reload | |
997 | b saveRestore ; restore translation and EE, turn SF off, return to our caller | |
998 | ||
999 | ||
1000 | /* | |
1001 | * *********************** | |
1002 | * * s a v e T r i m 3 2 * | |
1003 | * *********************** | |
1004 | * | |
1005 | * Handle "save_trim_free" on 32-bit processors. At this point, translation and interrupts | |
1006 | * are off, the savearea anchor is locked, and: | |
1007 | * r8 = #pages to trim (>0) | |
1008 | * r9 = return address | |
1009 | * r10 = per-proc ptr | |
1010 | * r11 = MSR at entry | |
1011 | */ | |
1012 | ||
1013 | saveTrim32: | |
1014 | lwz r7,SVfree+4(0) ; Get the first on the free list | |
1015 | mr r6,r7 ; Save the first one | |
1016 | mr r5,r8 ; Save the number we are trimming | |
1017 | ||
1018 | sttrimming: addic. r5,r5,-1 ; Any left to do? | |
1019 | ble- sttrimmed ; Nope... | |
1020 | lwz r7,SAVprev+4(r7) ; Skip to the next one | |
1021 | b sttrimming ; Keep going... | |
1022 | ||
1023 | sttrimmed: lwz r5,SAVprev+4(r7) ; Get the next one (for new head of free list) | |
1024 | lwz r4,SVfreecnt(0) ; Get the free count | |
1025 | stw r5,SVfree+4(0) ; Set new head | |
1026 | sub r4,r4,r8 ; Calculate the new free count | |
1027 | li r31,0 ; Show we have no free pool blocks yet | |
1028 | crclr cr1_eq ; dont exit loop before 1st iteration | |
1029 | stw r4,SVfreecnt(0) ; Set new free count | |
1030 | lis r30,hi16(sac_empty) ; Get what empty looks like | |
1031 | ||
1032 | ; NOTE: The savearea size must be 640 (0x280). We are doing a divide by shifts and stuff | |
1033 | ; here. | |
1034 | ; | |
1035 | #if SAVsize != 640 | |
1036 | #error Savearea size is not 640!!!!!!!!!!!! | |
1037 | #endif | |
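
; How the divide works (an illustrative note, assuming SAVsize == 640): a
; savearea's offset within its page is idx*640 for idx 0..5, so (offset >> 7)
; equals 5*idx. The code splits 5*idx into its even part (used, plus one, as
; a rotate amount) and its low bit, which equals idx & 1. Rotating the packed
; table constant 0x00442200 left by that amount leaves idx rounded down to
; even in the low three bits; adding back the low bit recovers idx, which
; then selects this savearea's bit in the SACalloc bitmap.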
1038 | ||
1039 | ; Loop over each savearea we are trimming. | |
1040 | ; r6 = next savearea to trim | |
1041 | ; r7 = last savearea to trim | |
1042 | ; r8 = #pages to trim (>0) | |
1043 | ; r9 = return address | |
1044 | ; r10 = per-proc ptr | |
1045 | ; r11 = MSR at entry | |
1046 | ; r30 = what SACalloc looks like when all saveareas are free | |
1047 | ; r31 = free pool block list | |
1048 | ; cr1 = beq set if we just trimmed the last, ie if we are done | |
1049 | ||
1050 | sttoss: beq+ cr1,stdone ; All done now... | |
1051 | ||
1052 | cmplw cr1,r6,r7 ; Have we finished the loop? | |
1053 | ||
1054 | lis r0,0x0044 ; Get top of table | |
1055 | rlwinm r2,r6,0,0,19 ; Back down to the savearea control stuff | |
1056 | ori r0,r0,0x2200 ; Finish shift table | |
1057 | rlwinm r4,r6,25,27,30 ; Get (addr >> 7) & 0x1E (same as twice high nybble) | |
1058 | lwz r5,SACalloc(r2) ; Get the allocation bits | |
1059 | addi r4,r4,1 ; Shift 1 extra | |
1060 | rlwinm r3,r6,25,31,31 ; Get (addr >> 7) & 1 | |
1061 | rlwnm r0,r0,r4,29,31 ; Get partial index | |
1062 | lis r4,lo16(0x8000) ; Get the bit mask | |
1063 | add r0,r0,r3 ; Make the real index | |
1064 | srw r4,r4,r0 ; Get the allocation mask | |
1065 | or r5,r5,r4 ; Free this entry | |
1066 | cmplw r5,r4 ; Is this the only free entry? | |
1067 | lwz r6,SAVprev+4(r6) ; Chain to the next trimmed savearea | |
1068 | cmplw cr7,r30,r5 ; Does this look empty? | |
1069 | stw r5,SACalloc(r2) ; Save back the allocation bits | |
1070 | beq- stputpool ; First free entry, go put it into the pool... | |
1071 | bne+ cr7,sttoss ; Not an empty block | |
1072 | ||
1073 | ; | |
1074 | ; We have an empty block. Remove it from the pool list. | |
1075 | ; | |
1076 | ||
1077 | lwz r29,SACflags(r2) ; Get the flags | |
1078 | cmplwi cr5,r31,0 ; Is this guy on the release list? | |
1079 | lwz r28,SACnext+4(r2) ; Get the forward chain | |
1080 | ||
1081 | rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area? (also sets 0 needed below) | |
1082 | bne- sttoss ; This is permanent entry, do not try to release... | |
1083 | ||
1084 | lwz r29,SACprev+4(r2) ; and the previous | |
1085 | beq- cr5,stnot1st ; Not first | |
1086 | lwz r0,SACvrswap+4(r31) ; Load the previous pool page vr conversion | |
1087 | ||
1088 | stnot1st: stw r28,SACnext+4(r29) ; Previous guy points to my next | |
1089 | xor r0,r0,r31 ; Make the last guy virtual | |
1090 | stw r29,SACprev+4(r28) ; Next guy points back to my previous | |
1091 | stw r0,SAVprev+4(r2) ; Store the old top virtual as my back chain | |
1092 | mr r31,r2 ; My physical is now the head of the chain | |
1093 | b sttoss ; Get the next one... | |
1094 | ||
1095 | ; | |
1096 | ; A pool block that had no free entries now has one. Stick it on the pool list. | |
1097 | ; | |
1098 | ||
1099 | stputpool: lwz r28,SVpoolfwd+4(0) ; Get the first guy on the list | |
1100 | li r0,saveanchor ; Point to the saveanchor | |
1101 | stw r2,SVpoolfwd+4(0) ; Put us on the top of the list | |
1102 | stw r28,SACnext+4(r2) ; We point to the old top | |
1103 | stw r2,SACprev+4(r28) ; Old top guy points back to us | |
1104 | stw r0,SACprev+4(r2) ; Our back points to the anchor | |
1105 | b sttoss ; Go on to the next one... | |
1106 | ||
1107 | ||
1108 | /* | |
1109 | * *********************** | |
1110 | * * s a v e T r i m 6 4 * | |
1111 | * *********************** | |
1112 | * | |
1113 | * Handle "save_trim_free" on 64-bit processors. At this point, translation and interrupts | |
1114 | * are off, SF is on, the savearea anchor is locked, and: | |
1115 | * r8 = #pages to trim (>0) | |
1116 | * r9 = return address | |
1117 | * r10 = per-proc ptr | |
1118 | * r11 = MSR at entry | |
1119 | */ | |
1120 | ||
1121 | saveTrim64: | |
1122 | ld r7,SVfree(0) ; Get the first on the free list | |
1123 | mr r6,r7 ; Save the first one | |
1124 | mr r5,r8 ; Save the number we are trimming | |
1125 | ||
1126 | sttrimming64: | |
1127 | addic. r5,r5,-1 ; Any left to do? | |
1128 | ble-- sttrimmed64 ; Nope... | |
1129 | ld r7,SAVprev(r7) ; Skip to the next one | |
1130 | b sttrimming64 ; Keep going... | |
1131 | ||
1132 | sttrimmed64: | |
1133 | ld r5,SAVprev(r7) ; Get the next one (for new head of free list) | |
1134 | lwz r4,SVfreecnt(0) ; Get the free count | |
1135 | std r5,SVfree(0) ; Set new head | |
1136 | sub r4,r4,r8 ; Calculate the new free count | |
1137 | li r31,0 ; Show we have no free pool blocks yet | |
1138 | crclr cr1_eq ; dont exit loop before 1st iteration | |
1139 | stw r4,SVfreecnt(0) ; Set new free count | |
1140 | lis r30,hi16(sac_empty) ; Get what empty looks like | |
1141 | ||
1142 | ||
1143 | ; Loop over each savearea we are trimming. | |
1144 | ; r6 = next savearea to trim | |
1145 | ; r7 = last savearea to trim | |
1146 | ; r8 = #pages to trim (>0) | |
1147 | ; r9 = return address | |
1148 | ; r10 = per-proc ptr | |
1149 | ; r11 = MSR at entry | |
1150 | ; r30 = what SACalloc looks like when all saveareas are free | |
1151 | ; r31 = free pool block list | |
1152 | ; cr1 = beq set if we just trimmed the last, ie if we are done | |
1153 | ; | |
1154 | ; WARNING: as in the 32-bit path, this code is doing a divide by 640 (SAVsize). | |
1155 | ||
1156 | sttoss64: | |
1157 | beq++ cr1,stdone ; All done now... | |
1158 | ||
1159 | cmpld cr1,r6,r7 ; Have we finished the loop? | |
1160 | ||
1161 | lis r0,0x0044 ; Get top of table | |
1162 | rldicr r2,r6,0,51 ; r2 <- phys addr of savearea block (with control area) | |
1163 | ori r0,r0,0x2200 ; Finish shift table | |
1164 | rlwinm r4,r6,25,27,30 ; Get (addr >> 7) & 0x1E (same as twice high nybble) | |
1165 | lwz r5,SACalloc(r2) ; Get the allocation bits | |
1166 | addi r4,r4,1 ; Shift 1 extra | |
1167 | rlwinm r3,r6,25,31,31 ; Get (addr >> 7) & 1 | |
1168 | rlwnm r0,r0,r4,29,31 ; Get partial index | |
1169 | lis r4,lo16(0x8000) ; Get the bit mask | |
1170 | add r0,r0,r3 ; Make the real index | |
1171 | srw r4,r4,r0 ; Get the allocation mask | |
1172 | or r5,r5,r4 ; Free this entry | |
1173 | cmplw r5,r4 ; Is this the only free entry? | |
1174 | ld r6,SAVprev(r6) ; Chain to the next trimmed savearea | |
1175 | cmplw cr7,r30,r5 ; Does this look empty? | |
1176 | stw r5,SACalloc(r2) ; Save back the allocation bits | |
1177 | beq-- stputpool64 ; First free entry, go put it into the pool... | |
1178 | bne++ cr7,sttoss64 ; Not an empty block | |
1179 | ||
1180 | ; We have an empty block. Remove it from the pool list. | |
1181 | ||
1182 | lwz r29,SACflags(r2) ; Get the flags | |
1183 | cmpldi cr5,r31,0 ; Is this guy on the release list? | |
1184 | ld r28,SACnext(r2) ; Get the forward chain | |
1185 | ||
1186 | rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area? (also sets 0 needed below) | |
1187 | bne-- sttoss64 ; This is permanent entry, do not try to release... | |
1188 | ||
1189 | ld r29,SACprev(r2) ; and the previous | |
1190 | beq-- cr5,stnot1st64 ; Not first | |
1191 | ld r0,SACvrswap(r31) ; Load the previous pool page vr conversion | |
1192 | ||
1193 | stnot1st64: | |
1194 | std r28,SACnext(r29) ; Previous guy points to my next | |
1195 | xor r0,r0,r31 ; Make the last guy virtual | |
1196 | std r29,SACprev(r28) ; Next guy points back to my previous | |
1197 | std r0,SAVprev(r2) ; Store the old top virtual as my back chain | |
1198 | mr r31,r2 ; My physical is now the head of the chain | |
1199 | b sttoss64 ; Get the next one... | |
1200 | ||
1201 | ; A pool block that had no free entries now has one. Stick it on the pool list. | |
1202 | ||
1203 | stputpool64: | |
1204 | ld r28,SVpoolfwd(0) ; Get the first guy on the list | |
1205 | li r0,saveanchor ; Point to the saveanchor | |
1206 | std r2,SVpoolfwd(0) ; Put us on the top of the list | |
1207 | std r28,SACnext(r2) ; We point to the old top | |
1208 | std r2,SACprev(r28) ; Old top guy points back to us | |
1209 | std r0,SACprev(r2) ; Our back points to the anchor | |
1210 | b sttoss64 ; Go on to the next one... | |
1211 | ||
1212 | ||
1213 | ; We are all done. Relocate pool release head, restore all, and go. This code | |
1214 | ; is used both by the 32 and 64-bit paths. | |
1215 | ; r9 = return address | |
1216 | ; r10 = per-proc ptr | |
1217 | ; r11 = MSR at entry | |
1218 | ; r31 = free pool block list | |
1219 | ||
1220 | stdone: bl saveunlock ; Unlock the saveanchor and set adjust field | |
1221 | ||
1222 | mr. r3,r31 ; Move release chain and see if there are any | |
1223 | li r5,0 ; Assume either V=R or no release chain | |
1224 | beq- stnorel ; Nothing to release... | |
1225 | lwz r5,SACvrswap+4(r31) ; Get the vr conversion (only need low half if 64-bit) | |
1226 | ||
1227 | stnorel: | |
1228 | bl saveRestore ; restore translation and exceptions, turn off SF | |
1229 | mtlr r9 ; Restore the return | |
1230 | ||
1231 | lwz r28,FM_SIZE+0(r1) ; Restore R28 | |
1232 | lwz r29,FM_SIZE+4(r1) ; Restore R29 | |
1233 | lwz r30,FM_SIZE+8(r1) ; Restore R30 | |
1234 | lwz r31,FM_SIZE+12(r1) ; Restore R31 | |
1235 | addi r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Pop the stack | |
1236 | xor r3,r3,r5 ; Convert release chain address to virtual | |
1237 | rlwinm r3,r3,0,0,31 ; if 64-bit, clear upper half of virtual address | |
1238 | ||
1239 | #if FPVECDBG | |
1240 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1241 | li r2,0x2207 ; (TEST/DEBUG) | |
1242 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1243 | sc ; (TEST/DEBUG) | |
1244 | #endif | |
1245 | blr ; Return... | |
1246 | ||
1247 | ||
1248 | /* | |
1249 | * *************************** | |
1250 | * * s a v e _ r e c o v e r * | |
1251 | * *************************** | |
1252 | * | |
1253 | * int save_recover(void); | |
1254 | * | |
1255 | * Returns nonzero if we can get enough saveareas to hit the target. We scan the free | |
1256 | * pool. If we empty a pool block, we remove it from the pool list. | |
1257 | */ | |
1258 | ||
1259 | .align 5 | |
1260 | .globl EXT(save_recover) | |
1261 | ||
1262 | LEXT(save_recover) | |
1263 | mflr r9 ; save return address | |
1264 | bl saveSetup ; turn translation and interrupts off, SF on, load many regs | |
1265 | bl savelock ; lock the savearea anchor | |
1266 | ||
1267 | lwz r8,SVadjust(0) ; How many do we need to clear get? | |
1268 | li r3,0 ; Get a 0 | |
1269 | mr. r8,r8 ; Do we need any? | |
1270 | ble-- save_recover1 ; not any more | |
1271 | bf-- pf64Bitb,saveRecover32 ; handle 32-bit processor | |
1272 | b saveRecover64 ; handle 64-bit processor | |
1273 | ||
1274 | save_recover1: ; by the time we locked the anchor, no longer short | |
1275 | mtlr r9 ; Restore return | |
1276 | stw r3,SVlock(0) ; Quick unlock (no need for sync or to set adjust, nothing changed) | |
1277 | #if FPVECDBG | |
1278 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1279 | li r2,0x2208 ; (TEST/DEBUG) | |
1280 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1281 | sc ; (TEST/DEBUG) | |
1282 | #endif | |
1283 | b saveRestore ; turn translation etc back on, return to our caller | |
1284 | ||
1285 | ||
1286 | /* | |
1287 | * ***************************** | |
1288 | * * s a v e R e c o v e r 3 2 * | |
1289 | * ***************************** | |
1290 | * | |
1291 | * Handle "save_recover" on 32-bit processors. At this point, translation and interrupts | |
1292 | * are off, the savearea anchor is locked, and: | |
1293 | * r8 = #pages to recover | |
1294 | * r9 = return address | |
1295 | * r10 = per-proc ptr | |
1296 | * r11 = MSR at entry | |
1297 | */ | |
1298 | ||
1299 | saveRecover32: | |
1300 | li r6,saveanchor ; Start at pool anchor | |
1301 | crclr cr1_eq ; initialize the loop test | |
1302 | lwz r7,SVfreecnt(0) ; Get the current free count | |
1303 | ||
1304 | ||
1305 | ; Loop over next block in free pool. r6 is the ptr to the last block we looked at. | |
1306 | ||
1307 | srcnpool: lwz r6,SACnext+4(r6) ; Point to the next one | |
1308 | cmplwi r6,saveanchor ; Have we wrapped? | |
1309 | beq- srcdone ; Yes, did not have enough... | |
1310 | ||
1311 | lwz r5,SACalloc(r6) ; Pick up the allocation for this pool block | |
1312 | ||
1313 | ; | |
1314 | ; NOTE: The savearea size must be 640 (0x280). We are doing a multiply by shifts and add. | |
1315 | ; offset = (index << 9) + (index << 7) | |
1316 | ; | |
1317 | #if SAVsize != 640 | |
1318 | #error Savearea size is not 640!!!!!!!!!!!! | |
1319 | #endif | |
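
; (640 == 512 + 128, so index*640 == (index << 9) + (index << 7); the loop
; below builds that offset from the bit number of the free savearea that
; cntlzw locates in the SACalloc bitmap.)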
1320 | ||
1321 | ; Loop over free savearea in current block. | |
1322 | ; r5 = bitmap of free saveareas in block at r6 (ie, SACalloc) | |
1323 | ; r6 = ptr to current free pool block | |
1324 | ; r7 = free count | |
1325 | ; r8 = #pages more we still need to recover | |
1326 | ; r9 = return address | |
1327 | ; r10 = per-proc ptr | |
1328 | ; r11 = MSR at entry | |
1329 | ; cr1 = beq if (r8==0) | |
1330 | ||
srcnext:    beq-    cr1,srcdone             ; We have no more to get...

        lis     r3,0x8000                   ; Get the top bit on
        cntlzw  r4,r5                       ; Find a free slot
        addi    r7,r7,1                     ; Bump up the free count
        srw     r3,r3,r4                    ; Make a mask
        slwi    r0,r4,7                     ; First multiply by 128
        subi    r8,r8,1                     ; Decrement the need count
        slwi    r2,r4,9                     ; Then multiply by 512
        andc.   r5,r5,r3                    ; Clear out the "free" bit
        add     r2,r2,r0                    ; Sum to multiply by 640

        stw     r5,SACalloc(r6)             ; Set new allocation bits

        add     r2,r2,r6                    ; Get the actual address of the savearea
        lwz     r3,SVfree+4(0)              ; Get the head of the chain
        cmplwi  cr1,r8,0                    ; Do we actually need any more?
        stw     r2,SVfree+4(0)              ; Push ourselves in the front
        stw     r3,SAVprev+4(r2)            ; Chain the rest of the list behind

        bne+    srcnext                     ; The pool block is not empty yet, try for another...

        lwz     r2,SACnext+4(r6)            ; Get the next pointer
        lwz     r3,SACprev+4(r6)            ; Get the previous pointer
        stw     r3,SACprev+4(r2)            ; The previous of my next points to my previous
        stw     r2,SACnext+4(r3)            ; The next of my previous points to my next
        bne+    cr1,srcnpool                ; We still have more to do...


; Join here from 64-bit path when we have recovered all the saveareas we need to.

srcdone:    stw     r7,SVfreecnt(0)         ; Set the new free count
        bl      saveunlock                  ; Unlock the savearea anchor and set the adjust field

        mtlr    r9                          ; Restore the return
#if FPVECDBG
        lis     r0,HIGH_ADDR(CutTrace)      ; (TEST/DEBUG)
        li      r2,0x2209                   ; (TEST/DEBUG)
        oris    r0,r0,LOW_ADDR(CutTrace)    ; (TEST/DEBUG)
        sc                                  ; (TEST/DEBUG)
#endif
        b       saveRestore                 ; turn xlate and EE back on, SF off, and return to our caller


/*
 * *****************************
 * * s a v e R e c o v e r 6 4 *
 * *****************************
 *
 * Handle "save_recover" on 64-bit processors.  At this point, translation and interrupts
 * are off, the savearea anchor is locked, and:
 *       r8 = #pages to recover
 *       r9 = return address
 *      r10 = per-proc ptr
 *      r11 = MSR at entry
 */

saveRecover64:
        li      r6,saveanchor               ; Start at pool anchor
        crclr   cr1_eq                      ; initialize the loop test
        lwz     r7,SVfreecnt(0)             ; Get the current free count


; Loop over next block in free pool.  r6 is the ptr to the last block we looked at.

srcnpool64:
        ld      r6,SACnext(r6)              ; Point to the next one
        cmpldi  r6,saveanchor               ; Have we wrapped?
        beq--   srcdone                     ; Yes, did not have enough...

        lwz     r5,SACalloc(r6)             ; Pick up the allocation for this pool block


; Loop over the free saveareas in the current block.
;        r5 = bitmap of free saveareas in block at r6 (ie, SACalloc)
;        r6 = ptr to current free pool block
;        r7 = free count
;        r8 = #pages more we still need to recover
;        r9 = return address
;       r10 = per-proc ptr
;       r11 = MSR at entry
;       cr1 = beq if (r8==0)
;
; WARNING: as in the 32-bit path, we depend on (SAVsize==640)

srcnext64:
        beq--   cr1,srcdone                 ; We have no more to get...

        lis     r3,0x8000                   ; Get the top bit on
        cntlzw  r4,r5                       ; Find a free slot
        addi    r7,r7,1                     ; Bump up the free count
        srw     r3,r3,r4                    ; Make a mask
        slwi    r0,r4,7                     ; First multiply by 128
        subi    r8,r8,1                     ; Decrement the need count
        slwi    r2,r4,9                     ; Then multiply by 512
        andc.   r5,r5,r3                    ; Clear out the "free" bit
        add     r2,r2,r0                    ; Sum to multiply by 640

        stw     r5,SACalloc(r6)             ; Set new allocation bits

        add     r2,r2,r6                    ; Get the actual address of the savearea
        ld      r3,SVfree(0)                ; Get the head of the chain
        cmplwi  cr1,r8,0                    ; Do we actually need any more?
        std     r2,SVfree(0)                ; Push ourselves in the front
        std     r3,SAVprev(r2)              ; Chain the rest of the list behind

        bne++   srcnext64                   ; The pool block is not empty yet, try for another...

        ld      r2,SACnext(r6)              ; Get the next pointer
        ld      r3,SACprev(r6)              ; Get the previous pointer
        std     r3,SACprev(r2)              ; The previous of my next points to my previous
        std     r2,SACnext(r3)              ; The next of my previous points to my next
        bne++   cr1,srcnpool64              ; We still have more to do...

        b       srcdone                     ; join the common exit in the 32-bit path


/*
 * *******************
 * * s a v e l o c k *
 * *******************
 *
 * Lock the savearea anchor, so we can manipulate the free list.
 *      msr = interrupts and translation off
 * We destroy:
 *      r8, r3, r12
 */
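
; The lwarx/stwcx. sequence below is PowerPC's load-reserved/store-conditional
; pair.  As a rough C11 analogue (an illustrative sketch, not kernel code --
; the reservation-kill store to lgKillResv has no portable equivalent, and
; acquire ordering stands in for the isync):
;
;       #include <stdatomic.h>
;
;       static void savelock_sketch(atomic_uint *lock) {
;           for (;;) {
;               while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
;                   ;                           // spin read-only while held
;               unsigned expected = 0;          // try to swap 0 -> 1
;               if (atomic_compare_exchange_weak_explicit(lock, &expected, 1,
;                       memory_order_acquire, memory_order_relaxed))
;                   return;                     // lock acquired
;           }
;       }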
        .align  5

savelock:   lwz     r8,SVlock(0)            ; See if lock is held
        cmpwi   r8,0                        ; is it free?
        li      r12,saveanchor              ; Point to the saveanchor
        bne--   savelock                    ; loop until lock released...

savelock0:  lwarx   r8,0,r12                ; Grab the lock value
        cmpwi   r8,0                        ; taken?
        li      r8,1                        ; get nonzero to lock it with
        bne--   savelock1                   ; already locked, wait for it to clear...
        stwcx.  r8,0,r12                    ; Try to seize that there durn lock
        isync                               ; assume we got it
        beqlr++                             ; reservation not lost, so we have the lock
        b       savelock0                   ; Try again...

savelock1:  li      r8,lgKillResv           ; Point to killing field
        stwcx.  r8,0,r8                     ; Kill reservation
        b       savelock                    ; Start over....


/*
 * ***********************
 * * s a v e u n l o c k *
 * ***********************
 *
 * This is the common routine that sets the saveadjust field and unlocks the savearea
 * anchor.
 *      msr = interrupts and translation off
 * We destroy:
 *      r2, r5, r6, r8.
 */
        .align  5
saveunlock:
        lwz     r6,SVfreecnt(0)             ; Get the number on the free list
        lwz     r5,SVinuse(0)               ; Pick up the in use count
        subic.  r8,r6,FreeListMin           ; do we have at least the minimum?
        lwz     r2,SVtarget(0)              ; Get the target
        neg     r8,r8                       ; assuming we are short, get r8 <- shortfall
        blt--   saveunlock1                 ; skip if fewer than minimum on free list

        add     r6,r6,r5                    ; Get the total number of saveareas
        addi    r5,r2,-SaveLowHysteresis    ; Find low end of acceptable range
        sub     r5,r6,r5                    ; Make everything below hysteresis negative
        sub     r2,r2,r6                    ; Get the distance from the target
        addi    r5,r5,-(SaveLowHysteresis + SaveHighHysteresis + 1)  ; Subtract full hysteresis range
        srawi   r5,r5,31                    ; Get 0xFFFFFFFF if outside range or 0 if inside
        and     r8,r2,r5                    ; r8 <- 0 if in range or distance to target if not
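; (The srawi/and pair is a branchless select: srawi replicates the sign bit,
; yielding 0 for a non-negative value and 0xFFFFFFFF for a negative one, so
; the and passes through either 0 or the full distance to the target.)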

saveunlock1:
        li      r5,0                        ; Set a clear value
        stw     r8,SVadjust(0)              ; Set the adjustment value
        eieio                               ; Make sure everything is done
        stw     r5,SVlock(0)                ; Unlock the savearea chain
        blr


/*
 * *******************
 * * s a v e _ c p v *
 * *******************
 *
 * struct savearea *save_cpv(addr64_t saveAreaPhysAddr);
 *
 * Converts a physical savearea address to virtual.  Called with translation on
 * and in 32-bit mode.  Note that the argument is passed as a long long in (r3,r4).
 */
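
; The conversion itself is an XOR: SACvrswap in each pool block holds the
; difference between the block's physical and virtual addresses.  A rough C
; sketch (illustrative only -- the helper and type names are assumed, and this
; ignores the translation-off sequence the assembly must perform to touch the
; physical block):
;
;       struct savearea *save_cpv_sketch(uint64_t pa) {
;           uint64_t blk  = pa & ~0xFFFull;         // 4KB block holding it
;           uint32_t swap = vrswap_low_word(blk);   // low half of SACvrswap
;           return (struct savearea *)((uint32_t)pa ^ swap);
;       }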

        .align  5
        .globl  EXT(save_cpv)

LEXT(save_cpv)
        mflr    r9                          ; save return address
        mr      r8,r3                       ; save upper half of phys address here
        bl      saveSetup                   ; turn off translation and interrupts, turn SF on
        rlwinm  r5,r4,0,0,19                ; Round back to the start of the physical savearea block
        bf--    pf64Bitb,save_cpv1          ; skip if 32-bit processor
        rldimi  r5,r8,32,0                  ; r5 <- 64-bit phys address of block
save_cpv1:
        lwz     r6,SACvrswap+4(r5)          ; Get the conversion to virtual (only need low half if 64-bit)
        mtlr    r9                          ; restore return address
        xor     r3,r4,r6                    ; convert phys to virtual
        rlwinm  r3,r3,0,0,31                ; if 64-bit, zero upper half of virtual address
        b       saveRestore                 ; turn translation etc back on, SF off, and return r3


/*
 * *********************
 * * s a v e S e t u p *
 * *********************
 *
 * This routine is called at the start of all the save-area subroutines.
 * It turns off translation, disables interrupts, turns on 64-bit mode,
 * and sets up cr6 with the feature flags (especially pf64Bit).
 *
 * Note that most save-area routines cannot take _any_ interrupt (such as a
 * PTE miss) once the savearea anchor is locked, since that would result in
 * instant deadlock as we need a save-area to process any exception.
 * We set up:
 *      r10 = per-proc ptr
 *      r11 = old MSR
 *      cr5 = pfNoMSRir feature flag
 *      cr6 = pf64Bit feature flag
 *
 * We use r0, r3, r10, and r11.
 */

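; Typical call sequence (cf. save_recover above): the caller must preserve LR
; around saveSetup/saveRestore, since both are reached with bl/b:
;
;       mflr    r9              ; save return address
;       bl      saveSetup       ; translation/EE off, SF on, r10/r11/cr5/cr6 set
;       ...                     ; critical section, physical addressing
;       mtlr    r9              ; restore return address
;       b       saveRestore     ; undo saveSetup and return to our caller
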
saveSetup:
        mfmsr   r11                         ; get msr
        mfsprg  r3,2                        ; get feature flags
        li      r0,0                        ; start building the mask of MSR bits to clear
        mtcrf   0x2,r3                      ; copy pf64Bit to cr6
        ori     r0,r0,lo16(MASK(MSR_IR)+MASK(MSR_DR)+MASK(MSR_EE))
        mtcrf   0x4,r3                      ; copy pfNoMSRir to cr5
        andc    r3,r11,r0                   ; turn off IR, DR, and EE
        li      r0,1                        ; get a 1 in case it's a 64-bit machine
        bf--    pf64Bitb,saveSetup1         ; skip if not a 64-bit machine
        rldimi  r3,r0,63,MSR_SF_BIT         ; turn SF (bit 0) on
        mtmsrd  r3                          ; turn translation and interrupts off, 64-bit mode on
        isync                               ; wait for it to happen
        mfsprg  r10,0                       ; get per-proc ptr
        blr
saveSetup1:                                 ; here on 32-bit machines
        bt-     pfNoMSRirb,saveSetup2       ; skip if we cannot turn off IR with a mtmsr
        mtmsr   r3                          ; turn translation and interrupts off
        isync                               ; wait for it to happen
        mfsprg  r10,0                       ; get per-proc ptr
        blr
saveSetup2:                                 ; here if pfNoMSRir set for this machine
        li      r0,loadMSR                  ; we will "mtmsr r3" via system call
        sc
        mfsprg  r10,0                       ; get per-proc ptr
        blr


/*
 * *************************
 * * s a v e R e s t o r e *
 * *************************
 *
 * Undoes the effect of calling "saveSetup", ie, it turns relocation and interrupts back on,
 * and turns 64-bit mode back off.
 *      r11 = old MSR
 *      cr6 = pf64Bit feature flag
 */

saveRestore:
        bt++    pf64Bitb,saveRestore64      ; handle a 64-bit processor
saveRestore32:
        mtmsr   r11                         ; restore MSR
        isync                               ; wait for translation to start up
        blr
saveRestore64:                              ; 64-bit processor
        mtmsrd  r11                         ; restore MSR
        isync                               ; wait for changes to happen
        blr