/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#define FPVECDBG 0

#include <assym.s>
#include <debug.h>
#include <db_machine_commands.h>
#include <mach_rt.h>

#include <mach_debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <ppc/savearea.h>
#include <mach/ppc/vm_param.h>

        .text

/* Register usage conventions in this code:
 *      r9 = return address
 *      r10 = per-proc ptr
 *      r11 = MSR at entry
 *      cr6 = feature flags (ie, pf64Bit)
 *
 * Because much of this code deals with physical addresses,
 * there are parallel paths for 32- and 64-bit machines.
 */


/*
 * *****************************
 * * s a v e _ s n a p s h o t *
 * *****************************
 *
 *      void save_snapshot();
 *
 *      Link the current free list & processor local list on an independent list.
 */
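/*
 * In rough C terms, the snapshot does the following (an illustrative sketch
 * only, not part of the build; the struct and field names are simplified
 * stand-ins for the assembly offsets used in this file):
 *
 *      savearea *tail = per_proc->next_savearea;       // anchor the snapshot list
 *      saveanchor.savefreesnapshot = tail;
 *      for (savearea *sv = saveanchor.free; sv != NULL; sv = sv->prev) {
 *          tail->misc1 = sv;                           // link via savemisc1
 *          tail = sv;
 *      }
 *      int inuse = saveanchor.inuse - 1;               // next_savearea counts as free
 *      for (savearea *sv = per_proc->lclfree; sv != NULL; sv = sv->prev) {
 *          inuse--;                                    // local entries count as free
 *          tail->misc1 = sv;
 *          tail = sv;
 *      }
 *      tail->misc1 = NULL;                             // terminate the snapshot list
 *      saveanchor.saveinusesnapshot = inuse;
 */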
        .align  5
        .globl  EXT(save_snapshot)

LEXT(save_snapshot)
        mflr    r9                          ; get return address
        bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
        bf--    pf64Bitb,save_snapshot32    ; skip if 32-bit processor

        ; Handle 64-bit processor.

save_snapshot64:

        ld      r8,next_savearea(r10)       ; Start with the current savearea
        std     r8,SVsavefreesnapshot(0)    ; Make it the restore list anchor
        ld      r5,SVfree(0)                ; Get free save area list anchor

save_snapshot64nextfree:
        mr      r7,r5
        std     r7,savemisc1(r8)            ; Link this one
        ld      r5,SAVprev(r7)              ; Get the next
        mr      r8,r7
        mr.     r0,r5
        bne     save_snapshot64nextfree

        lwz     r6,SVinuse(0)               ; Get inuse count
        ld      r5,lclfree(r10)             ; Get the local savearea list
        subi    r6,r6,1                     ; Count the first as free

save_snapshot64nextlocalfree:
        subi    r6,r6,1                     ; Count as free
        mr      r7,r5
        std     r7,savemisc1(r8)            ; Link this one
        ld      r5,SAVprev(r7)              ; Get the next
        mr      r8,r7
        mr.     r0,r5
        bne     save_snapshot64nextlocalfree

        std     r5,savemisc1(r8)            ; End the list
        stw     r6,SVsaveinusesnapshot(0)   ; Save the new number of inuse saveareas

        mtlr    r9                          ; Restore the return
        b       saveRestore64               ; Restore interrupts and translation

        ; Handle 32-bit processor.

save_snapshot32:
        lwz     r8,next_savearea+4(r10)     ; Start with the current savearea
        stw     r8,SVsavefreesnapshot+4(0)  ; Make it the restore list anchor
        lwz     r5,SVfree+4(0)              ; Get free save area list anchor

save_snapshot32nextfree:
        mr      r7,r5
        stw     r7,savemisc1+4(r8)          ; Link this one
        lwz     r5,SAVprev+4(r7)            ; Get the next
        mr      r8,r7
        mr.     r0,r5
        bne     save_snapshot32nextfree

        lwz     r6,SVinuse(0)               ; Get inuse count
        lwz     r5,lclfree+4(r10)           ; Get the local savearea list
        subi    r6,r6,1                     ; Count the first as free

save_snapshot32nextlocalfree:
        subi    r6,r6,1                     ; Count as free
        mr      r7,r5
        stw     r7,savemisc1+4(r8)          ; Link this one
        lwz     r5,SAVprev+4(r7)            ; Get the next
        mr      r8,r7
        mr.     r0,r5
        bne     save_snapshot32nextlocalfree

        stw     r5,savemisc1+4(r8)          ; End the list
        stw     r6,SVsaveinusesnapshot(0)   ; Save the new number of inuse saveareas

        mtlr    r9                          ; Restore the return
        b       saveRestore32               ; Restore interrupts and translation

/*
 * *********************************************
 * * s a v e _ s n a p s h o t _ r e s t o r e *
 * *********************************************
 *
 *      void save_snapshot_restore();
 *
 *      Restore the free list from the snapshot list, and reset the processor's
 *      next savearea.
 */
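/*
 * Roughly, in C (illustrative sketch only; simplified names as above):
 *
 *      per_proc->lclfree = NULL;  per_proc->lclfreecnt = 0;   // empty local list
 *      saveanchor.inuse = saveanchor.saveinusesnapshot;
 *      saveanchor.free  = saveanchor.savefreesnapshot;
 *      int freecnt = 0;
 *      for (savearea *sv = saveanchor.free; sv != NULL; sv = sv->misc1) {
 *          sv->flags = SAVempty;               // mark savearea free
 *          sv->prev  = sv->misc1;              // relink as the free chain
 *          freecnt++;
 *      }
 *      saveanchor.freecnt = freecnt;
 *      per_proc->next_savearea = saveGet();    // grab a fresh next savearea
 */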
        .align  5
        .globl  EXT(save_snapshot_restore)

LEXT(save_snapshot_restore)
        mflr    r9                          ; get return address
        bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
        bf--    pf64Bitb,save_snapshot_restore32 ; skip if 32-bit processor

        ; Handle 64-bit processor.

save_snapshot_restore64:
        lwz     r7,SVsaveinusesnapshot(0)
        stw     r7,SVinuse(0)               ; Set the new inuse count

        li      r6,0
        stw     r6,lclfreecnt(r10)          ; None local now
        std     r6,lclfree(r10)             ; None local now

        ld      r8,SVsavefreesnapshot(0)    ; Get the restore list anchor
        std     r8,SVfree(0)                ; Make it the free list anchor
        li      r5,SAVempty                 ; Get marker for free savearea

save_snapshot_restore64nextfree:
        addi    r6,r6,1                     ; Count as free
        stb     r5,SAVflags+2(r8)           ; Mark savearea free
        ld      r7,savemisc1(r8)            ; Get the next
        std     r7,SAVprev(r8)              ; Set the next in free list
        mr.     r8,r7
        bne     save_snapshot_restore64nextfree

        stw     r6,SVfreecnt(0)             ; Set the new free count

        bl      saveGet64                   ; Get the next savearea
        std     r3,next_savearea(r10)       ; Set it as the next savearea

        mtlr    r9                          ; Restore the return
        b       saveRestore64               ; Restore interrupts and translation

        ; Handle 32-bit processor.

save_snapshot_restore32:
        lwz     r7,SVsaveinusesnapshot(0)
        stw     r7,SVinuse(0)               ; Set the new inuse count

        li      r6,0
        stw     r6,lclfreecnt(r10)          ; None local now
        stw     r6,lclfree+4(r10)           ; None local now

        lwz     r8,SVsavefreesnapshot+4(0)  ; Get the restore list anchor
        stw     r8,SVfree+4(0)              ; Make it the free list anchor
        li      r5,SAVempty                 ; Get marker for free savearea

save_snapshot_restore32nextfree:
        addi    r6,r6,1                     ; Count as free
        stb     r5,SAVflags+2(r8)           ; Mark savearea free
        lwz     r7,savemisc1+4(r8)          ; Get the next
        stw     r7,SAVprev+4(r8)            ; Set the next in free list
        mr.     r8,r7
        bne     save_snapshot_restore32nextfree

        stw     r6,SVfreecnt(0)             ; Set the new free count

        bl      saveGet32                   ; Get the next savearea
        stw     r3,next_savearea+4(r10)     ; Set it as the next savearea

        mtlr    r9                          ; Restore the return
        b       saveRestore32               ; Restore interrupts and translation

/*
 * ***********************
 * * s a v e _ q u e u e *
 * ***********************
 *
 *      void save_queue(ppnum_t pagenum);
 *
 *      This routine will add a savearea block to the free list.
 *      We also queue the block to the free pool list. This is a
 *      circular doubly linked list. Because this block has no free entries,
 *      it gets queued to the end of the list.
 */
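/*
 * In outline (illustrative C sketch only; simplified names): the page is
 * carved into sac_cnt saveareas, chained together, and the whole chain is
 * pushed onto the global free list under the savearea lock:
 *
 *      savearea *first = (savearea *)(pagenum << 12);  // phys addr of page
 *      savearea *sv = first;
 *      for (int i = 0; i < sac_cnt; i++) {             // sac_cnt saveareas per page
 *          sv->flags = SAVempty;                       // mark each one free
 *          if (i < sac_cnt - 1) {
 *              sv->prev = (savearea *)((char *)sv + SAVsize);
 *              sv = sv->prev;
 *          }
 *      }
 *      savelock();
 *      sv->prev = saveanchor.free;                     // old head chains off our tail
 *      saveanchor.free = first;                        // new block heads the free list
 *      saveanchor.freecnt += sac_cnt;
 *      saveunlock();
 */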
        .align  5
        .globl  EXT(save_queue)

LEXT(save_queue)
        mflr    r9                          ; get return address
        mr      r8,r3                       ; move pagenum out of the way
        bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
        bf--    pf64Bitb,saveQueue32        ; skip if 32-bit processor

        sldi    r2,r8,12                    ; r2 <-- phys address of page
        li      r8,sac_cnt                  ; Get the number of saveareas per page
        mr      r4,r2                       ; Point to start of chain
        li      r0,SAVempty                 ; Get empty marker

saveQueue64a:
        addic.  r8,r8,-1                    ; Keep track of how many we did
        stb     r0,SAVflags+2(r4)           ; Set empty
        addi    r7,r4,SAVsize               ; Point to the next slot
        ble-    saveQueue64b                ; We are done with the chain
        std     r7,SAVprev(r4)              ; Set this chain
        mr      r4,r7                       ; Step to the next
        b       saveQueue64a                ; Fill the whole block...

saveQueue64b:
        bl      savelock                    ; Go lock the save anchor

        ld      r7,SVfree(0)                ; Get the free save area list anchor
        lwz     r6,SVfreecnt(0)             ; Get the number of free saveareas

        std     r2,SVfree(0)                ; Queue in the new one
        addi    r6,r6,sac_cnt               ; Count the ones we are linking in
        std     r7,SAVprev(r4)              ; Queue the old first one off of us
        stw     r6,SVfreecnt(0)             ; Save the new count
        b       saveQueueExit

        ; Handle 32-bit processor.

saveQueue32:
        slwi    r2,r8,12                    ; r2 <-- phys address of page
        li      r8,sac_cnt                  ; Get the number of saveareas per page
        mr      r4,r2                       ; Point to start of chain
        li      r0,SAVempty                 ; Get empty marker

saveQueue32a:
        addic.  r8,r8,-1                    ; Keep track of how many we did
        stb     r0,SAVflags+2(r4)           ; Set empty
        addi    r7,r4,SAVsize               ; Point to the next slot
        ble-    saveQueue32b                ; We are done with the chain
        stw     r7,SAVprev+4(r4)            ; Set this chain
        mr      r4,r7                       ; Step to the next
        b       saveQueue32a                ; Fill the whole block...

saveQueue32b:
        bl      savelock                    ; Go lock the save anchor

        lwz     r7,SVfree+4(0)              ; Get the free save area list anchor
        lwz     r6,SVfreecnt(0)             ; Get the number of free saveareas

        stw     r2,SVfree+4(0)              ; Queue in the new one
        addi    r6,r6,sac_cnt               ; Count the ones we are linking in
        stw     r7,SAVprev+4(r4)            ; Queue the old first one off of us
        stw     r6,SVfreecnt(0)             ; Save the new count

saveQueueExit:                              ; join here from 64-bit path
        bl      saveunlock                  ; Unlock the list and set the adjust count
        mtlr    r9                          ; Restore the return

#if FPVECDBG
        mfsprg  r2,1                        ; (TEST/DEBUG)
        mr.     r2,r2                       ; (TEST/DEBUG)
        beq--   saveRestore                 ; (TEST/DEBUG)
        lis     r0,hi16(CutTrace)           ; (TEST/DEBUG)
        li      r2,0x2201                   ; (TEST/DEBUG)
        oris    r0,r0,lo16(CutTrace)        ; (TEST/DEBUG)
        sc                                  ; (TEST/DEBUG)
#endif
        b       saveRestore                 ; Restore interrupts and translation


/*
 * *****************************
 * * s a v e _ g e t _ i n i t *
 * *****************************
 *
 *      addr64_t save_get_init(void);
 *
 *      Note that save_get_init is used in initial processor startup only. It
 *      is used because translation is on, but no tables exist yet and we have
 *      no V=R BAT registers that cover the entire physical memory.
 */
        .align  5
        .globl  EXT(save_get_init)

LEXT(save_get_init)
        mflr    r9                          ; get return address
        bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
        bfl--   pf64Bitb,saveGet32          ; Get r3 <- savearea, r5 <- page address (with SAC)
        btl++   pf64Bitb,saveGet64          ; get one on a 64-bit machine
        bl      saveRestore                 ; restore translation etc
        mtlr    r9

        ; unpack the physaddr in r3 into a long long in (r3,r4)

        mr      r4,r3                       ; copy low word of phys address to r4
        li      r3,0                        ; assume upper word was 0
        bflr--  pf64Bitb                    ; if 32-bit processor, return
        srdi    r3,r4,32                    ; unpack reg64_t to addr64_t on 64-bit machine
        rlwinm  r4,r4,0,0,31
        blr


/*
 * *******************
 * * s a v e _ g e t *
 * *******************
 *
 *      savearea *save_get(void);
 *
 *      Allocate a savearea, returning a virtual address. NOTE: we must preserve
 *      r0, r2, and r12. Our callers in cswtch.s depend on this.
 */
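/*
 * The physical-to-virtual conversion used below (and its inverse in
 * save_ret) is just an XOR with the per-page SACvrswap value, which holds
 * (virtual_page_addr ^ physical_page_addr). An illustrative C sketch
 * (simplified names, not part of the build):
 *
 *      // phys and virt share the low 12 bits, so one XOR constant per
 *      // page converts either direction
 *      uint32_t vrswap = sac_of(sv_phys)->vrswap;      // virt ^ phys of the page
 *      savearea *sv_virt = (savearea *)((uintptr_t)sv_phys ^ vrswap);
 *      assert(((uintptr_t)sv_virt ^ vrswap) == (uintptr_t)sv_phys);
 */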
        .align  5
        .globl  EXT(save_get)

LEXT(save_get)
        mflr    r9                          ; get return address
        mr      r5,r0                       ; copy regs before saveSetup nails them
        bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
        bf--    pf64Bitb,svgt1              ; skip if 32-bit processor

        std     r5,tempr0(r10)              ; save r0 in per-proc across call to saveGet64
        std     r2,tempr2(r10)              ; and r2
        std     r12,tempr4(r10)             ; and r12
        bl      saveGet64                   ; get r3 <- savearea, r5 <- page address (with SAC)
        ld      r0,tempr0(r10)              ; restore callers regs
        ld      r2,tempr2(r10)
        ld      r12,tempr4(r10)
        b       svgt2

svgt1:                                      ; handle 32-bit processor
        stw     r5,tempr0+4(r10)            ; save r0 in per-proc across call to saveGet32
        stw     r2,tempr2+4(r10)            ; and r2
        stw     r12,tempr4+4(r10)           ; and r12
        bl      saveGet32                   ; get r3 <- savearea, r5 <- page address (with SAC)
        lwz     r0,tempr0+4(r10)            ; restore callers regs
        lwz     r2,tempr2+4(r10)
        lwz     r12,tempr4+4(r10)

svgt2:
        lwz     r5,SACvrswap+4(r5)          ; Get the virtual to real translation (only need low word)
        mtlr    r9                          ; restore return address
        xor     r3,r3,r5                    ; convert physaddr to virtual
        rlwinm  r3,r3,0,0,31                ; 0 upper word if a 64-bit machine

#if FPVECDBG
        mr      r6,r0                       ; (TEST/DEBUG)
        mr      r7,r2                       ; (TEST/DEBUG)
        mfsprg  r2,1                        ; (TEST/DEBUG)
        mr.     r2,r2                       ; (TEST/DEBUG)
        beq--   svgDBBypass                 ; (TEST/DEBUG)
        lis     r0,HIGH_ADDR(CutTrace)      ; (TEST/DEBUG)
        li      r2,0x2203                   ; (TEST/DEBUG)
        oris    r0,r0,LOW_ADDR(CutTrace)    ; (TEST/DEBUG)
        sc                                  ; (TEST/DEBUG)
svgDBBypass:                                ; (TEST/DEBUG)
        mr      r0,r6                       ; (TEST/DEBUG)
        mr      r2,r7                       ; (TEST/DEBUG)
#endif
        b       saveRestore                 ; restore MSR and return to our caller


/*
 * ***********************************
 * * s a v e _ g e t _ p h y s _ 3 2 *
 * ***********************************
 *
 *      reg64_t save_get_phys_32(void);
 *
 *      This is the entry normally called from lowmem_vectors.s with
 *      translation and interrupts already off.
 *      MUST NOT TOUCH CR7
 */
        .align  5
        .globl  EXT(save_get_phys_32)

LEXT(save_get_phys_32)
        mfsprg  r10,0                       ; get the per-proc ptr
        b       saveGet32                   ; Get r3 <- savearea, r5 <- page address (with SAC)


/*
 * ***********************************
 * * s a v e _ g e t _ p h y s _ 6 4 *
 * ***********************************
 *
 *      reg64_t save_get_phys_64(void);
 *
 *      This is the entry normally called from lowmem_vectors.s with
 *      translation and interrupts already off, and in 64-bit mode.
 *      MUST NOT TOUCH CR7
 */
        .align  5
        .globl  EXT(save_get_phys_64)

LEXT(save_get_phys_64)
        mfsprg  r10,0                       ; get the per-proc ptr
        b       saveGet64                   ; Get r3 <- savearea, r5 <- page address (with SAC)


/*
 * *********************
 * * s a v e G e t 6 4 *
 * *********************
 *
 * This is the internal routine to allocate a savearea on a 64-bit processor.
 * Note that we must not take any exceptions of any kind, including PTE misses, as that
 * would deadlock trying to reenter this routine. We pass back the 64-bit physical address.
 * First we try the local list. If that is below a threshold, we try the global free list,
 * which requires taking a lock, and replenish. If there are no saveareas in either list,
 * we will install the backpocket and choke. This routine assumes that the caller has
 * turned translation off, masked interrupts, turned on 64-bit mode, and set up:
 *      r10 = per-proc ptr
 *
 * We return:
 *      r3 = 64-bit physical address of the savearea
 *      r5 = 64-bit physical address of the page the savearea is in, with SAC
 *
 * We destroy:
 *      r2-r8.
 *
 * MUST NOT TOUCH CR7
 */
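/*
 * The allocation policy, as an illustrative C sketch (simplified names; the
 * helper functions here are stand-ins, not real routines):
 *
 *      savearea *saveGet(void) {
 *          while (per_proc->lclfreecnt <= LocalSaveMin) {
 *              // local list low: lock the anchor and move up to
 *              // (LocalSaveTarget - lclfreecnt) saveareas off the global list
 *              savelock();
 *              if (saveanchor.freecnt != 0) {
 *                  move_some_from_global_to_local();
 *                  saveunlock();                   // then retry from the top
 *              } else if (per_proc->lclfreecnt != 0) {
 *                  saveunlock();                   // allow the local reserve to drain
 *                  break;
 *              } else {
 *                  install_backpocket_and_choke(); // out of saveareas: panic
 *              }
 *          }
 *          savearea *sv = per_proc->lclfree;       // pop the local free list
 *          per_proc->lclfree = sv->prev;
 *          per_proc->lclfreecnt--;
 *          sv->prev = BUGBUG;                      // 0x5555... poison values
 *          sv->level = BUGBUG;
 *          sv->act = BUGBUG;
 *          sv->flags = 0;
 *          return sv;                              // physical address
 *      }
 */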

saveGet64:
        lwz     r8,lclfreecnt(r10)          ; Get the count
        ld      r3,lclfree(r10)             ; Get the start of local savearea list
        cmplwi  r8,LocalSaveMin             ; Are we too low?
        ble--   saveGet64GetGlobal          ; We are too low and need to grow list...

        ; Get it from the per-processor local list.

saveGet64GetLocal:
        li      r2,0x5555                   ; get r2 <-- 0x5555555555555555, our bugbug constant
        ld      r4,SAVprev(r3)              ; Chain to the next one
        oris    r2,r2,0x5555
        subi    r8,r8,1                     ; Back down count
        rldimi  r2,r2,32,0

        std     r2,SAVprev(r3)              ; bug next ptr
        stw     r2,SAVlevel(r3)             ; bug context ID
        li      r6,0
        std     r4,lclfree(r10)             ; Unchain first savearea
        stw     r2,SAVact(r3)               ; bug activation ptr
        rldicr  r5,r3,0,51                  ; r5 <-- page ptr, where SAC is kept
        stw     r8,lclfreecnt(r10)          ; Set new count
        stw     r6,SAVflags(r3)             ; clear the flags

        blr

        ; Local list was low so replenish from global list.
        ;   r7 = return address to caller of saveGet64
        ;   r8 = lclfreecnt
        ;   r10 = per-proc ptr

saveGet64GetGlobal:
        mflr    r7                          ; save return address
        subfic  r5,r8,LocalSaveTarget       ; Get the number of saveareas we need to grab to get to target
        bl      savelock                    ; Go lock up the anchor

        lwz     r2,SVfreecnt(0)             ; Get the number on this list
        ld      r8,SVfree(0)                ; Get the head of the save area list

        sub     r3,r2,r5                    ; Get number left after we swipe enough for local list
        sradi   r3,r3,63                    ; Get 0 if enough or -1 if not
        andc    r4,r5,r3                    ; Get number to get if there are enough, 0 otherwise
        and     r5,r2,r3                    ; Get 0 if there are enough, number on list otherwise
        or.     r5,r4,r5                    ; r5 <- number we will move from global to local list
        beq--   saveGet64NoFree             ; There are none to get...
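        ; The five instructions above compute, without a branch,
        ;       r5 = min(r5, r2)  --  i.e. take = min(needed, freecnt)
        ; In C terms (illustrative): mask = (freecnt - needed) >> 63 is 0 when
        ; freecnt >= needed and all-ones when it is not, so
        ;       take = (needed & ~mask) | (freecnt & mask);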

        mtctr   r5                          ; Get loop count
        mr      r6,r8                       ; Remember the first in the list

saveGet64c:
        bdz     saveGet64d                  ; Count down and branch when we hit 0...
        ld      r8,SAVprev(r8)              ; Get the next
        b       saveGet64c                  ; Keep going...

saveGet64d:
        ld      r3,SAVprev(r8)              ; Get the next one
        lwz     r4,SVinuse(0)               ; Get the in use count
        sub     r2,r2,r5                    ; Count down what we stole
        std     r3,SVfree(0)                ; Set the new first in list
        add     r4,r4,r5                    ; Count the ones we just put in the local list as "in use"
        stw     r2,SVfreecnt(0)             ; Set the new count
        stw     r4,SVinuse(0)               ; Set the new in use count

        ld      r4,lclfree(r10)             ; Get the old head of list
        lwz     r3,lclfreecnt(r10)          ; Get the old count
        std     r6,lclfree(r10)             ; Set the new head of the list
        add     r3,r3,r5                    ; Get the new count
        std     r4,SAVprev(r8)              ; Point to the old head
        stw     r3,lclfreecnt(r10)          ; Set the new count

        bl      saveunlock                  ; Update the adjust field and unlock
        mtlr    r7                          ; restore return address
        b       saveGet64                   ; Start over and finally allocate the savearea...

        ; The local list is below the repopulate threshold and the global list is empty.
        ; First we check if there are any left in the local list and if so, we allow
        ; them to be allocated. If not, we release the backpocket list and choke.
        ; There is nothing more that we can do at this point. Hopefully we stay alive
        ; long enough to grab some much-needed panic information.
        ;   r7 = return address to caller of saveGet64
        ;   r10 = per-proc ptr

saveGet64NoFree:
        lwz     r8,lclfreecnt(r10)          ; Get the count
        mr.     r8,r8                       ; Are there any reserve to get?
        beq--   saveGet64Choke              ; No, go choke and die...
        bl      saveunlock                  ; Update the adjust field and unlock
        ld      r3,lclfree(r10)             ; Get the start of local savearea list
        lwz     r8,lclfreecnt(r10)          ; Get the count
        mtlr    r7                          ; restore return address
        b       saveGet64GetLocal           ; We have some left, dip on in...

; We who are about to die salute you. The savearea chain is messed up or
; empty. Add in a few so we have enough to take down the system.

saveGet64Choke:
        lis     r9,hi16(EXT(backpocket))    ; Get high order of back pocket
        ori     r9,r9,lo16(EXT(backpocket)) ; and low part

        lwz     r8,SVfreecnt-saveanchor(r9) ; Get the new number of free elements
        ld      r7,SVfree-saveanchor(r9)    ; Get the head of the chain
        lwz     r6,SVinuse(0)               ; Get total in the old list

        stw     r8,SVfreecnt(0)             ; Set the new number of free elements
        add     r6,r6,r8                    ; Add in the new ones
        std     r7,SVfree(0)                ; Set the new head of the chain
        stw     r6,SVinuse(0)               ; Set total in the new list

saveGetChokeJoin:                           ; join in the fun from 32-bit mode
        lis     r0,hi16(Choke)              ; Set choke firmware call
        li      r7,0                        ; Get a clear register to unlock
        ori     r0,r0,lo16(Choke)           ; Set the rest of the choke call
        li      r3,failNoSavearea           ; Set failure code

        eieio                               ; Make sure all is committed
        stw     r7,SVlock(0)                ; Unlock the free list
        sc                                  ; System ABEND


/*
 * *********************
 * * s a v e G e t 3 2 *
 * *********************
 *
 * This is the internal routine to allocate a savearea on a 32-bit processor.
 * Note that we must not take any exceptions of any kind, including PTE misses, as that
 * would deadlock trying to reenter this routine. We pass back the 32-bit physical address.
 * First we try the local list. If that is below a threshold, we try the global free list,
 * which requires taking a lock, and replenish. If there are no saveareas in either list,
 * we will install the backpocket and choke. This routine assumes that the caller has
 * turned translation off, masked interrupts, and set up:
 *      r10 = per-proc ptr
 *
 * We return:
 *      r3 = 32-bit physical address of the savearea
 *      r5 = 32-bit physical address of the page the savearea is in, with SAC
 *
 * We destroy:
 *      r2-r8.
 */

saveGet32:
        lwz     r8,lclfreecnt(r10)          ; Get the count
        lwz     r3,lclfree+4(r10)           ; Get the start of local savearea list
        cmplwi  r8,LocalSaveMin             ; Are we too low?
        ble-    saveGet32GetGlobal          ; We are too low and need to grow list...

        ; Get savearea from per-processor local list.

saveGet32GetLocal:
        li      r2,0x5555                   ; get r2 <-- 0x55555555, our bugbug constant
        lwz     r4,SAVprev+4(r3)            ; Chain to the next one
        oris    r2,r2,0x5555
        subi    r8,r8,1                     ; Back down count

        stw     r2,SAVprev+4(r3)            ; bug next ptr
        stw     r2,SAVlevel(r3)             ; bug context ID
        li      r6,0
        stw     r4,lclfree+4(r10)           ; Unchain first savearea
        stw     r2,SAVact(r3)               ; bug activation ptr
        rlwinm  r5,r3,0,0,19                ; r5 <-- page ptr, where SAC is kept
        stw     r8,lclfreecnt(r10)          ; Set new count
        stw     r6,SAVflags(r3)             ; clear the flags

        blr

        ; Local list was low so replenish from global list.
        ;   r7 = return address to caller of saveGet32
        ;   r8 = lclfreecnt
        ;   r10 = per-proc ptr

saveGet32GetGlobal:
        mflr    r7                          ; save return address
        subfic  r5,r8,LocalSaveTarget       ; Get the number of saveareas we need to grab to get to target
        bl      savelock                    ; Go lock up the anchor

        lwz     r2,SVfreecnt(0)             ; Get the number on this list
        lwz     r8,SVfree+4(0)              ; Get the head of the save area list

        sub     r3,r2,r5                    ; Get number left after we swipe enough for local list
        srawi   r3,r3,31                    ; Get 0 if enough or -1 if not
        andc    r4,r5,r3                    ; Get number to get if there are enough, 0 otherwise
        and     r5,r2,r3                    ; Get 0 if there are enough, number on list otherwise
        or.     r5,r4,r5                    ; r5 <- number we will move from global to local list
        beq-    saveGet32NoFree             ; There are none to get...

        mtctr   r5                          ; Get loop count
        mr      r6,r8                       ; Remember the first in the list

saveGet32c:
        bdz     saveGet32d                  ; Count down and branch when we hit 0...
        lwz     r8,SAVprev+4(r8)            ; Get the next
        b       saveGet32c                  ; Keep going...

saveGet32d:
        lwz     r3,SAVprev+4(r8)            ; Get the next one
        lwz     r4,SVinuse(0)               ; Get the in use count
        sub     r2,r2,r5                    ; Count down what we stole
        stw     r3,SVfree+4(0)              ; Set the new first in list
        add     r4,r4,r5                    ; Count the ones we just put in the local list as "in use"
        stw     r2,SVfreecnt(0)             ; Set the new count
        stw     r4,SVinuse(0)               ; Set the new in use count

        lwz     r4,lclfree+4(r10)           ; Get the old head of list
        lwz     r3,lclfreecnt(r10)          ; Get the old count
        stw     r6,lclfree+4(r10)           ; Set the new head of the list
        add     r3,r3,r5                    ; Get the new count
        stw     r4,SAVprev+4(r8)            ; Point to the old head
        stw     r3,lclfreecnt(r10)          ; Set the new count

        bl      saveunlock                  ; Update the adjust field and unlock
        mtlr    r7                          ; restore return address
        b       saveGet32                   ; Start over and finally allocate the savearea...

        ; The local list is below the repopulate threshold and the global list is empty.
        ; First we check if there are any left in the local list and if so, we allow
        ; them to be allocated. If not, we release the backpocket list and choke.
        ; There is nothing more that we can do at this point. Hopefully we stay alive
        ; long enough to grab some much-needed panic information.
        ;   r7 = return address to caller of saveGet32
        ;   r10 = per-proc ptr

saveGet32NoFree:
        lwz     r8,lclfreecnt(r10)          ; Get the count
        mr.     r8,r8                       ; Are there any reserve to get?
        beq-    saveGet32Choke              ; No, go choke and die...
        bl      saveunlock                  ; Update the adjust field and unlock
        lwz     r3,lclfree+4(r10)           ; Get the start of local savearea list
        lwz     r8,lclfreecnt(r10)          ; Get the count
        mtlr    r7                          ; restore return address
        b       saveGet32GetLocal           ; We have some left, dip on in...

; We who are about to die salute you. The savearea chain is messed up or
; empty. Add in a few so we have enough to take down the system.

saveGet32Choke:
        lis     r9,hi16(EXT(backpocket))    ; Get high order of back pocket
        ori     r9,r9,lo16(EXT(backpocket)) ; and low part

        lwz     r8,SVfreecnt-saveanchor(r9) ; Get the new number of free elements
        lwz     r7,SVfree+4-saveanchor(r9)  ; Get the head of the chain
        lwz     r6,SVinuse(0)               ; Get total in the old list

        stw     r8,SVfreecnt(0)             ; Set the new number of free elements
        add     r6,r6,r8                    ; Add in the new ones
        stw     r7,SVfree+4(0)              ; Set the new head of the chain
        stw     r6,SVinuse(0)               ; Set total in the new list

        b       saveGetChokeJoin


/*
 * *******************
 * * s a v e _ r e t *
 * *******************
 *
 *      void save_ret(struct savearea *);                // normal call
 *      void save_ret_wMSR(struct savearea *, reg64_t);  // passes MSR to restore as 2nd arg
 *
 *      Return a savearea passed by virtual address to the free list.
 *      Note really well: we can take NO exceptions of any kind,
 *      including a PTE miss once the savearea lock is held. That's
 *      a guaranteed deadlock. That means we must disable interruptions
 *      and turn all translation off.
 */
        .globl  EXT(save_ret_wMSR)          ; alternate entry pt w MSR to restore in r4

LEXT(save_ret_wMSR)
        crset   31                          ; set flag for save_ret_wMSR
        b       svrt1                       ; join common code

        .align  5
        .globl  EXT(save_ret)

LEXT(save_ret)
        crclr   31                          ; clear flag for save_ret_wMSR
svrt1:                                      ; join from save_ret_wMSR
        mflr    r9                          ; get return address
        rlwinm  r7,r3,0,0,19                ; get virtual address of SAC area at start of page
        mr      r8,r3                       ; save virtual address
        lwz     r5,SACvrswap+0(r7)          ; get 64-bit converter from V to R
        lwz     r6,SACvrswap+4(r7)          ; both halves, though only bottom used on 32-bit machine
#if FPVECDBG
        lis     r0,HIGH_ADDR(CutTrace)      ; (TEST/DEBUG)
        li      r2,0x2204                   ; (TEST/DEBUG)
        oris    r0,r0,LOW_ADDR(CutTrace)    ; (TEST/DEBUG)
        sc                                  ; (TEST/DEBUG)
#endif
        bl      saveSetup                   ; turn translation off, 64-bit on, load many regs
        bf++    31,svrt3                    ; skip if not save_ret_wMSR
        mr      r11,r4                      ; was save_ret_wMSR, so overwrite saved MSR
svrt3:
        bf--    pf64Bitb,svrt4              ; skip if a 32-bit processor

        ; Handle 64-bit processor.

        rldimi  r6,r5,32,0                  ; merge upper and lower halves of SACvrswap together
        xor     r3,r8,r6                    ; get r3 <- 64-bit physical address of this savearea
        bl      saveRet64                   ; return it
        mtlr    r9                          ; restore return address
        b       saveRestore64               ; restore MSR

        ; Handle 32-bit processor.

svrt4:
        xor     r3,r8,r6                    ; get r3 <- 32-bit physical address of this savearea
        bl      saveRet32                   ; return it
        mtlr    r9                          ; restore return address
        b       saveRestore32               ; restore MSR


/*
 * *****************************
 * * s a v e _ r e t _ p h y s *
 * *****************************
 *
 *      void save_ret_phys(reg64_t);
 *
 *      Called from lowmem vectors to return (ie, free) a savearea by physical address.
 *      Translation and interrupts are already off, and 64-bit mode is set if defined.
 *      We can take _no_ exceptions of any kind in this code, including PTE miss, since
 *      that would result in a deadlock. We expect:
 *              r3 = phys addr of savearea
 *              msr = IR, DR, and EE off, SF on
 *              cr6 = pf64Bit flag
 *      We destroy:
 *              r0,r2-r10.
 */
        .align  5
        .globl  EXT(save_ret_phys)

LEXT(save_ret_phys)
        mfsprg  r10,0                       ; get the per-proc ptr
        bf--    pf64Bitb,saveRet32          ; handle 32-bit machine
        b       saveRet64                   ; handle 64-bit machine


/*
 * *********************
 * * s a v e R e t 6 4 *
 * *********************
 *
 * This is the internal routine to free a savearea, passed by 64-bit physical
 * address. We assume that IR, DR, and EE are all off, that SF is on, and:
 *      r3 = phys address of the savearea
 *      r10 = per-proc ptr
 * We destroy:
 *      r0,r2-r8.
 */
        .align  5
saveRet64:
        li      r0,SAVempty                 ; Get marker for free savearea
        lwz     r7,lclfreecnt(r10)          ; Get the local count
        ld      r6,lclfree(r10)             ; Get the old local header
        addi    r7,r7,1                     ; Pop up the free count
        std     r6,SAVprev(r3)              ; Plant free chain pointer
        cmplwi  r7,LocalSaveMax             ; Has the list gotten too long?
        stb     r0,SAVflags+2(r3)           ; Mark savearea free
        std     r3,lclfree(r10)             ; Chain us on in
        stw     r7,lclfreecnt(r10)          ; Bump up the count
        bltlr++                             ; List not too long, so done

/* The local savearea chain has gotten too long. Trim it down to the target.
 * Here's a tricky bit, and important:
 *
 * When we trim the list, we NEVER trim the very first one. This is because that is
 * the very last one released and the exception exit code will release the savearea
 * BEFORE it is done using it. Wouldn't be too good if another processor started
 * using it, eh? So for this case, we are safe so long as the savearea stays on
 * the local list. (Note: the exit routine needs to do this because it is in the
 * process of restoring all context and it needs to keep it until the last second.)
 */

        mflr    r0                          ; save return to caller of saveRet64
        mr      r2,r3                       ; r2 <- 1st one on local list, which must not be trimmed
        ld      r3,SAVprev(r3)              ; Skip over the first
        subi    r7,r7,LocalSaveTarget       ; Figure out how much to trim
        mr      r6,r3                       ; r6 <- first one to trim
        mr      r5,r7                       ; Save the number we are trimming

saveRet64a:
        addic.  r7,r7,-1                    ; Any left to do?
        ble--   saveRet64b                  ; Nope...
        ld      r3,SAVprev(r3)              ; Skip to the next one
        b       saveRet64a                  ; Keep going...

saveRet64b:                                 ; r3 <- last one to trim
        ld      r7,SAVprev(r3)              ; Point to the first one not to trim
        li      r4,LocalSaveTarget          ; Set the target count
        std     r7,SAVprev(r2)              ; Trim stuff leaving the one just released as first
        stw     r4,lclfreecnt(r10)          ; Set the current count

        bl      savelock                    ; Lock up the anchor

        ld      r8,SVfree(0)                ; Get the old head of the free list
        lwz     r4,SVfreecnt(0)             ; Get the number of free ones
        lwz     r7,SVinuse(0)               ; Get the number that are in use
        std     r6,SVfree(0)                ; Point to the first trimmed savearea
        add     r4,r4,r5                    ; Add number trimmed to free count
        std     r8,SAVprev(r3)              ; Chain the old head to the tail of the trimmed guys
        sub     r7,r7,r5                    ; Remove the trims from the in use count
        stw     r4,SVfreecnt(0)             ; Set new free count
        stw     r7,SVinuse(0)               ; Set new in use count

        mtlr    r0                          ; Restore the return to our caller
        b       saveunlock                  ; Set adjust count, unlock the saveanchor, and return


/*
 * *********************
 * * s a v e R e t 3 2 *
 * *********************
 *
 * This is the internal routine to free a savearea, passed by 32-bit physical
 * address. We assume that IR, DR, and EE are all off, and:
 *      r3 = phys address of the savearea
 *      r10 = per-proc ptr
 * We destroy:
 *      r0,r2-r8.
 */
        .align  5
saveRet32:
        li      r0,SAVempty                 ; Get marker for free savearea
        lwz     r7,lclfreecnt(r10)          ; Get the local count
        lwz     r6,lclfree+4(r10)           ; Get the old local header
        addi    r7,r7,1                     ; Pop up the free count
        stw     r6,SAVprev+4(r3)            ; Plant free chain pointer
        cmplwi  r7,LocalSaveMax             ; Has the list gotten too long?
        stb     r0,SAVflags+2(r3)           ; Mark savearea free
        stw     r3,lclfree+4(r10)           ; Chain us on in
        stw     r7,lclfreecnt(r10)          ; Bump up the count
        bltlr+                              ; List not too long, so done

/* The local savearea chain has gotten too long. Trim it down to the target.
 * Here's a tricky bit, and important:
 *
 * When we trim the list, we NEVER trim the very first one. This is because that is
 * the very last one released and the exception exit code will release the savearea
 * BEFORE it is done using it. Wouldn't be too good if another processor started
 * using it, eh? So for this case, we are safe so long as the savearea stays on
 * the local list. (Note: the exit routine needs to do this because it is in the
 * process of restoring all context and it needs to keep it until the last second.)
 */

        mflr    r0                          ; save return to caller of saveRet32
        mr      r2,r3                       ; r2 <- 1st one on local list, which must not be trimmed
        lwz     r3,SAVprev+4(r3)            ; Skip over the first
        subi    r7,r7,LocalSaveTarget       ; Figure out how much to trim
        mr      r6,r3                       ; r6 <- first one to trim
        mr      r5,r7                       ; Save the number we are trimming

saveRet32a:
        addic.  r7,r7,-1                    ; Any left to do?
        ble-    saveRet32b                  ; Nope...
        lwz     r3,SAVprev+4(r3)            ; Skip to the next one
        b       saveRet32a                  ; Keep going...

saveRet32b:                                 ; r3 <- last one to trim
        lwz     r7,SAVprev+4(r3)            ; Point to the first one not to trim
        li      r4,LocalSaveTarget          ; Set the target count
        stw     r7,SAVprev+4(r2)            ; Trim stuff leaving the one just released as first
        stw     r4,lclfreecnt(r10)          ; Set the current count

        bl      savelock                    ; Lock up the anchor

        lwz     r8,SVfree+4(0)              ; Get the old head of the free list
        lwz     r4,SVfreecnt(0)             ; Get the number of free ones
        lwz     r7,SVinuse(0)               ; Get the number that are in use
        stw     r6,SVfree+4(0)              ; Point to the first trimmed savearea
        add     r4,r4,r5                    ; Add number trimmed to free count
        stw     r8,SAVprev+4(r3)            ; Chain the old head to the tail of the trimmed guys
        sub     r7,r7,r5                    ; Remove the trims from the in use count
        stw     r4,SVfreecnt(0)             ; Set new free count
        stw     r7,SVinuse(0)               ; Set new in use count

        mtlr    r0                          ; Restore the return to our caller
        b       saveunlock                  ; Set adjust count, unlock the saveanchor, and return


/*
 * *******************************
 * * s a v e _ t r i m _ f r e e *
 * *******************************
 *
 *      struct savearea_comm *save_trim_free(void);
 *
 *      Trim the free list down to the target count, ie by -(SVadjust) save areas.
 *      It trims the list and, if a pool page was fully allocated, puts that page on
 *      the start of the pool list.
 *
 *      If the savearea being released is the last on a pool page (i.e., all entries
 *      are released), the page is dequeued from the pool and queued to any other
 *      found during this scan. Note that this queue is maintained virtually.
 *
 *      When the scan is done, the saveanchor lock is released and the list of
 *      freed pool pages is returned to our caller.
 *
 *      For latency's sake we may want to revisit this code. If we are trimming a
 *      large number of saveareas, we could be disabled and holding the savearea lock
 *      for quite a while. It may be that we want to break the trim down into parts.
 *      Possibly trimming the free list, then individually pushing them into the free pool.
 *
 *      This function expects to be called with translation on and a valid stack.
 *      It uses the standard ABI, ie we destroy r2 and r3-r11, and return the ptr in r3.
 */
        .align  5
        .globl  EXT(save_trim_free)

LEXT(save_trim_free)

        subi    r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Make space for 4 registers on stack
        mflr    r9                          ; save our return address
        stw     r28,FM_SIZE+0(r1)           ; Save R28
        stw     r29,FM_SIZE+4(r1)           ; Save R29
        stw     r30,FM_SIZE+8(r1)           ; Save R30
        stw     r31,FM_SIZE+12(r1)          ; Save R31

        bl      saveSetup                   ; turn off translation and interrupts, load many regs
        bl      savelock                    ; Go lock up the anchor

        lwz     r8,SVadjust(0)              ; How many do we need to clear out?
        li      r3,0                        ; Get a 0
        neg.    r8,r8                       ; Get the actual we need to toss (adjust is neg if too many)
        ble-    save_trim_free1             ; skip if no trimming needed anymore
        bf--    pf64Bitb,saveTrim32         ; handle 32-bit processors
        b       saveTrim64                  ; handle 64-bit processors

save_trim_free1:                            ; by the time we were called, no need to trim anymore
        stw     r3,SVlock(0)                ; Quick unlock (no need for sync or to set adjust, nothing changed)
        mtlr    r9                          ; Restore return

#if FPVECDBG
        lis     r0,HIGH_ADDR(CutTrace)      ; (TEST/DEBUG)
        li      r2,0x2206                   ; (TEST/DEBUG)
        oris    r0,r0,LOW_ADDR(CutTrace)    ; (TEST/DEBUG)
        sc                                  ; (TEST/DEBUG)
#endif
        addi    r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Pop stack - have not trashed register so no need to reload
        b       saveRestore                 ; restore translation and EE, turn SF off, return to our caller


/*
 * ***********************
 * * s a v e T r i m 3 2 *
 * ***********************
 *
 * Handle "save_trim_free" on 32-bit processors. At this point, translation and interrupts
 * are off, the savearea anchor is locked, and:
 *      r8 = #saveareas to trim (>0)
 *      r9 = return address
 *      r10 = per-proc ptr
 *      r11 = MSR at entry
 */

saveTrim32:
        lwz     r7,SVfree+4(0)              ; Get the first on the free list
        mr      r6,r7                       ; Save the first one
        mr      r5,r8                       ; Save the number we are trimming

sttrimming: addic.  r5,r5,-1                ; Any left to do?
        ble-    sttrimmed                   ; Nope...
        lwz     r7,SAVprev+4(r7)            ; Skip to the next one
        b       sttrimming                  ; Keep going...

sttrimmed:  lwz     r5,SAVprev+4(r7)        ; Get the next one (for new head of free list)
        lwz     r4,SVfreecnt(0)             ; Get the free count
        stw     r5,SVfree+4(0)              ; Set new head
        sub     r4,r4,r8                    ; Calculate the new free count
        li      r31,0                       ; Show we have no free pool blocks yet
        crclr   cr1_eq                      ; don't exit loop before 1st iteration
        stw     r4,SVfreecnt(0)             ; Set new free count
        lis     r30,hi16(sac_empty)         ; Get what empty looks like

; NOTE: The savearea size must be 640 (0x280). We are doing a divide by shifts and stuff
; here.
;
#if SAVsize != 640
#error Savearea size is not 640!!!!!!!!!!!!
#endif
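; How the shift-table divide below works: a savearea lies at offset 640*k
; within its page (k = 0..sac_cnt-1), and 640 = 5*128, so (addr >> 7) & 0x1F
; is 5k. Rotating the constant 0x00442200 left by ((5k & 0x1E) + 1) and
; keeping the low 3 bits yields (k & ~1), and (5k & 1) equals (k & 1), so
; their sum is k, the savearea's index within the page. In C (illustrative):
;
;       unsigned fivek = (addr >> 7) & 0x1F;            // 0,5,10,15,20,25
;       unsigned k     = (rotl32(0x00442200, (fivek & 0x1E) + 1) & 7)
;                      + (fivek & 1);                   // same as (addr & 0xfff)/640
;       unsigned mask  = 0x80000000u >> k;              // this savearea's SACalloc bit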

        ; Loop over each savearea we are trimming.
        ;   r6 = next savearea to trim
        ;   r7 = last savearea to trim
        ;   r8 = #saveareas to trim (>0)
        ;   r9 = return address
        ;   r10 = per-proc ptr
        ;   r11 = MSR at entry
        ;   r30 = what SACalloc looks like when all saveareas are free
        ;   r31 = free pool block list
        ;   cr1 = beq set if we just trimmed the last, ie if we are done

sttoss:     beq+    cr1,stdone              ; All done now...

        cmplw   cr1,r6,r7                   ; Have we finished the loop?

        lis     r0,0x0044                   ; Get top of table
        rlwinm  r2,r6,0,0,19                ; Back down to the savearea control stuff
        ori     r0,r0,0x2200                ; Finish shift table
        rlwinm  r4,r6,25,27,30              ; Get (addr >> 7) & 0x1E (same as twice high nybble)
        lwz     r5,SACalloc(r2)             ; Get the allocation bits
        addi    r4,r4,1                     ; Shift 1 extra
        rlwinm  r3,r6,25,31,31              ; Get (addr >> 7) & 1
        rlwnm   r0,r0,r4,29,31              ; Get partial index
        lis     r4,lo16(0x8000)             ; Get the bit mask
        add     r0,r0,r3                    ; Make the real index
        srw     r4,r4,r0                    ; Get the allocation mask
        or      r5,r5,r4                    ; Free this entry
        cmplw   r5,r4                       ; Is this the only free entry?
        lwz     r6,SAVprev+4(r6)            ; Chain to the next trimmed savearea
        cmplw   cr7,r30,r5                  ; Does this look empty?
        stw     r5,SACalloc(r2)             ; Save back the allocation bits
        beq-    stputpool                   ; First free entry, go put it into the pool...
        bne+    cr7,sttoss                  ; Not an empty block

;
; We have an empty block. Remove it from the pool list.
;

        lwz     r29,SACflags(r2)            ; Get the flags
        cmplwi  cr5,r31,0                   ; Is this guy on the release list?
        lwz     r28,SACnext+4(r2)           ; Get the forward chain

        rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area? (also sets 0 needed below)
        bne-    sttoss                      ; This is permanent entry, do not try to release...

        lwz     r29,SACprev+4(r2)           ; and the previous
        beq-    cr5,stnot1st                ; Not first
        lwz     r0,SACvrswap+4(r31)         ; Load the previous pool page vr conversion

stnot1st:   stw     r28,SACnext+4(r29)      ; Previous guy points to my next
        xor     r0,r0,r31                   ; Make the last guy virtual
        stw     r29,SACprev+4(r28)          ; Next guy points back to my previous
        stw     r0,SAVprev+4(r2)            ; Store the old top virtual as my back chain
        mr      r31,r2                      ; My physical is now the head of the chain
        b       sttoss                      ; Get the next one...

;
; A pool block that had no free entries now has one. Stick it on the pool list.
;

stputpool:  lwz     r28,SVpoolfwd+4(0)      ; Get the first guy on the list
        li      r0,saveanchor               ; Point to the saveanchor
        stw     r2,SVpoolfwd+4(0)           ; Put us on the top of the list
        stw     r28,SACnext+4(r2)           ; We point to the old top
        stw     r2,SACprev+4(r28)           ; Old top guy points back to us
        stw     r0,SACprev+4(r2)            ; Our back points to the anchor
        b       sttoss                      ; Go on to the next one...


/*
 * ***********************
 * * s a v e T r i m 6 4 *
 * ***********************
 *
 * Handle "save_trim_free" on 64-bit processors. At this point, translation and interrupts
 * are off, SF is on, the savearea anchor is locked, and:
 *      r8 = #saveareas to trim (>0)
 *      r9 = return address
 *      r10 = per-proc ptr
 *      r11 = MSR at entry
 */

saveTrim64:
        ld      r7,SVfree(0)                ; Get the first on the free list
        mr      r6,r7                       ; Save the first one
        mr      r5,r8                       ; Save the number we are trimming

sttrimming64:
        addic.  r5,r5,-1                    ; Any left to do?
        ble--   sttrimmed64                 ; Nope...
        ld      r7,SAVprev(r7)              ; Skip to the next one
        b       sttrimming64                ; Keep going...

sttrimmed64:
        ld      r5,SAVprev(r7)              ; Get the next one (for new head of free list)
        lwz     r4,SVfreecnt(0)             ; Get the free count
        std     r5,SVfree(0)                ; Set new head
        sub     r4,r4,r8                    ; Calculate the new free count
        li      r31,0                       ; Show we have no free pool blocks yet
        crclr   cr1_eq                      ; don't exit loop before 1st iteration
        stw     r4,SVfreecnt(0)             ; Set new free count
        lis     r30,hi16(sac_empty)         ; Get what empty looks like


        ; Loop over each savearea we are trimming.
        ;   r6 = next savearea to trim
        ;   r7 = last savearea to trim
        ;   r8 = #saveareas to trim (>0)
        ;   r9 = return address
        ;   r10 = per-proc ptr
        ;   r11 = MSR at entry
        ;   r30 = what SACalloc looks like when all saveareas are free
        ;   r31 = free pool block list
        ;   cr1 = beq set if we just trimmed the last, ie if we are done
        ;
        ; WARNING: as in the 32-bit path, this code is doing a divide by 640 (SAVsize).

sttoss64:
        beq++   cr1,stdone                  ; All done now...

        cmpld   cr1,r6,r7                   ; Have we finished the loop?

        lis     r0,0x0044                   ; Get top of table
        rldicr  r2,r6,0,51                  ; r2 <- phys addr of savearea block (with control area)
        ori     r0,r0,0x2200                ; Finish shift table
        rlwinm  r4,r6,25,27,30              ; Get (addr >> 7) & 0x1E (same as twice high nybble)
        lwz     r5,SACalloc(r2)             ; Get the allocation bits
        addi    r4,r4,1                     ; Shift 1 extra
        rlwinm  r3,r6,25,31,31              ; Get (addr >> 7) & 1
        rlwnm   r0,r0,r4,29,31              ; Get partial index
        lis     r4,lo16(0x8000)             ; Get the bit mask
        add     r0,r0,r3                    ; Make the real index
        srw     r4,r4,r0                    ; Get the allocation mask
        or      r5,r5,r4                    ; Free this entry
        cmplw   r5,r4                       ; Is this the only free entry?
        ld      r6,SAVprev(r6)              ; Chain to the next trimmed savearea
        cmplw   cr7,r30,r5                  ; Does this look empty?
        stw     r5,SACalloc(r2)             ; Save back the allocation bits
        beq--   stputpool64                 ; First free entry, go put it into the pool...
        bne++   cr7,sttoss64                ; Not an empty block

; We have an empty block. Remove it from the pool list.

        lwz     r29,SACflags(r2)            ; Get the flags
        cmpldi  cr5,r31,0                   ; Is this guy on the release list?
        ld      r28,SACnext(r2)             ; Get the forward chain

        rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area? (also sets 0 needed below)
        bne--   sttoss64                    ; This is permanent entry, do not try to release...

        ld      r29,SACprev(r2)             ; and the previous
        beq--   cr5,stnot1st64              ; Not first
        ld      r0,SACvrswap(r31)           ; Load the previous pool page vr conversion

stnot1st64:
        std     r28,SACnext(r29)            ; Previous guy points to my next
        xor     r0,r0,r31                   ; Make the last guy virtual
        std     r29,SACprev(r28)            ; Next guy points back to my previous
        std     r0,SAVprev(r2)              ; Store the old top virtual as my back chain
        mr      r31,r2                      ; My physical is now the head of the chain
        b       sttoss64                    ; Get the next one...

; A pool block that had no free entries now has one. Stick it on the pool list.

stputpool64:
        ld      r28,SVpoolfwd(0)            ; Get the first guy on the list
        li      r0,saveanchor               ; Point to the saveanchor
        std     r2,SVpoolfwd(0)             ; Put us on the top of the list
        std     r28,SACnext(r2)             ; We point to the old top
        std     r2,SACprev(r28)             ; Old top guy points back to us
        std     r0,SACprev(r2)              ; Our back points to the anchor
        b       sttoss64                    ; Go on to the next one...


; We are all done. Relocate pool release head, restore all, and go. This code
; is used both by the 32 and 64-bit paths.
;   r9 = return address
;   r10 = per-proc ptr
;   r11 = MSR at entry
;   r31 = free pool block list

stdone:     bl      saveunlock              ; Unlock the saveanchor and set adjust field

        mr.     r3,r31                      ; Move release chain and see if there are any
        li      r5,0                        ; Assume either V=R or no release chain
        beq-    stnorel                     ; Nothing to release...
        lwz     r5,SACvrswap+4(r31)         ; Get the vr conversion (only need low half if 64-bit)

stnorel:
        bl      saveRestore                 ; restore translation and exceptions, turn off SF
        mtlr    r9                          ; Restore the return

        lwz     r28,FM_SIZE+0(r1)           ; Restore R28
        lwz     r29,FM_SIZE+4(r1)           ; Restore R29
        lwz     r30,FM_SIZE+8(r1)           ; Restore R30
        lwz     r31,FM_SIZE+12(r1)          ; Restore R31
        addi    r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Pop the stack
        xor     r3,r3,r5                    ; Convert release chain address to virtual
        rlwinm  r3,r3,0,0,31                ; if 64-bit, clear upper half of virtual address

#if FPVECDBG
        lis     r0,HIGH_ADDR(CutTrace)      ; (TEST/DEBUG)
        li      r2,0x2207                   ; (TEST/DEBUG)
        oris    r0,r0,LOW_ADDR(CutTrace)    ; (TEST/DEBUG)
        sc                                  ; (TEST/DEBUG)
#endif
        blr                                 ; Return...


/*
 * ***************************
 * * s a v e _ r e c o v e r *
 * ***************************
 *
 *      int save_recover(void);
 *
 *      Returns nonzero if we can get enough saveareas to hit the target. We scan the free
 *      pool. If we empty a pool block, we remove it from the pool list.
 */

        .align  5
        .globl  EXT(save_recover)

LEXT(save_recover)
        mflr    r9                          ; save return address
        bl      saveSetup                   ; turn translation and interrupts off, SF on, load many regs
        bl      savelock                    ; lock the savearea anchor

        lwz     r8,SVadjust(0)              ; How many do we need to get?
        li      r3,0                        ; Get a 0
        mr.     r8,r8                       ; Do we need any?
        ble--   save_recover1               ; not any more
        bf--    pf64Bitb,saveRecover32      ; handle 32-bit processor
        b       saveRecover64               ; handle 64-bit processor

save_recover1:                              ; by the time we locked the anchor, no longer short
        mtlr    r9                          ; Restore return
        stw     r3,SVlock(0)                ; Quick unlock (no need for sync or to set adjust, nothing changed)
#if FPVECDBG
        lis     r0,HIGH_ADDR(CutTrace)      ; (TEST/DEBUG)
        li      r2,0x2208                   ; (TEST/DEBUG)
        oris    r0,r0,LOW_ADDR(CutTrace)    ; (TEST/DEBUG)
        sc                                  ; (TEST/DEBUG)
#endif
        b       saveRestore                 ; turn translation etc back on, return to our caller


/*
 * *****************************
 * * s a v e R e c o v e r 3 2 *
 * *****************************
 *
 * Handle "save_recover" on 32-bit processors. At this point, translation and interrupts
 * are off, the savearea anchor is locked, and:
 *      r8 = #saveareas to recover
 *      r9 = return address
 *      r10 = per-proc ptr
 *      r11 = MSR at entry
 */

saveRecover32:
        li      r6,saveanchor               ; Start at pool anchor
        crclr   cr1_eq                      ; initialize the loop test
        lwz     r7,SVfreecnt(0)             ; Get the current free count


; Loop over next block in free pool. r6 is the ptr to the last block we looked at.

srcnpool:   lwz     r6,SACnext+4(r6)        ; Point to the next one
        cmplwi  r6,saveanchor               ; Have we wrapped?
        beq-    srcdone                     ; Yes, did not have enough...

        lwz     r5,SACalloc(r6)             ; Pick up the allocation for this pool block

;
; NOTE: The savearea size must be 640 (0x280). We are doing a multiply by shifts and add.
; offset = (index << 9) + (index << 7)
;
#if SAVsize != 640
#error Savearea size is not 640!!!!!!!!!!!!
#endif
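; For example, index 3 maps to (3 << 9) + (3 << 7) = 1536 + 384 = 1920 = 3*640,
; the byte offset of savearea 3 within the pool block.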

; Loop over free savearea in current block.
;   r5 = bitmap of free saveareas in block at r6 (ie, SACalloc)
;   r6 = ptr to current free pool block
;   r7 = free count
;   r8 = #saveareas more we still need to recover
;   r9 = return address
;   r10 = per-proc ptr
;   r11 = MSR at entry
;   cr1 = beq if (r8==0)

srcnext:    beq-    cr1,srcdone             ; We have no more to get...

        lis     r3,0x8000                   ; Get the top bit on
        cntlzw  r4,r5                       ; Find a free slot
        addi    r7,r7,1                     ; Bump up the free count
        srw     r3,r3,r4                    ; Make a mask
        slwi    r0,r4,7                     ; First multiply by 128
        subi    r8,r8,1                     ; Decrement the need count
        slwi    r2,r4,9                     ; Then multiply by 512
        andc.   r5,r5,r3                    ; Clear out the "free" bit
        add     r2,r2,r0                    ; Sum to multiply by 640

        stw     r5,SACalloc(r6)             ; Set new allocation bits

        add     r2,r2,r6                    ; Get the actual address of the savearea
        lwz     r3,SVfree+4(0)              ; Get the head of the chain
        cmplwi  cr1,r8,0                    ; Do we actually need any more?
        stw     r2,SVfree+4(0)              ; Push ourselves in the front
        stw     r3,SAVprev+4(r2)            ; Chain the rest of the list behind

        bne+    srcnext                     ; The pool block is not empty yet, try for another...

        lwz     r2,SACnext+4(r6)            ; Get the next pointer
        lwz     r3,SACprev+4(r6)            ; Get the previous pointer
        stw     r3,SACprev+4(r2)            ; The previous of my next points to my previous
        stw     r2,SACnext+4(r3)            ; The next of my previous points to my next
        bne+    cr1,srcnpool                ; We still have more to do...


; Join here from 64-bit path when we have recovered all the saveareas we need to.

srcdone:    stw     r7,SVfreecnt(0)         ; Set the new free count
        bl      saveunlock                  ; Unlock the save and set adjust field

        mtlr    r9                          ; Restore the return
#if FPVECDBG
        lis     r0,HIGH_ADDR(CutTrace)      ; (TEST/DEBUG)
        li      r2,0x2209                   ; (TEST/DEBUG)
        oris    r0,r0,LOW_ADDR(CutTrace)    ; (TEST/DEBUG)
        sc                                  ; (TEST/DEBUG)
#endif
        b       saveRestore                 ; turn xlate and EE back on, SF off, and return to our caller


/*
 * *****************************
 * * s a v e R e c o v e r 6 4 *
 * *****************************
 *
 * Handle "save_recover" on 64-bit processors. At this point, translation and interrupts
 * are off, the savearea anchor is locked, and:
 *      r8 = #saveareas to recover
 *      r9 = return address
 *      r10 = per-proc ptr
 *      r11 = MSR at entry
 */

saveRecover64:
        li      r6,saveanchor               ; Start at pool anchor
        crclr   cr1_eq                      ; initialize the loop test
        lwz     r7,SVfreecnt(0)             ; Get the current free count


; Loop over next block in free pool. r6 is the ptr to the last block we looked at.

srcnpool64:
        ld      r6,SACnext(r6)              ; Point to the next one
        cmpldi  r6,saveanchor               ; Have we wrapped?
        beq--   srcdone                     ; Yes, did not have enough...

        lwz     r5,SACalloc(r6)             ; Pick up the allocation for this pool block


; Loop over free savearea in current block.
;   r5 = bitmap of free saveareas in block at r6 (ie, SACalloc)
;   r6 = ptr to current free pool block
;   r7 = free count
;   r8 = #saveareas more we still need to recover
;   r9 = return address
;   r10 = per-proc ptr
;   r11 = MSR at entry
;   cr1 = beq if (r8==0)
;
; WARNING: as in the 32-bit path, we depend on (SAVsize==640)

srcnext64:
        beq--   cr1,srcdone                 ; We have no more to get...

        lis     r3,0x8000                   ; Get the top bit on
        cntlzw  r4,r5                       ; Find a free slot
        addi    r7,r7,1                     ; Bump up the free count
        srw     r3,r3,r4                    ; Make a mask
        slwi    r0,r4,7                     ; First multiply by 128
        subi    r8,r8,1                     ; Decrement the need count
        slwi    r2,r4,9                     ; Then multiply by 512
        andc.   r5,r5,r3                    ; Clear out the "free" bit
        add     r2,r2,r0                    ; Sum to multiply by 640

        stw     r5,SACalloc(r6)             ; Set new allocation bits

        add     r2,r2,r6                    ; Get the actual address of the savearea
        ld      r3,SVfree(0)                ; Get the head of the chain
        cmplwi  cr1,r8,0                    ; Do we actually need any more?
        std     r2,SVfree(0)                ; Push ourselves in the front
        std     r3,SAVprev(r2)              ; Chain the rest of the list behind

        bne++   srcnext64                   ; The pool block is not empty yet, try for another...

        ld      r2,SACnext(r6)              ; Get the next pointer
        ld      r3,SACprev(r6)              ; Get the previous pointer
        std     r3,SACprev(r2)              ; The previous of my next points to my previous
        std     r2,SACnext(r3)              ; The next of my previous points to my next
        bne++   cr1,srcnpool64              ; We still have more to do...

        b       srcdone


/*
 * *******************
 * * s a v e l o c k *
 * *******************
 *
 * Lock the savearea anchor, so we can manipulate the free list.
 *      msr = interrupts and translation off
 * We destroy:
 *      r8, r3, r12
 */
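/*
 * This is a classic test-and-test-and-set spin lock built on lwarx/stwcx.
 * (load-reserve / store-conditional), roughly equivalent to the following C
 * (illustrative only; C11 atomics stand in for the reservation protocol):
 *
 *      #include <stdatomic.h>
 *      extern atomic_uint savelock_word;           // stands in for SVlock
 *      static void savelock_sketch(void) {
 *          for (;;) {
 *              while (atomic_load(&savelock_word) != 0)
 *                  ;                               // spin without writing
 *              unsigned expected = 0;
 *              if (atomic_compare_exchange_weak(&savelock_word, &expected, 1))
 *                  break;                          // got it; isync acts as the acquire barrier
 *          }
 *      }
 *
 * The stwcx. to lgKillResv in the slow path has no C analogue: it cancels the
 * processor's outstanding reservation so we do not hold a cache line while
 * spinning.
 */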
        .align  5

savelock:   lwz     r8,SVlock(0)            ; See if lock is held
        cmpwi   r8,0
        li      r12,saveanchor              ; Point to the saveanchor
        bne--   savelock                    ; loop until lock released...

savelock0:  lwarx   r8,0,r12                ; Grab the lock value
        cmpwi   r8,0                        ; taken?
        li      r8,1                        ; get nonzero to lock it with
        bne--   savelock1                   ; already locked, wait for it to clear...
        stwcx.  r8,0,r12                    ; Try to seize that there durn lock
        isync                               ; assume we got it
        beqlr++                             ; reservation not lost, so we have the lock
        b       savelock0                   ; Try again...

savelock1:  li      r8,lgKillResv           ; Point to killing field
        stwcx.  r8,0,r8                     ; Kill reservation
        b       savelock                    ; Start over....


/*
 * ***********************
 * * s a v e u n l o c k *
 * ***********************
 *
 * This is the common routine that sets the saveadjust field and unlocks the savearea
 * anchor.
 *      msr = interrupts and translation off
 * We destroy:
 *      r2, r5, r6, r8.
 */
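/*
 * The intent of the computation below, per its comments, in C (an
 * illustrative sketch only; simplified names):
 *
 *      int adjust;
 *      if (saveanchor.freecnt < FreeListMin) {
 *          adjust = FreeListMin - saveanchor.freecnt;   // shortfall: recover this many
 *      } else {
 *          int total = saveanchor.freecnt + saveanchor.inuse;
 *          if (total >= saveanchor.target - SaveLowHysteresis &&
 *              total <= saveanchor.target + SaveHighHysteresis)
 *              adjust = 0;                              // inside the dead band: do nothing
 *          else
 *              adjust = saveanchor.target - total;      // >0: recover, <0: trim
 *      }
 *      saveanchor.adjust = adjust;
 *      saveanchor.lock = 0;                             // eieio, then unlock
 */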
        .align  5
saveunlock:
        lwz     r6,SVfreecnt(0)             ; and the number on the free list
        lwz     r5,SVinuse(0)               ; Pick up the in use count
        subic.  r8,r6,FreeListMin           ; do we have at least the minimum?
        lwz     r2,SVtarget(0)              ; Get the target
        neg     r8,r8                       ; assuming we are short, get r8 <- shortfall
        blt--   saveunlock1                 ; skip if fewer than minimum on free list

        add     r6,r6,r5                    ; Get the total number of saveareas
        addi    r5,r2,-SaveLowHysteresis    ; Find low end of acceptable range
        sub     r5,r6,r5                    ; Make everything below hysteresis negative
        sub     r2,r2,r6                    ; Get the distance from the target
        addi    r5,r5,-(SaveLowHysteresis + SaveHighHysteresis + 1) ; Subtract full hysteresis range
        srawi   r5,r5,31                    ; Get 0xFFFFFFFF if outside range or 0 if inside
        and     r8,r2,r5                    ; r8 <- 0 if in range or distance to target if not

saveunlock1:
        li      r5,0                        ; Set a clear value
        stw     r8,SVadjust(0)              ; Set the adjustment value
        eieio                               ; Make sure everything is done
        stw     r5,SVlock(0)                ; Unlock the savearea chain
        blr

/*
 * *******************
 * * s a v e _ c p v *
 * *******************
 *
 *      struct savearea *save_cpv(addr64_t saveAreaPhysAddr);
 *
 *      Converts a physical savearea address to virtual. Called with translation on
 *      and in 32-bit mode. Note that the argument is passed as a long long in (r3,r4).
 */

        .align  5
        .globl  EXT(save_cpv)

LEXT(save_cpv)
        mflr    r9                          ; save return address
        mr      r8,r3                       ; save upper half of phys address here
        bl      saveSetup                   ; turn off translation and interrupts, turn SF on
        rlwinm  r5,r4,0,0,19                ; Round back to the start of the physical savearea block
        bf--    pf64Bitb,save_cpv1          ; skip if 32-bit processor
        rldimi  r5,r8,32,0                  ; r5 <- 64-bit phys address of block
save_cpv1:
        lwz     r6,SACvrswap+4(r5)          ; Get the conversion to virtual (only need low half if 64-bit)
        mtlr    r9                          ; restore return address
        xor     r3,r4,r6                    ; convert phys to virtual
        rlwinm  r3,r3,0,0,31                ; if 64-bit, zero upper half of virtual address
        b       saveRestore                 ; turn translation etc back on, SF off, and return r3


/*
 * *********************
 * * s a v e S e t u p *
 * *********************
 *
 * This routine is called at the start of all the save-area subroutines.
 * It turns off translation, disables interrupts, turns on 64-bit mode,
 * and sets up cr6 with the feature flags (especially pf64Bit).
 *
 * Note that most save-area routines cannot take _any_ interrupt (such as a
 * PTE miss) once the savearea anchor is locked, since that would result in
 * instant deadlock as we need a save-area to process any exception.
 * We set up:
 *      r10 = per-proc ptr
 *      r11 = old MSR
 *      cr5 = pfNoMSRir feature flag
 *      cr6 = pf64Bit feature flag
 *
 * We use r0, r3, r10, and r11.
 */

saveSetup:
        mfmsr   r11                         ; get msr
        mfsprg  r3,2                        ; get feature flags
        li      r0,0
        mtcrf   0x2,r3                      ; copy pf64Bit to cr6
        ori     r0,r0,lo16(MASK(MSR_IR)+MASK(MSR_DR)+MASK(MSR_EE))
        mtcrf   0x4,r3                      ; copy pfNoMSRir to cr5
        andc    r3,r11,r0                   ; turn off IR, DR, and EE
        li      r0,1                        ; get a 1 in case it's a 64-bit machine
        bf--    pf64Bitb,saveSetup1         ; skip if not a 64-bit machine
        rldimi  r3,r0,63,MSR_SF_BIT         ; turn SF (bit 0) on
        mtmsrd  r3                          ; turn translation and interrupts off, 64-bit mode on
        isync                               ; wait for it to happen
        mfsprg  r10,0                       ; get per-proc ptr
        blr
saveSetup1:                                 ; here on 32-bit machines
        bt-     pfNoMSRirb,saveSetup2       ; skip if we cannot turn off IR with a mtmsr
        mtmsr   r3                          ; turn translation and interrupts off
        isync                               ; wait for it to happen
        mfsprg  r10,0                       ; get per-proc ptr
        blr
saveSetup2:                                 ; here if pfNoMSRir set for this machine
        li      r0,loadMSR                  ; we will "mtmsr r3" via system call
        sc
        mfsprg  r10,0                       ; get per-proc ptr
        blr


/*
 * *************************
 * * s a v e R e s t o r e *
 * *************************
 *
 * Undoes the effect of calling "saveSetup", ie it turns relocation and interrupts back on,
 * and turns 64-bit mode back off.
 *      r11 = old MSR
 *      cr6 = pf64Bit feature flag
 */

saveRestore:
        bt++    pf64Bitb,saveRestore64      ; handle a 64-bit processor
saveRestore32:
        mtmsr   r11                         ; restore MSR
        isync                               ; wait for translation to start up
        blr
saveRestore64:                              ; 64-bit processor
        mtmsrd  r11                         ; restore MSR
        isync                               ; wait for changes to happen
        blr