/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define FPVECDBG 0

#include <assym.s>
#include <debug.h>
#include <db_machine_commands.h>
#include <mach_rt.h>

#include <mach_debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <ppc/savearea.h>
#include <mach/ppc/vm_param.h>

    .text

/* Register usage conventions in this code:
 * r9 = return address
 * r10 = per-proc ptr
 * r11 = MSR at entry
 * cr6 = feature flags (ie, pf64Bit)
 *
 * Because much of this code deals with physical addresses,
 * there are parallel paths for 32- and 64-bit machines.
 */


/*
 * *****************************
 * * s a v e _ s n a p s h o t *
 * *****************************
 *
 * void save_snapshot();
 *
 * Link the current free list & processor local list on an independent list.
 */
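/* Illustrative sketch (not part of the build): in C-like pseudocode, the
 * snapshot walks the global free list and then the per-processor local list,
 * chaining every savearea through its savemisc1 field onto one restore list.
 * Field and type names are assumptions drawn from the comments in this file.
 *
 *	savearea *prev = next_savearea;		// becomes the snapshot anchor
 *	for (savearea *s = SVfree; s != NULL; s = s->SAVprev) {
 *		prev->savemisc1 = s;		// link onto the snapshot chain
 *		prev = s;
 *	}
 *	for (savearea *s = lclfree; s != NULL; s = s->SAVprev) {
 *		prev->savemisc1 = s;		// local ones also count as free
 *		prev = s;
 *		SVinuse--;
 *	}
 *	prev->savemisc1 = NULL;			// terminate the snapshot list
 */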
    .align 5
    .globl EXT(save_snapshot)

LEXT(save_snapshot)
    mflr r9 ; get return address
    bl saveSetup ; turn translation off, 64-bit on, load many regs
    bf-- pf64Bitb,save_snapshot32 ; skip if 32-bit processor

    ; Handle 64-bit processor.

save_snapshot64:
    ld r8,next_savearea(r10) ; Start with the current savearea
    std r8,SVsavefreesnapshot(0) ; Make it the restore list anchor
    ld r5,SVfree(0) ; Get free save area list anchor

save_snapshot64nextfree:
    mr r7,r5
    std r7,savemisc1(r8) ; Link this one
    ld r5,SAVprev(r7) ; Get the next
    mr r8,r7
    mr. r0,r5
    bne save_snapshot64nextfree

    lwz r6,SVinuse(0) ; Get inuse count
    ld r5,lclfree(r10) ; Get the local savearea list
    subi r6,r6,1 ; Count the first as free

save_snapshot64nextlocalfree:
    subi r6,r6,1 ; Count as free
    mr r7,r5
    std r7,savemisc1(r8) ; Link this one
    ld r5,SAVprev(r7) ; Get the next
    mr r8,r7
    mr. r0,r5
    bne save_snapshot64nextlocalfree

    std r5,savemisc1(r8) ; End the list
    stw r6,SVsaveinusesnapshot(0) ; Save the new number of inuse saveareas

    mtlr r9 ; Restore the return
    b saveRestore64 ; Restore interrupts and translation

    ; Handle 32-bit processor.

save_snapshot32:
    lwz r8,next_savearea+4(r10) ; Start with the current savearea
    stw r8,SVsavefreesnapshot+4(0) ; Make it the restore list anchor
    lwz r5,SVfree+4(0) ; Get free save area list anchor

save_snapshot32nextfree:
    mr r7,r5
    stw r7,savemisc1+4(r8) ; Link this one
    lwz r5,SAVprev+4(r7) ; Get the next
    mr r8,r7
    mr. r0,r5
    bne save_snapshot32nextfree

    lwz r6,SVinuse(0) ; Get inuse count
    lwz r5,lclfree+4(r10) ; Get the local savearea list
    subi r6,r6,1 ; Count the first as free

save_snapshot32nextlocalfree:
    subi r6,r6,1 ; Count as free
    mr r7,r5
    stw r7,savemisc1+4(r8) ; Link this one
    lwz r5,SAVprev+4(r7) ; Get the next
    mr r8,r7
    mr. r0,r5
    bne save_snapshot32nextlocalfree

    stw r5,savemisc1+4(r8) ; End the list
    stw r6,SVsaveinusesnapshot(0) ; Save the new number of inuse saveareas

    mtlr r9 ; Restore the return
    b saveRestore32 ; Restore interrupts and translation

/*
 * *********************************************
 * * s a v e _ s n a p s h o t _ r e s t o r e *
 * *********************************************
 *
 * void save_snapshot_restore();
 *
 * Restore the free list from the snapshot list, and reset the processor's next savearea.
 */
    .align 5
    .globl EXT(save_snapshot_restore)

LEXT(save_snapshot_restore)
    mflr r9 ; get return address
    bl saveSetup ; turn translation off, 64-bit on, load many regs
    bf-- pf64Bitb,save_snapshot_restore32 ; skip if 32-bit processor

    ; Handle 64-bit processor.

save_snapshot_restore64:
    lwz r7,SVsaveinusesnapshot(0)
    stw r7,SVinuse(0) ; Set the new inuse count

    li r6,0
    stw r6,lclfreecnt(r10) ; None local now
    std r6,lclfree(r10) ; None local now

    ld r8,SVsavefreesnapshot(0) ; Get the restore list anchor
    std r8,SVfree(0) ; Make it the free list anchor
    li r5,SAVempty ; Get marker for free savearea

save_snapshot_restore64nextfree:
    addi r6,r6,1 ; Count as free
    stb r5,SAVflags+2(r8) ; Mark savearea free
    ld r7,savemisc1(r8) ; Get the next
    std r7,SAVprev(r8) ; Set the next in free list
    mr. r8,r7
    bne save_snapshot_restore64nextfree

    stw r6,SVfreecnt(0) ; Set the new free count

    bl saveGet64
    std r3,next_savearea(r10) ; Set the next savearea

    mtlr r9 ; Restore the return
    b saveRestore64 ; Restore interrupts and translation

    ; Handle 32-bit processor.

save_snapshot_restore32:
    lwz r7,SVsaveinusesnapshot(0)
    stw r7,SVinuse(0) ; Set the new inuse count

    li r6,0
    stw r6,lclfreecnt(r10) ; None local now
    stw r6,lclfree+4(r10) ; None local now

    lwz r8,SVsavefreesnapshot+4(0) ; Get the restore list anchor
    stw r8,SVfree+4(0) ; Make it the free list anchor
    li r5,SAVempty ; Get marker for free savearea

save_snapshot_restore32nextfree:
    addi r6,r6,1 ; Count as free
    stb r5,SAVflags+2(r8) ; Mark savearea free
    lwz r7,savemisc1+4(r8) ; Get the next
    stw r7,SAVprev+4(r8) ; Set the next in free list
    mr. r8,r7
    bne save_snapshot_restore32nextfree

    stw r6,SVfreecnt(0) ; Set the new free count

    bl saveGet32
    stw r3,next_savearea+4(r10) ; Set the next savearea

    mtlr r9 ; Restore the return
    b saveRestore32 ; Restore interrupts and translation

/*
 * ***********************
 * * s a v e _ q u e u e *
 * ***********************
 *
 * void save_queue(ppnum_t pagenum);
 *
 * This routine will add a savearea block to the free list.
 * Each slot in the block is marked empty and chained to the next;
 * the whole chain is then pushed onto the front of the global free list.
 */
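/* Illustrative sketch (not part of the build): carving a physical page into
 * sac_cnt saveareas and pushing the chain onto the global free list, in
 * C-like pseudocode. Names are assumptions based on this file's comments.
 *
 *	savearea *first = (savearea *)(pagenum << 12);	// phys addr of page
 *	savearea *s = first;
 *	for (int i = 0; i < sac_cnt - 1; i++) {
 *		s->flags = SAVempty;			// mark slot free
 *		s->SAVprev = (savearea *)((char *)s + SAVsize);
 *		s = s->SAVprev;				// step to next slot
 *	}
 *	s->flags = SAVempty;				// last slot in the block
 *	savelock();
 *	s->SAVprev = SVfree;				// old head chains off our tail
 *	SVfree = first;					// new block goes on the front
 *	SVfreecnt += sac_cnt;
 *	saveunlock();
 */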
    .align 5
    .globl EXT(save_queue)

LEXT(save_queue)
    mflr r9 ; get return address
    mr r8,r3 ; move pagenum out of the way
    bl saveSetup ; turn translation off, 64-bit on, load many regs
    bf-- pf64Bitb,saveQueue32 ; skip if 32-bit processor

    sldi r2,r8,12 ; r2 <-- phys address of page
    li r8,sac_cnt ; Get the number of saveareas per page
    mr r4,r2 ; Point to start of chain
    li r0,SAVempty ; Get empty marker

saveQueue64a:
    addic. r8,r8,-1 ; Keep track of how many we did
    stb r0,SAVflags+2(r4) ; Set empty
    addi r7,r4,SAVsize ; Point to the next slot
    ble- saveQueue64b ; We are done with the chain
    std r7,SAVprev(r4) ; Set this chain
    mr r4,r7 ; Step to the next
    b saveQueue64a ; Fill the whole block...

saveQueue64b:
    bl savelock ; Go lock the save anchor

    ld r7,SVfree(0) ; Get the free save area list anchor
    lwz r6,SVfreecnt(0) ; Get the number of free saveareas

    std r2,SVfree(0) ; Queue in the new one
    addi r6,r6,sac_cnt ; Count the ones we are linking in
    std r7,SAVprev(r4) ; Queue the old first one off of us
    stw r6,SVfreecnt(0) ; Save the new count
    b saveQueueExit

    ; Handle 32-bit processor.

saveQueue32:
    slwi r2,r8,12 ; r2 <-- phys address of page
    li r8,sac_cnt ; Get the number of saveareas per page
    mr r4,r2 ; Point to start of chain
    li r0,SAVempty ; Get empty marker

saveQueue32a:
    addic. r8,r8,-1 ; Keep track of how many we did
    stb r0,SAVflags+2(r4) ; Set empty
    addi r7,r4,SAVsize ; Point to the next slot
    ble- saveQueue32b ; We are done with the chain
    stw r7,SAVprev+4(r4) ; Set this chain
    mr r4,r7 ; Step to the next
    b saveQueue32a ; Fill the whole block...

saveQueue32b:
    bl savelock ; Go lock the save anchor

    lwz r7,SVfree+4(0) ; Get the free save area list anchor
    lwz r6,SVfreecnt(0) ; Get the number of free saveareas

    stw r2,SVfree+4(0) ; Queue in the new one
    addi r6,r6,sac_cnt ; Count the ones we are linking in
    stw r7,SAVprev+4(r4) ; Queue the old first one off of us
    stw r6,SVfreecnt(0) ; Save the new count

saveQueueExit: ; join here from 64-bit path
    bl saveunlock ; Unlock the list and set the adjust count
    mtlr r9 ; Restore the return

#if FPVECDBG
    mfsprg r2,1 ; (TEST/DEBUG)
    mr. r2,r2 ; (TEST/DEBUG)
    beq-- saveRestore ; (TEST/DEBUG)
    lis r0,hi16(CutTrace) ; (TEST/DEBUG)
    li r2,0x2201 ; (TEST/DEBUG)
    oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
    sc ; (TEST/DEBUG)
#endif
    b saveRestore ; Restore interrupts and translation

/*
 * *****************************
 * * s a v e _ g e t _ i n i t *
 * *****************************
 *
 * addr64_t save_get_init(void);
 *
 * Note that save_get_init is used in initial processor startup only. It
 * is used because translation is on, but no tables exist yet and we have
 * no V=R BAT registers that cover the entire physical memory.
 */
    .align 5
    .globl EXT(save_get_init)

LEXT(save_get_init)
    mflr r9 ; get return address
    bl saveSetup ; turn translation off, 64-bit on, load many regs
    bfl-- pf64Bitb,saveGet32 ; Get r3 <- savearea, r5 <- page address (with SAC)
    btl++ pf64Bitb,saveGet64 ; get one on a 64-bit machine
    bl saveRestore ; restore translation etc
    mtlr r9

    ; unpack the physaddr in r3 into a long long in (r3,r4)

    mr r4,r3 ; copy low word of phys address to r4
    li r3,0 ; assume upper word was 0
    bflr-- pf64Bitb ; if 32-bit processor, return
    srdi r3,r4,32 ; unpack reg64_t to addr64_t on 64-bit machine
    rlwinm r4,r4,0,0,31
    blr


/*
 * *******************
 * * s a v e _ g e t *
 * *******************
 *
 * savearea *save_get(void);
 *
 * Allocate a savearea, returning a virtual address. NOTE: we must preserve
 * r0, r2, and r12. Our callers in cswtch.s depend on this.
 */
    .align 5
    .globl EXT(save_get)

LEXT(save_get)
    mflr r9 ; get return address
    mr r5,r0 ; copy regs before saveSetup nails them
    bl saveSetup ; turn translation off, 64-bit on, load many regs
    bf-- pf64Bitb,svgt1 ; skip if 32-bit processor

    std r5,tempr0(r10) ; save r0 in per-proc across call to saveGet64
    std r2,tempr2(r10) ; and r2
    std r12,tempr4(r10) ; and r12
    bl saveGet64 ; get r3 <- savearea, r5 <- page address (with SAC)
    ld r0,tempr0(r10) ; restore caller's regs
    ld r2,tempr2(r10)
    ld r12,tempr4(r10)
    b svgt2

svgt1: ; handle 32-bit processor
    stw r5,tempr0+4(r10) ; save r0 in per-proc across call to saveGet32
    stw r2,tempr2+4(r10) ; and r2
    stw r12,tempr4+4(r10) ; and r12
    bl saveGet32 ; get r3 <- savearea, r5 <- page address (with SAC)
    lwz r0,tempr0+4(r10) ; restore caller's regs
    lwz r2,tempr2+4(r10)
    lwz r12,tempr4+4(r10)

svgt2:
    lwz r5,SACvrswap+4(r5) ; Get the virtual to real translation (only need low word)
    mtlr r9 ; restore return address
    xor r3,r3,r5 ; convert physaddr to virtual
    rlwinm r3,r3,0,0,31 ; 0 upper word if a 64-bit machine

#if FPVECDBG
    mr r6,r0 ; (TEST/DEBUG)
    mr r7,r2 ; (TEST/DEBUG)
    mfsprg r2,1 ; (TEST/DEBUG)
    mr. r2,r2 ; (TEST/DEBUG)
    beq-- svgDBBypass ; (TEST/DEBUG)
    lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
    li r2,0x2203 ; (TEST/DEBUG)
    oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
    sc ; (TEST/DEBUG)
svgDBBypass: ; (TEST/DEBUG)
    mr r0,r6 ; (TEST/DEBUG)
    mr r2,r7 ; (TEST/DEBUG)
#endif
    b saveRestore ; restore MSR and return to our caller


/*
 * ***********************************
 * * s a v e _ g e t _ p h y s _ 3 2 *
 * ***********************************
 *
 * reg64_t save_get_phys_32(void);
 *
 * This is the entry normally called from lowmem_vectors.s with
 * translation and interrupts already off.
 * MUST NOT TOUCH CR7
 */
    .align 5
    .globl EXT(save_get_phys_32)

LEXT(save_get_phys_32)
    mfsprg r10,0 ; get the per-proc ptr
    b saveGet32 ; Get r3 <- savearea, r5 <- page address (with SAC)


/*
 * ***********************************
 * * s a v e _ g e t _ p h y s _ 6 4 *
 * ***********************************
 *
 * reg64_t save_get_phys_64(void);
 *
 * This is the entry normally called from lowmem_vectors.s with
 * translation and interrupts already off, and in 64-bit mode.
 * MUST NOT TOUCH CR7
 */
    .align 5
    .globl EXT(save_get_phys_64)

LEXT(save_get_phys_64)
    mfsprg r10,0 ; get the per-proc ptr
    b saveGet64 ; Get r3 <- savearea, r5 <- page address (with SAC)


/*
 * *********************
 * * s a v e G e t 6 4 *
 * *********************
 *
 * This is the internal routine to allocate a savearea on a 64-bit processor.
 * Note that we must not take any exceptions of any kind, including PTE misses, as that
 * would deadlock trying to reenter this routine. We pass back the 64-bit physical address.
 * First we try the local list. If that is below a threshold, we try the global free list,
 * which requires taking a lock, and replenish. If there are no saveareas in either list,
 * we will install the backpocket and choke. This routine assumes that the caller has
 * turned translation off, masked interrupts, turned on 64-bit mode, and set up:
 * r10 = per-proc ptr
 *
 * We return:
 * r3 = 64-bit physical address of the savearea
 * r5 = 64-bit physical address of the page the savearea is in, with SAC
 *
 * We destroy:
 * r2-r8.
 *
 * MUST NOT TOUCH CR7
 */
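/* Illustrative sketch (not part of the build) of the allocation strategy used
 * by saveGet64 and saveGet32, in C-like pseudocode; names are assumptions
 * based on this file's comments:
 *
 *	savearea *saveGet(void) {
 *	retry:
 *		if (lclfreecnt > LocalSaveMin)		// fast path: per-proc list,
 *			return pop(&lclfree);		// no lock needed
 *		savelock();
 *		n = min(LocalSaveTarget - lclfreecnt, SVfreecnt);
 *		if (n > 0) {				// refill the local list
 *			move n saveareas from SVfree to lclfree;
 *			SVinuse += n;			// locally cached counts as in use
 *			saveunlock();
 *			goto retry;
 *		}
 *		if (lclfreecnt > 0) {			// global list empty: use reserve
 *			saveunlock();
 *			return pop(&lclfree);
 *		}
 *		install backpocket list; choke();	// out of saveareas: take system down
 *	}
 */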

saveGet64:
    lwz r8,lclfreecnt(r10) ; Get the count
    ld r3,lclfree(r10) ; Get the start of local savearea list
    cmplwi r8,LocalSaveMin ; Are we too low?
    ble-- saveGet64GetGlobal ; We are too low and need to grow list...

    ; Get it from the per-processor local list.

saveGet64GetLocal:
    li r2,0x5555 ; get r2 <-- 0x5555555555555555, our bugbug constant
    ld r4,SAVprev(r3) ; Chain to the next one
    oris r2,r2,0x5555
    subi r8,r8,1 ; Back down count
    rldimi r2,r2,32,0

    std r2,SAVprev(r3) ; bug next ptr
    stw r2,SAVlevel(r3) ; bug context ID
    li r6,0
    std r4,lclfree(r10) ; Unchain first savearea
    stw r2,SAVact(r3) ; bug activation ptr
    rldicr r5,r3,0,51 ; r5 <-- page ptr, where SAC is kept
    stw r8,lclfreecnt(r10) ; Set new count
    stw r6,SAVflags(r3) ; clear the flags

    blr

    ; Local list was low so replenish from global list.
    ; r7 = return address to caller of saveGet64
    ; r8 = lclfreecnt
    ; r10 = per-proc ptr

saveGet64GetGlobal:
    mflr r7 ; save return address
    subfic r5,r8,LocalSaveTarget ; Get the number of saveareas we need to grab to get to target
    bl savelock ; Go lock up the anchor

    lwz r2,SVfreecnt(0) ; Get the number on this list
    ld r8,SVfree(0) ; Get the head of the save area list

    sub r3,r2,r5 ; Get number left after we swipe enough for local list
    sradi r3,r3,63 ; Get 0 if enough or -1 if not
    andc r4,r5,r3 ; Get number to get if there are enough, 0 otherwise
    and r5,r2,r3 ; Get 0 if there are enough, number on list otherwise
    or. r5,r4,r5 ; r5 <- number we will move from global to local list
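    ; Net effect of the previous five instructions: a branch-free
    ; r5 = min(saveareas needed, saveareas available on the global list).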
    beq-- saveGet64NoFree ; There are none to get...

    mtctr r5 ; Get loop count
    mr r6,r8 ; Remember the first in the list

saveGet64c:
    bdz saveGet64d ; Count down and branch when we hit 0...
    ld r8,SAVprev(r8) ; Get the next
    b saveGet64c ; Keep going...

saveGet64d:
    ld r3,SAVprev(r8) ; Get the next one
    lwz r4,SVinuse(0) ; Get the in use count
    sub r2,r2,r5 ; Count down what we stole
    std r3,SVfree(0) ; Set the new first in list
    add r4,r4,r5 ; Count the ones we just put in the local list as "in use"
    stw r2,SVfreecnt(0) ; Set the new count
    stw r4,SVinuse(0) ; Set the new in use count

    ld r4,lclfree(r10) ; Get the old head of list
    lwz r3,lclfreecnt(r10) ; Get the old count
    std r6,lclfree(r10) ; Set the new head of the list
    add r3,r3,r5 ; Get the new count
    std r4,SAVprev(r8) ; Point to the old head
    stw r3,lclfreecnt(r10) ; Set the new count

    bl saveunlock ; Update the adjust field and unlock
    mtlr r7 ; restore return address
    b saveGet64 ; Start over and finally allocate the savearea...

    ; The local list is below the repopulate threshold and the global list is empty.
    ; First we check if there are any left in the local list and if so, we allow
    ; them to be allocated. If not, we release the backpocket list and choke.
    ; There is nothing more that we can do at this point. Hopefully we stay alive
    ; long enough to grab some much-needed panic information.
    ; r7 = return address to caller of saveGet64
    ; r10 = per-proc ptr

saveGet64NoFree:
    lwz r8,lclfreecnt(r10) ; Get the count
    mr. r8,r8 ; Are there any reserve to get?
    beq-- saveGet64Choke ; No, go choke and die...
    bl saveunlock ; Update the adjust field and unlock
    ld r3,lclfree(r10) ; Get the start of local savearea list
    lwz r8,lclfreecnt(r10) ; Get the count
    mtlr r7 ; restore return address
    b saveGet64GetLocal ; We have some left, dip on in...

; We who are about to die salute you. The savearea chain is messed up or
; empty. Add in a few so we have enough to take down the system.

saveGet64Choke:
    lis r9,hi16(EXT(backpocket)) ; Get high order of back pocket
    ori r9,r9,lo16(EXT(backpocket)) ; and low part

    lwz r8,SVfreecnt-saveanchor(r9) ; Get the new number of free elements
    ld r7,SVfree-saveanchor(r9) ; Get the head of the chain
    lwz r6,SVinuse(0) ; Get total in the old list

    stw r8,SVfreecnt(0) ; Set the new number of free elements
    add r6,r6,r8 ; Add in the new ones
    std r7,SVfree(0) ; Set the new head of the chain
    stw r6,SVinuse(0) ; Set total in the new list

saveGetChokeJoin: ; join in the fun from 32-bit mode
    lis r0,hi16(Choke) ; Set choke firmware call
    li r7,0 ; Get a clear register to unlock
    ori r0,r0,lo16(Choke) ; Set the rest of the choke call
    li r3,failNoSavearea ; Set failure code

    eieio ; Make sure all is committed
    stw r7,SVlock(0) ; Unlock the free list
    sc ; System ABEND


/*
 * *********************
 * * s a v e G e t 3 2 *
 * *********************
 *
 * This is the internal routine to allocate a savearea on a 32-bit processor.
 * Note that we must not take any exceptions of any kind, including PTE misses, as that
 * would deadlock trying to reenter this routine. We pass back the 32-bit physical address.
 * First we try the local list. If that is below a threshold, we try the global free list,
 * which requires taking a lock, and replenish. If there are no saveareas in either list,
 * we will install the backpocket and choke. This routine assumes that the caller has
 * turned translation off, masked interrupts, and set up:
 * r10 = per-proc ptr
 *
 * We return:
 * r3 = 32-bit physical address of the savearea
 * r5 = 32-bit physical address of the page the savearea is in, with SAC
 *
 * We destroy:
 * r2-r8.
 */

saveGet32:
    lwz r8,lclfreecnt(r10) ; Get the count
    lwz r3,lclfree+4(r10) ; Get the start of local savearea list
    cmplwi r8,LocalSaveMin ; Are we too low?
    ble- saveGet32GetGlobal ; We are too low and need to grow list...

    ; Get savearea from per-processor local list.

saveGet32GetLocal:
    li r2,0x5555 ; get r2 <-- 0x55555555, our bugbug constant
    lwz r4,SAVprev+4(r3) ; Chain to the next one
    oris r2,r2,0x5555
    subi r8,r8,1 ; Back down count

    stw r2,SAVprev+4(r3) ; bug next ptr
    stw r2,SAVlevel(r3) ; bug context ID
    li r6,0
    stw r4,lclfree+4(r10) ; Unchain first savearea
    stw r2,SAVact(r3) ; bug activation ptr
    rlwinm r5,r3,0,0,19 ; r5 <-- page ptr, where SAC is kept
    stw r8,lclfreecnt(r10) ; Set new count
    stw r6,SAVflags(r3) ; clear the flags

    blr

    ; Local list was low so replenish from global list.
    ; r7 = return address to caller of saveGet32
    ; r8 = lclfreecnt
    ; r10 = per-proc ptr

saveGet32GetGlobal:
    mflr r7 ; save return address
    subfic r5,r8,LocalSaveTarget ; Get the number of saveareas we need to grab to get to target
    bl savelock ; Go lock up the anchor

    lwz r2,SVfreecnt(0) ; Get the number on this list
    lwz r8,SVfree+4(0) ; Get the head of the save area list

    sub r3,r2,r5 ; Get number left after we swipe enough for local list
    srawi r3,r3,31 ; Get 0 if enough or -1 if not
    andc r4,r5,r3 ; Get number to get if there are enough, 0 otherwise
    and r5,r2,r3 ; Get 0 if there are enough, number on list otherwise
    or. r5,r4,r5 ; r5 <- number we will move from global to local list
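    ; As in the 64-bit path: a branch-free r5 = min(needed, available).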
    beq- saveGet32NoFree ; There are none to get...

    mtctr r5 ; Get loop count
    mr r6,r8 ; Remember the first in the list

saveGet32c:
    bdz saveGet32d ; Count down and branch when we hit 0...
    lwz r8,SAVprev+4(r8) ; Get the next
    b saveGet32c ; Keep going...

saveGet32d:
    lwz r3,SAVprev+4(r8) ; Get the next one
    lwz r4,SVinuse(0) ; Get the in use count
    sub r2,r2,r5 ; Count down what we stole
    stw r3,SVfree+4(0) ; Set the new first in list
    add r4,r4,r5 ; Count the ones we just put in the local list as "in use"
    stw r2,SVfreecnt(0) ; Set the new count
    stw r4,SVinuse(0) ; Set the new in use count

    lwz r4,lclfree+4(r10) ; Get the old head of list
    lwz r3,lclfreecnt(r10) ; Get the old count
    stw r6,lclfree+4(r10) ; Set the new head of the list
    add r3,r3,r5 ; Get the new count
    stw r4,SAVprev+4(r8) ; Point to the old head
    stw r3,lclfreecnt(r10) ; Set the new count

    bl saveunlock ; Update the adjust field and unlock
    mtlr r7 ; restore return address
    b saveGet32 ; Start over and finally allocate the savearea...

    ; The local list is below the repopulate threshold and the global list is empty.
    ; First we check if there are any left in the local list and if so, we allow
    ; them to be allocated. If not, we release the backpocket list and choke.
    ; There is nothing more that we can do at this point. Hopefully we stay alive
    ; long enough to grab some much-needed panic information.
    ; r7 = return address to caller of saveGet32
    ; r10 = per-proc ptr

saveGet32NoFree:
    lwz r8,lclfreecnt(r10) ; Get the count
    mr. r8,r8 ; Are there any reserve to get?
    beq- saveGet32Choke ; No, go choke and die...
    bl saveunlock ; Update the adjust field and unlock
    lwz r3,lclfree+4(r10) ; Get the start of local savearea list
    lwz r8,lclfreecnt(r10) ; Get the count
    mtlr r7 ; restore return address
    b saveGet32GetLocal ; We have some left, dip on in...

; We who are about to die salute you. The savearea chain is messed up or
; empty. Add in a few so we have enough to take down the system.

saveGet32Choke:
    lis r9,hi16(EXT(backpocket)) ; Get high order of back pocket
    ori r9,r9,lo16(EXT(backpocket)) ; and low part

    lwz r8,SVfreecnt-saveanchor(r9) ; Get the new number of free elements
    lwz r7,SVfree+4-saveanchor(r9) ; Get the head of the chain
    lwz r6,SVinuse(0) ; Get total in the old list

    stw r8,SVfreecnt(0) ; Set the new number of free elements
    add r6,r6,r8 ; Add in the new ones (why?)
    stw r7,SVfree+4(0) ; Set the new head of the chain
    stw r6,SVinuse(0) ; Set total in the new list

    b saveGetChokeJoin


/*
 * *******************
 * * s a v e _ r e t *
 * *******************
 *
 * void save_ret(struct savearea *); // normal call
 * void save_ret_wMSR(struct savearea *,reg64_t); // passes MSR to restore as 2nd arg
 *
 * Return a savearea passed by virtual address to the free list.
 * Note really well: we can take NO exceptions of any kind,
 * including a PTE miss, once the savearea lock is held. That's
 * a guaranteed deadlock. That means we must disable interruptions
 * and turn all translation off.
 */
    .globl EXT(save_ret_wMSR) ; alternate entry pt w MSR to restore in r4

LEXT(save_ret_wMSR)
    crset 31 ; set flag for save_ret_wMSR
    b svrt1 ; join common code

    .align 5
    .globl EXT(save_ret)

LEXT(save_ret)
    crclr 31 ; clear flag for save_ret_wMSR
svrt1: ; join from save_ret_wMSR
    mflr r9 ; get return address
    rlwinm r7,r3,0,0,19 ; get virtual address of SAC area at start of page
    mr r8,r3 ; save virtual address
    lwz r5,SACvrswap+0(r7) ; get 64-bit converter from V to R
    lwz r6,SACvrswap+4(r7) ; both halves, though only bottom used on 32-bit machine
#if FPVECDBG
    lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
    li r2,0x2204 ; (TEST/DEBUG)
    oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
    sc ; (TEST/DEBUG)
#endif
    bl saveSetup ; turn translation off, 64-bit on, load many regs
    bf++ 31,svrt3 ; skip if not save_ret_wMSR
    mr r11,r4 ; was save_ret_wMSR, so overwrite saved MSR
svrt3:
    bf-- pf64Bitb,svrt4 ; skip if a 32-bit processor

    ; Handle 64-bit processor.

    rldimi r6,r5,32,0 ; merge upper and lower halves of SACvrswap together
    xor r3,r8,r6 ; get r3 <- 64-bit physical address of this savearea
    bl saveRet64 ; return it
    mtlr r9 ; restore return address
    b saveRestore64 ; restore MSR

    ; Handle 32-bit processor.

svrt4:
    xor r3,r8,r6 ; get r3 <- 32-bit physical address of this savearea
    bl saveRet32 ; return it
    mtlr r9 ; restore return address
    b saveRestore32 ; restore MSR


/*
 * *****************************
 * * s a v e _ r e t _ p h y s *
 * *****************************
 *
 * void save_ret_phys(reg64_t);
 *
 * Called from lowmem vectors to return (ie, free) a savearea by physical address.
 * Translation and interrupts are already off, and 64-bit mode is set if defined.
 * We can take _no_ exceptions of any kind in this code, including PTE miss, since
 * that would result in a deadlock. We expect:
 * r3 = phys addr of savearea
 * msr = IR, DR, and EE off, SF on
 * cr6 = pf64Bit flag
 * We destroy:
 * r0,r2-r10.
 */
    .align 5
    .globl EXT(save_ret_phys)

LEXT(save_ret_phys)
    mfsprg r10,0 ; get the per-proc ptr
    bf-- pf64Bitb,saveRet32 ; handle 32-bit machine
    b saveRet64 ; handle 64-bit machine


/*
 * *********************
 * * s a v e R e t 6 4 *
 * *********************
 *
 * This is the internal routine to free a savearea, passed by 64-bit physical
 * address. We assume that IR, DR, and EE are all off, that SF is on, and:
 * r3 = phys address of the savearea
 * r10 = per-proc ptr
 * We destroy:
 * r0,r2-r8.
 */
    .align 5
saveRet64:
    li r0,SAVempty ; Get marker for free savearea
    lwz r7,lclfreecnt(r10) ; Get the local count
    ld r6,lclfree(r10) ; Get the old local header
    addi r7,r7,1 ; Pop up the free count
    std r6,SAVprev(r3) ; Plant free chain pointer
    cmplwi r7,LocalSaveMax ; Has the list gotten too long?
    stb r0,SAVflags+2(r3) ; Mark savearea free
    std r3,lclfree(r10) ; Chain us on in
    stw r7,lclfreecnt(r10) ; Bump up the count
    bltlr++ ; List not too long, so done

/* The local savearea chain has gotten too long. Trim it down to the target.
 * Here's a tricky bit, and important:
 *
 * When we trim the list, we NEVER trim the very first one. This is because that is
 * the very last one released and the exception exit code will release the savearea
 * BEFORE it is done using it. Wouldn't be too good if another processor started
 * using it, eh? So for this case, we are safe so long as the savearea stays on
 * the local list. (Note: the exit routine needs to do this because it is in the
 * process of restoring all context and it needs to keep it until the last second.)
 */

    mflr r0 ; save return to caller of saveRet64
    mr r2,r3 ; r2 <- 1st one on local list, which must not be trimmed
    ld r3,SAVprev(r3) ; Skip over the first
    subi r7,r7,LocalSaveTarget ; Figure out how much to trim
    mr r6,r3 ; r6 <- first one to trim
    mr r5,r7 ; Save the number we are trimming

saveRet64a:
    addic. r7,r7,-1 ; Any left to do?
    ble-- saveRet64b ; Nope...
    ld r3,SAVprev(r3) ; Skip to the next one
    b saveRet64a ; Keep going...

saveRet64b: ; r3 <- last one to trim
    ld r7,SAVprev(r3) ; Point to the first one not to trim
    li r4,LocalSaveTarget ; Set the target count
    std r7,SAVprev(r2) ; Trim stuff leaving the one just released as first
    stw r4,lclfreecnt(r10) ; Set the current count

    bl savelock ; Lock up the anchor

    ld r8,SVfree(0) ; Get the old head of the free list
    lwz r4,SVfreecnt(0) ; Get the number of free ones
    lwz r7,SVinuse(0) ; Get the number that are in use
    std r6,SVfree(0) ; Point to the first trimmed savearea
    add r4,r4,r5 ; Add number trimmed to free count
    std r8,SAVprev(r3) ; Chain the old head to the tail of the trimmed guys
    sub r7,r7,r5 ; Remove the trims from the in use count
    stw r4,SVfreecnt(0) ; Set new free count
    stw r7,SVinuse(0) ; Set new in use count

    mtlr r0 ; Restore the return to our caller
    b saveunlock ; Set adjust count, unlock the saveanchor, and return


/*
 * *********************
 * * s a v e R e t 3 2 *
 * *********************
 *
 * This is the internal routine to free a savearea, passed by 32-bit physical
 * address. We assume that IR, DR, and EE are all off, and:
 * r3 = phys address of the savearea
 * r10 = per-proc ptr
 * We destroy:
 * r0,r2-r8.
 */
    .align 5
saveRet32:
    li r0,SAVempty ; Get marker for free savearea
    lwz r7,lclfreecnt(r10) ; Get the local count
    lwz r6,lclfree+4(r10) ; Get the old local header
    addi r7,r7,1 ; Pop up the free count
    stw r6,SAVprev+4(r3) ; Plant free chain pointer
    cmplwi r7,LocalSaveMax ; Has the list gotten too long?
    stb r0,SAVflags+2(r3) ; Mark savearea free
    stw r3,lclfree+4(r10) ; Chain us on in
    stw r7,lclfreecnt(r10) ; Bump up the count
    bltlr+ ; List not too long, so done

/* The local savearea chain has gotten too long. Trim it down to the target.
 * See the tricky bit explained in saveRet64 above: we NEVER trim the very
 * first savearea, because it is the one most recently released and the
 * exception exit code releases a savearea BEFORE it is done using it.
 */

    mflr r0 ; save return to caller of saveRet32
    mr r2,r3 ; r2 <- 1st one on local list, which must not be trimmed
    lwz r3,SAVprev+4(r3) ; Skip over the first
    subi r7,r7,LocalSaveTarget ; Figure out how much to trim
    mr r6,r3 ; r6 <- first one to trim
    mr r5,r7 ; Save the number we are trimming

saveRet32a:
    addic. r7,r7,-1 ; Any left to do?
    ble- saveRet32b ; Nope...
    lwz r3,SAVprev+4(r3) ; Skip to the next one
    b saveRet32a ; Keep going...

saveRet32b: ; r3 <- last one to trim
    lwz r7,SAVprev+4(r3) ; Point to the first one not to trim
    li r4,LocalSaveTarget ; Set the target count
    stw r7,SAVprev+4(r2) ; Trim stuff leaving the one just released as first
    stw r4,lclfreecnt(r10) ; Set the current count

    bl savelock ; Lock up the anchor

    lwz r8,SVfree+4(0) ; Get the old head of the free list
    lwz r4,SVfreecnt(0) ; Get the number of free ones
    lwz r7,SVinuse(0) ; Get the number that are in use
    stw r6,SVfree+4(0) ; Point to the first trimmed savearea
    add r4,r4,r5 ; Add number trimmed to free count
    stw r8,SAVprev+4(r3) ; Chain the old head to the tail of the trimmed guys
    sub r7,r7,r5 ; Remove the trims from the in use count
    stw r4,SVfreecnt(0) ; Set new free count
    stw r7,SVinuse(0) ; Set new in use count

    mtlr r0 ; Restore the return to our caller
    b saveunlock ; Set adjust count, unlock the saveanchor, and return


/*
 * *******************************
 * * s a v e _ t r i m _ f r e e *
 * *******************************
 *
 * struct savearea_comm *save_trim_free(void);
 *
 * Trim the free list down to the target count, ie by -(SVadjust) save areas.
 * It trims the list and, if a pool page was fully allocated, puts that page on
 * the start of the pool list.
 *
 * If the savearea being released is the last on a pool page (i.e., all entries
 * are released), the page is dequeued from the pool and queued to any other
 * found during this scan. Note that this queue is maintained virtually.
 *
 * When the scan is done, the saveanchor lock is released and the list of
 * freed pool pages is returned to our caller.
 *
 * For latency's sake we may want to revisit this code. If we are trimming a
 * large number of saveareas, we could be disabled and holding the savearea lock
 * for quite a while. It may be that we want to break the trim down into parts.
 * Possibly trimming the free list, then individually pushing them into the free pool.
 *
 * This function expects to be called with translation on and a valid stack.
 * It uses the standard ABI, ie we destroy r2 and r3-r11, and return the ptr in r3.
 */
    .align 5
    .globl EXT(save_trim_free)

LEXT(save_trim_free)

    subi r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Make space for 4 registers on stack
    mflr r9 ; save our return address
    stw r28,FM_SIZE+0(r1) ; Save R28
    stw r29,FM_SIZE+4(r1) ; Save R29
    stw r30,FM_SIZE+8(r1) ; Save R30
    stw r31,FM_SIZE+12(r1) ; Save R31

    bl saveSetup ; turn off translation and interrupts, load many regs
    bl savelock ; Go lock up the anchor

    lwz r8,SVadjust(0) ; How many do we need to clear out?
    li r3,0 ; Get a 0
    neg. r8,r8 ; Get the actual we need to toss (adjust is neg if too many)
    ble- save_trim_free1 ; skip if no trimming needed anymore
    bf-- pf64Bitb,saveTrim32 ; handle 32-bit processors
    b saveTrim64 ; handle 64-bit processors

save_trim_free1: ; by the time we were called, no need to trim anymore
    stw r3,SVlock(0) ; Quick unlock (no need for sync or to set adjust, nothing changed)
    mtlr r9 ; Restore return

#if FPVECDBG
    lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
    li r2,0x2206 ; (TEST/DEBUG)
    oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
    sc ; (TEST/DEBUG)
#endif
    addi r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Pop stack - have not trashed registers so no need to reload
    b saveRestore ; restore translation and EE, turn SF off, return to our caller


/*
 * ***********************
 * * s a v e T r i m 3 2 *
 * ***********************
 *
 * Handle "save_trim_free" on 32-bit processors. At this point, translation and interrupts
 * are off, the savearea anchor is locked, and:
 * r8 = #saveareas to trim (>0)
 * r9 = return address
 * r10 = per-proc ptr
 * r11 = MSR at entry
 */

saveTrim32:
    lwz r7,SVfree+4(0) ; Get the first on the free list
    mr r6,r7 ; Save the first one
    mr r5,r8 ; Save the number we are trimming

sttrimming:
    addic. r5,r5,-1 ; Any left to do?
    ble- sttrimmed ; Nope...
    lwz r7,SAVprev+4(r7) ; Skip to the next one
    b sttrimming ; Keep going...

sttrimmed:
    lwz r5,SAVprev+4(r7) ; Get the next one (for new head of free list)
    lwz r4,SVfreecnt(0) ; Get the free count
    stw r5,SVfree+4(0) ; Set new head
    sub r4,r4,r8 ; Calculate the new free count
    li r31,0 ; Show we have no free pool blocks yet
    crclr cr1_eq ; don't exit loop before 1st iteration
    stw r4,SVfreecnt(0) ; Set new free count
    lis r30,hi16(sac_empty) ; Get what empty looks like

; NOTE: The savearea size must be 640 (0x280). We are doing a divide by shifts and stuff
; here.
;
#if SAVsize != 640
#error Savearea size is not 640!!!!!!!!!!!!
#endif
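/* How the divide by 640 works, with a worked example (illustrative note):
 * a savearea sits at byte offset 640*i within its page, and we need i to pick
 * the bit in the SACalloc bitmap. Since 640 = 5 * 128, i = (offset >> 7) / 5,
 * and (offset >> 7) is at most 31, so the quotients fit in a packed 32-bit
 * lookup table: rotating the constant 0x00442200 left by
 * ((offset >> 7) & 0x1E) + 1 and keeping the low 3 bits gives the quotient for
 * the even half-steps, and adding bit (offset >> 7) & 1 completes it.
 * Example: offset 0x500 (savearea 2): 0x500 >> 7 = 10; rotating 0x00442200
 * left by 11 leaves binary 010 in the low bits, i.e. 2; the low bit adds 0,
 * so the allocation mask is 0x80000000 >> 2.
 */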

    ; Loop over each savearea we are trimming.
    ; r6 = next savearea to trim
    ; r7 = last savearea to trim
    ; r8 = #saveareas to trim (>0)
    ; r9 = return address
    ; r10 = per-proc ptr
    ; r11 = MSR at entry
    ; r30 = what SACalloc looks like when all saveareas are free
    ; r31 = free pool block list
    ; cr1 = beq set if we just trimmed the last, ie if we are done

sttoss:
    beq+ cr1,stdone ; All done now...

    cmplw cr1,r6,r7 ; Have we finished the loop?

    lis r0,0x0044 ; Get top of table
    rlwinm r2,r6,0,0,19 ; Back down to the savearea control stuff
    ori r0,r0,0x2200 ; Finish shift table
    rlwinm r4,r6,25,27,30 ; Get (addr >> 7) & 0x1E (same as twice high nybble)
    lwz r5,SACalloc(r2) ; Get the allocation bits
    addi r4,r4,1 ; Shift 1 extra
    rlwinm r3,r6,25,31,31 ; Get (addr >> 7) & 1
    rlwnm r0,r0,r4,29,31 ; Get partial index
    lis r4,lo16(0x8000) ; Get the bit mask
    add r0,r0,r3 ; Make the real index
    srw r4,r4,r0 ; Get the allocation mask
    or r5,r5,r4 ; Free this entry
    cmplw r5,r4 ; Is this the only free entry?
    lwz r6,SAVprev+4(r6) ; Chain to the next trimmed savearea
    cmplw cr7,r30,r5 ; Does this look empty?
    stw r5,SACalloc(r2) ; Save back the allocation bits
    beq- stputpool ; First free entry, go put it into the pool...
    bne+ cr7,sttoss ; Not an empty block

;
; We have an empty block. Remove it from the pool list.
;

    lwz r29,SACflags(r2) ; Get the flags
    cmplwi cr5,r31,0 ; Is this guy on the release list?
    lwz r28,SACnext+4(r2) ; Get the forward chain

    rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area? (also sets 0 needed below)
    bne- sttoss ; This is permanent entry, do not try to release...

    lwz r29,SACprev+4(r2) ; and the previous
    beq- cr5,stnot1st ; Not first
    lwz r0,SACvrswap+4(r31) ; Load the previous pool page vr conversion

stnot1st:
    stw r28,SACnext+4(r29) ; Previous guy points to my next
    xor r0,r0,r31 ; Make the last guy virtual
    stw r29,SACprev+4(r28) ; Next guy points back to my previous
    stw r0,SAVprev+4(r2) ; Store the old top virtual as my back chain
    mr r31,r2 ; My physical is now the head of the chain
    b sttoss ; Get the next one...

;
; A pool block that had no free entries now has one. Stick it on the pool list.
;

stputpool:
    lwz r28,SVpoolfwd+4(0) ; Get the first guy on the list
    li r0,saveanchor ; Point to the saveanchor
    stw r2,SVpoolfwd+4(0) ; Put us on the top of the list
    stw r28,SACnext+4(r2) ; We point to the old top
    stw r2,SACprev+4(r28) ; Old top guy points back to us
    stw r0,SACprev+4(r2) ; Our back points to the anchor
    b sttoss ; Go on to the next one...


/*
 * ***********************
 * * s a v e T r i m 6 4 *
 * ***********************
 *
 * Handle "save_trim_free" on 64-bit processors. At this point, translation and interrupts
 * are off, SF is on, the savearea anchor is locked, and:
 * r8 = #saveareas to trim (>0)
 * r9 = return address
 * r10 = per-proc ptr
 * r11 = MSR at entry
 */

saveTrim64:
    ld r7,SVfree(0) ; Get the first on the free list
    mr r6,r7 ; Save the first one
    mr r5,r8 ; Save the number we are trimming

sttrimming64:
    addic. r5,r5,-1 ; Any left to do?
    ble-- sttrimmed64 ; Nope...
    ld r7,SAVprev(r7) ; Skip to the next one
    b sttrimming64 ; Keep going...

sttrimmed64:
    ld r5,SAVprev(r7) ; Get the next one (for new head of free list)
    lwz r4,SVfreecnt(0) ; Get the free count
    std r5,SVfree(0) ; Set new head
    sub r4,r4,r8 ; Calculate the new free count
    li r31,0 ; Show we have no free pool blocks yet
    crclr cr1_eq ; don't exit loop before 1st iteration
    stw r4,SVfreecnt(0) ; Set new free count
    lis r30,hi16(sac_empty) ; Get what empty looks like

    ; Loop over each savearea we are trimming.
    ; r6 = next savearea to trim
    ; r7 = last savearea to trim
    ; r8 = #saveareas to trim (>0)
    ; r9 = return address
    ; r10 = per-proc ptr
    ; r11 = MSR at entry
    ; r30 = what SACalloc looks like when all saveareas are free
    ; r31 = free pool block list
    ; cr1 = beq set if we just trimmed the last, ie if we are done
    ;
    ; WARNING: as in the 32-bit path, this code is doing a divide by 640 (SAVsize).

sttoss64:
    beq++ cr1,stdone ; All done now...

    cmpld cr1,r6,r7 ; Have we finished the loop?

    lis r0,0x0044 ; Get top of table
    rldicr r2,r6,0,51 ; r2 <- phys addr of savearea block (with control area)
    ori r0,r0,0x2200 ; Finish shift table
    rlwinm r4,r6,25,27,30 ; Get (addr >> 7) & 0x1E (same as twice high nybble)
    lwz r5,SACalloc(r2) ; Get the allocation bits
    addi r4,r4,1 ; Shift 1 extra
    rlwinm r3,r6,25,31,31 ; Get (addr >> 7) & 1
    rlwnm r0,r0,r4,29,31 ; Get partial index
    lis r4,lo16(0x8000) ; Get the bit mask
    add r0,r0,r3 ; Make the real index
    srw r4,r4,r0 ; Get the allocation mask
    or r5,r5,r4 ; Free this entry
    cmplw r5,r4 ; Is this the only free entry?
    ld r6,SAVprev(r6) ; Chain to the next trimmed savearea
    cmplw cr7,r30,r5 ; Does this look empty?
    stw r5,SACalloc(r2) ; Save back the allocation bits
    beq-- stputpool64 ; First free entry, go put it into the pool...
    bne++ cr7,sttoss64 ; Not an empty block

; We have an empty block. Remove it from the pool list.

    lwz r29,SACflags(r2) ; Get the flags
    cmpldi cr5,r31,0 ; Is this guy on the release list?
    ld r28,SACnext(r2) ; Get the forward chain

    rlwinm. r0,r29,0,sac_permb,sac_permb ; Is this a permanently allocated area? (also sets 0 needed below)
    bne-- sttoss64 ; This is permanent entry, do not try to release...

    ld r29,SACprev(r2) ; and the previous
    beq-- cr5,stnot1st64 ; Not first
    ld r0,SACvrswap(r31) ; Load the previous pool page vr conversion

stnot1st64:
    std r28,SACnext(r29) ; Previous guy points to my next
    xor r0,r0,r31 ; Make the last guy virtual
    std r29,SACprev(r28) ; Next guy points back to my previous
    std r0,SAVprev(r2) ; Store the old top virtual as my back chain
    mr r31,r2 ; My physical is now the head of the chain
    b sttoss64 ; Get the next one...

; A pool block that had no free entries now has one. Stick it on the pool list.

stputpool64:
    ld r28,SVpoolfwd(0) ; Get the first guy on the list
    li r0,saveanchor ; Point to the saveanchor
    std r2,SVpoolfwd(0) ; Put us on the top of the list
    std r28,SACnext(r2) ; We point to the old top
    std r2,SACprev(r28) ; Old top guy points back to us
    std r0,SACprev(r2) ; Our back points to the anchor
    b sttoss64 ; Go on to the next one...


; We are all done. Relocate pool release head, restore all, and go. This code
; is used both by the 32 and 64-bit paths.
; r9 = return address
; r10 = per-proc ptr
; r11 = MSR at entry
; r31 = free pool block list

stdone:
    bl saveunlock ; Unlock the saveanchor and set adjust field

    mr. r3,r31 ; Move release chain and see if there are any
    li r5,0 ; Assume either V=R or no release chain
    beq- stnorel ; Nothing to release...
    lwz r5,SACvrswap+4(r31) ; Get the vr conversion (only need low half if 64-bit)

stnorel:
    bl saveRestore ; restore translation and exceptions, turn off SF
    mtlr r9 ; Restore the return

    lwz r28,FM_SIZE+0(r1) ; Restore R28
    lwz r29,FM_SIZE+4(r1) ; Restore R29
    lwz r30,FM_SIZE+8(r1) ; Restore R30
    lwz r31,FM_SIZE+12(r1) ; Restore R31
    addi r1,r1,(FM_ALIGN(16)+FM_SIZE) ; Pop the stack
    xor r3,r3,r5 ; Convert release chain address to virtual
    rlwinm r3,r3,0,0,31 ; if 64-bit, clear upper half of virtual address

#if FPVECDBG
    lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
    li r2,0x2207 ; (TEST/DEBUG)
    oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
    sc ; (TEST/DEBUG)
#endif
    blr ; Return...


/*
 * ***************************
 * * s a v e _ r e c o v e r *
 * ***************************
 *
 * int save_recover(void);
 *
 * Returns nonzero if we can get enough saveareas to hit the target. We scan the free
 * pool. If we empty a pool block, we remove it from the pool list.
 */

    .align 5
    .globl EXT(save_recover)

LEXT(save_recover)
    mflr r9 ; save return address
    bl saveSetup ; turn translation and interrupts off, SF on, load many regs
    bl savelock ; lock the savearea anchor

    lwz r8,SVadjust(0) ; How many do we need to get?
    li r3,0 ; Get a 0
    mr. r8,r8 ; Do we need any?
    ble-- save_recover1 ; not any more
    bf-- pf64Bitb,saveRecover32 ; handle 32-bit processor
    b saveRecover64 ; handle 64-bit processor

save_recover1: ; by the time we locked the anchor, no longer short
    mtlr r9 ; Restore return
    stw r3,SVlock(0) ; Quick unlock (no need for sync or to set adjust, nothing changed)
#if FPVECDBG
    lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
    li r2,0x2208 ; (TEST/DEBUG)
    oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
    sc ; (TEST/DEBUG)
#endif
    b saveRestore ; turn translation etc back on, return to our caller

/*
 * *****************************
 * * s a v e R e c o v e r 3 2 *
 * *****************************
 *
 * Handle "save_recover" on 32-bit processors. At this point, translation and interrupts
 * are off, the savearea anchor is locked, and:
 * r8 = #saveareas to recover
 * r9 = return address
 * r10 = per-proc ptr
 * r11 = MSR at entry
 */

saveRecover32:
    li r6,saveanchor ; Start at pool anchor
    crclr cr1_eq ; initialize the loop test
    lwz r7,SVfreecnt(0) ; Get the current free count

; Loop over next block in free pool. r6 is the ptr to the last block we looked at.

srcnpool:
    lwz r6,SACnext+4(r6) ; Point to the next one
    cmplwi r6,saveanchor ; Have we wrapped?
    beq- srcdone ; Yes, did not have enough...

    lwz r5,SACalloc(r6) ; Pick up the allocation for this pool block

;
; NOTE: The savearea size must be 640 (0x280). We are doing a multiply by shifts and add.
; offset = (index << 9) + (index << 7)
;
#if SAVsize != 640
#error Savearea size is not 640!!!!!!!!!!!!
#endif
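; Worked example of the multiply (illustrative note): for free-slot index 3,
; (3 << 9) + (3 << 7) = 1536 + 384 = 1920 = 3 * 640, the byte offset of
; savearea 3 within its pool block.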
1326
55e303ae
A
1327; Loop over free savearea in current block.
1328; r5 = bitmap of free saveareas in block at r6 (ie, SACalloc)
1329; r6 = ptr to current free pool block
1330; r7 = free count
1331; r8 = #pages more we still need to recover
1332; r9 = return address
1333; r10 = per-proc ptr
1334; r11 = MSR at entry
1335; cr1 = beq if (r8==0)
1336
9bccf70c
A
1337srcnext: beq- cr1,srcdone ; We have no more to get...
1338
1339 lis r3,0x8000 ; Get the top bit on
1340 cntlzw r4,r5 ; Find a free slot
1341 addi r7,r7,1 ; Bump up the free count
1342 srw r3,r3,r4 ; Make a mask
1343 slwi r0,r4,7 ; First multiply by 128
1344 subi r8,r8,1 ; Decrement the need count
1345 slwi r2,r4,9 ; Then multiply by 512
1346 andc. r5,r5,r3 ; Clear out the "free" bit
1347 add r2,r2,r0 ; Sum to multiply by 640
1348
1349 stw r5,SACalloc(r6) ; Set new allocation bits
1350
1351 add r2,r2,r6 ; Get the actual address of the savearea
55e303ae 1352 lwz r3,SVfree+4(0) ; Get the head of the chain
9bccf70c 1353 cmplwi cr1,r8,0 ; Do we actually need any more?
55e303ae
A
1354 stw r2,SVfree+4(0) ; Push ourselves in the front
1355 stw r3,SAVprev+4(r2) ; Chain the rest of the list behind
9bccf70c
A
1356
1357 bne+ srcnext ; The pool block is not empty yet, try for another...
1358
55e303ae
A
1359 lwz r2,SACnext+4(r6) ; Get the next pointer
1360 lwz r3,SACprev+4(r6) ; Get the previous pointer
1361 stw r3,SACprev+4(r2) ; The previous of my next points to my previous
1362 stw r2,SACnext+4(r3) ; The next of my previous points to my next
9bccf70c 1363 bne+ cr1,srcnpool ; We still have more to do...
55e303ae
A
1364
1365
1366; Join here from 64-bit path when we have recovered all the saveareas we need to.
1367
1368srcdone: stw r7,SVfreecnt(0) ; Set the new free count
9bccf70c
A
1369 bl saveunlock ; Unlock the save and set adjust field
1370
1371 mtlr r9 ; Restore the return
9bccf70c
A
1372#if FPVECDBG
1373 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1374 li r2,0x2209 ; (TEST/DEBUG)
1375 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1376 sc ; (TEST/DEBUG)
1377#endif
55e303ae
A
1378 b saveRestore ; turn xlate and EE back on, SF off, and return to our caller
1379
1380
/*
 * *****************************
 * * s a v e R e c o v e r 6 4 *
 * *****************************
 *
 * Handle "save_recover" on 64-bit processors. At this point, translation and interrupts
 * are off, the savearea anchor is locked, and:
 * r8 = #saveareas to recover
 * r9 = return address
 * r10 = per-proc ptr
 * r11 = MSR at entry
 */

saveRecover64:
    li r6,saveanchor ; Start at pool anchor
    crclr cr1_eq ; initialize the loop test
    lwz r7,SVfreecnt(0) ; Get the current free count

; Loop over next block in free pool. r6 is the ptr to the last block we looked at.

srcnpool64:
    ld r6,SACnext(r6) ; Point to the next one
    cmpldi r6,saveanchor ; Have we wrapped?
    beq-- srcdone ; Yes, did not have enough...

    lwz r5,SACalloc(r6) ; Pick up the allocation for this pool block

; Loop over free saveareas in the current block.
; r5 = bitmap of free saveareas in block at r6 (ie, SACalloc)
; r6 = ptr to current free pool block
; r7 = free count
; r8 = #saveareas more we still need to recover
; r9 = return address
; r10 = per-proc ptr
; r11 = MSR at entry
; cr1 = beq if (r8==0)
;
; WARNING: as in the 32-bit path, we depend on (SAVsize==640)

srcnext64:
    beq-- cr1,srcdone ; We have no more to get...

    lis r3,0x8000 ; Get the top bit on
    cntlzw r4,r5 ; Find a free slot
    addi r7,r7,1 ; Bump up the free count
    srw r3,r3,r4 ; Make a mask
    slwi r0,r4,7 ; First multiply by 128
    subi r8,r8,1 ; Decrement the need count
    slwi r2,r4,9 ; Then multiply by 512
    andc. r5,r5,r3 ; Clear out the "free" bit
    add r2,r2,r0 ; Sum to multiply by 640

    stw r5,SACalloc(r6) ; Set new allocation bits

    add r2,r2,r6 ; Get the actual address of the savearea
    ld r3,SVfree(0) ; Get the head of the chain
    cmplwi cr1,r8,0 ; Do we actually need any more?
    std r2,SVfree(0) ; Push ourselves in the front
    std r3,SAVprev(r2) ; Chain the rest of the list behind

    bne++ srcnext64 ; The pool block is not empty yet, try for another...

    ld r2,SACnext(r6) ; Get the next pointer
    ld r3,SACprev(r6) ; Get the previous pointer
    std r3,SACprev(r2) ; The previous of my next points to my previous
    std r2,SACnext(r3) ; The next of my previous points to my next
    bne++ cr1,srcnpool64 ; We still have more to do...

    b srcdone


/*
 * *******************
 * * s a v e l o c k *
 * *******************
 *
 * Lock the savearea anchor, so we can manipulate the free list.
 * msr = interrupts and translation off
 * We destroy:
 * r8, r3, r12
 */
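/* Illustrative sketch (not part of the build): the lock is a simple spinlock
 * taken with load-reserve/store-conditional. In C-like pseudocode:
 *
 *	for (;;) {
 *		while (SVlock != 0)			// spin without reserving
 *			;
 *		r8 = load_reserved(&SVlock);		// lwarx
 *		if (r8 != 0) { kill_reservation(); continue; }
 *		if (store_conditional(&SVlock, 1))	// stwcx.
 *			break;				// got it; isync fences
 *	}						// later loads behind the lock
 */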
    .align 5

savelock:
    lwz r8,SVlock(0) ; See if lock is held
    cmpwi r8,0
    li r12,saveanchor ; Point to the saveanchor
    bne-- savelock ; loop until lock released...

savelock0:
    lwarx r8,0,r12 ; Grab the lock value
    cmpwi r8,0 ; taken?
    li r8,1 ; get nonzero to lock it with
    bne-- savelock1 ; already locked, wait for it to clear...
    stwcx. r8,0,r12 ; Try to seize that there durn lock
    isync ; assume we got it
    beqlr++ ; reservation not lost, so we have the lock
    b savelock0 ; Try again...

savelock1:
    li r8,lgKillResv ; Point to killing field
    stwcx. r8,0,r8 ; Kill reservation
    b savelock ; Start over....


/*
 * ***********************
 * * s a v e u n l o c k *
 * ***********************
 *
 * This is the common routine that sets the saveadjust field and unlocks the savearea
 * anchor.
 * msr = interrupts and translation off
 * We destroy:
 * r2, r5, r6, r8.
 */
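/* Illustrative sketch (not part of the build) of the SVadjust computation,
 * following the instructions below literally; signed 32-bit arithmetic:
 *
 *	if (SVfreecnt < FreeListMin) {
 *		adjust = FreeListMin - SVfreecnt;	// short on free list
 *	} else {
 *		total = SVfreecnt + SVinuse;
 *		t = total - (SVtarget - SaveLowHysteresis)
 *		      - (SaveLowHysteresis + SaveHighHysteresis + 1);
 *		mask = t >> 31;				// arithmetic shift: all ones or zero
 *		adjust = (SVtarget - total) & mask;	// branch-free select
 *	}
 *	SVadjust = adjust;	// >0: allocate more; <0: trim; 0: leave alone
 *	SVlock = 0;		// the eieio orders the stores before the unlock
 */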
    .align 5
saveunlock:
    lwz r6,SVfreecnt(0) ; Get the number on the free list
    lwz r5,SVinuse(0) ; Pick up the in use count
    subic. r8,r6,FreeListMin ; do we have at least the minimum?
    lwz r2,SVtarget(0) ; Get the target
    neg r8,r8 ; assuming we are short, get r8 <- shortfall
    blt-- saveunlock1 ; skip if fewer than minimum on free list

    add r6,r6,r5 ; Get the total number of saveareas
    addi r5,r2,-SaveLowHysteresis ; Find low end of acceptable range
    sub r5,r6,r5 ; Make everything below hysteresis negative
    sub r2,r2,r6 ; Get the distance from the target
    addi r5,r5,-(SaveLowHysteresis + SaveHighHysteresis + 1) ; Subtract full hysteresis range
    srawi r5,r5,31 ; Get 0xFFFFFFFF if outside range or 0 if inside
    and r8,r2,r5 ; r8 <- 0 if in range or distance to target if not

saveunlock1:
    li r5,0 ; Set a clear value
    stw r8,SVadjust(0) ; Set the adjustment value
    eieio ; Make sure everything is done
    stw r5,SVlock(0) ; Unlock the savearea chain
    blr


/*
 * *******************
 * * s a v e _ c p v *
 * *******************
 *
 * struct savearea *save_cpv(addr64_t saveAreaPhysAddr);
 *
 * Converts a physical savearea address to virtual. Called with translation on
 * and in 32-bit mode. Note that the argument is passed as a long long in (r3,r4).
 */
    .align 5
    .globl EXT(save_cpv)

LEXT(save_cpv)
    mflr r9 ; save return address
    mr r8,r3 ; save upper half of phys address here
    bl saveSetup ; turn off translation and interrupts, turn SF on
    rlwinm r5,r4,0,0,19 ; Round back to the start of the physical savearea block
    bf-- pf64Bitb,save_cpv1 ; skip if 32-bit processor
    rldimi r5,r8,32,0 ; r5 <- 64-bit phys address of block
save_cpv1:
    lwz r6,SACvrswap+4(r5) ; Get the conversion to virtual (only need low half if 64-bit)
    mtlr r9 ; restore return address
    xor r3,r4,r6 ; convert phys to virtual
    rlwinm r3,r3,0,0,31 ; if 64-bit, zero upper half of virtual address
    b saveRestore ; turn translation etc back on, SF off, and return r3


/*
 * *********************
 * * s a v e S e t u p *
 * *********************
 *
 * This routine is called at the start of all the save-area subroutines.
 * It turns off translation, disables interrupts, turns on 64-bit mode,
 * and sets up cr6 with the feature flags (especially pf64Bit).
 *
 * Note that most save-area routines cannot take _any_ interrupt (such as a
 * PTE miss) once the savearea anchor is locked, since that would result in
 * instant deadlock as we need a save-area to process any exception.
 * We set up:
 * r10 = per-proc ptr
 * r11 = old MSR
 * cr5 = pfNoMSRir feature flag
 * cr6 = pf64Bit feature flag
 *
 * We use r0, r3, r10, and r11.
 */
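/* Illustrative sketch (not part of the build) of the MSR manipulation below,
 * in C-like pseudocode:
 *
 *	old = mfmsr();
 *	new = old & ~(MSR_IR | MSR_DR | MSR_EE);	// translation + interrupts off
 *	if (pf64Bit) {
 *		mtmsrd(new | MSR_SF);			// also enter 64-bit mode
 *	} else if (!pfNoMSRir) {
 *		mtmsr(new);
 *	} else {
 *		loadMSR_syscall(new);			// some 32-bit CPUs cannot clear
 *	}						// IR/DR with a plain mtmsr
 *	isync();					// r11 keeps old for saveRestore
 */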

saveSetup:
    mfmsr r11 ; get msr
    mfsprg r3,2 ; get feature flags
    li r0,0
    mtcrf 0x2,r3 ; copy pf64Bit to cr6
    ori r0,r0,lo16(MASK(MSR_IR)+MASK(MSR_DR)+MASK(MSR_EE))
    mtcrf 0x4,r3 ; copy pfNoMSRir to cr5
    andc r3,r11,r0 ; turn off IR, DR, and EE
    li r0,1 ; get a 1 in case it's a 64-bit machine
    bf-- pf64Bitb,saveSetup1 ; skip if not a 64-bit machine
    rldimi r3,r0,63,MSR_SF_BIT ; turn SF (bit 0) on
    mtmsrd r3 ; turn translation and interrupts off, 64-bit mode on
    isync ; wait for it to happen
    mfsprg r10,0 ; get per-proc ptr
    blr
saveSetup1: ; here on 32-bit machines
    bt- pfNoMSRirb,saveSetup2 ; skip if cannot turn off IR with a mtmsr
    mtmsr r3 ; turn translation and interrupts off
    isync ; wait for it to happen
    mfsprg r10,0 ; get per-proc ptr
    blr
saveSetup2: ; here if pfNoMSRir set for this machine
    li r0,loadMSR ; we will "mtmsr r3" via system call
    sc
    mfsprg r10,0 ; get per-proc ptr
    blr


/*
 * *************************
 * * s a v e R e s t o r e *
 * *************************
 *
 * Undoes the effect of calling "saveSetup", ie it turns relocation and interrupts back on,
 * and turns 64-bit mode back off.
 * r11 = old MSR
 * cr6 = pf64Bit feature flag
 */

saveRestore:
    bt++ pf64Bitb,saveRestore64 ; handle a 64-bit processor
saveRestore32:
    mtmsr r11 ; restore MSR
    isync ; wait for translation to start up
    blr
saveRestore64: ; 64-bit processor
    mtmsrd r11 ; restore MSR
    isync ; wait for changes to happen
    blr