/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <assym.s>
#include <debug.h>
#include <cpus.h>
#include <db_machine_commands.h>
#include <mach_rt.h>

#include <mach_debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <mach/ppc/vm_param.h>

#define INSTRUMENT 0

            .text

;
;            0        0        1        2        3        4        4        5       6
;            0        8        6        4        2        0        8        6       3
;           +--------+--------+--------+--------+--------+--------+--------+--------+
;           |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - EA
;           +--------+--------+--------+--------+--------+--------+--------+--------+
;
;            0        0        1
;            0        8        6
;           +--------+--------+--------+
;           |//////BB|BBBBBBBB|BBBB////| - SID - base
;           +--------+--------+--------+
;
;            0        0        1
;            0        8        6
;           +--------+--------+--------+
;           |////////|11111111|111111//| - SID - copy 1
;           +--------+--------+--------+
;
;            0        0        1
;            0        8        6
;           +--------+--------+--------+
;           |////////|//222222|22222222| - SID - copy 2
;           +--------+--------+--------+
;
;            0        0        1
;            0        8        6
;           +--------+--------+--------+
;           |//////33|33333333|33//////| - SID - copy 3 - not needed
;           +--------+--------+--------+   for 65 bit VPN
;
;            0        0        1        2        3        4        4     5     5
;            0        8        6        4        2        0        8     1     5
;           +--------+--------+--------+--------+--------+--------+--------+
;           |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////| - SID Hash - this is all
;           +--------+--------+--------+--------+--------+--------+--------+   SID copies ORed
;
;            0        0        1        2        3        4        4     5     5
;            0        8        6        4        2        0        8     1     5
;           +--------+--------+--------+--------+--------+--------+--------+
;           |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////| - Shifted high order EA
;           +--------+--------+--------+--------+--------+--------+--------+   left shifted "segment"
;                                                                               part of EA to make
;                                                                               room for SID base
;
;
;            0        0        1        2        3        4        4     5     5
;            0        8        6        4        2        0        8     1     5
;           +--------+--------+--------+--------+--------+--------+--------+
;           |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////| - VSID - SID Hash XORed
;           +--------+--------+--------+--------+--------+--------+--------+   with shifted EA
;
;            0        0        1        2        3        4        4        5        6        7       7
;            0        8        6        4        2        0        8        6        4        2       9
;           +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;           |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - VPN
;           +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;
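;
;           For reference, a loose C rendering of the construction pictured above.
;           This is an illustrative sketch only: the variable names and shift
;           counts here are read off the diagrams, not taken from kernel headers.
;
;               uint64_t sid_hash, vsid;
;
;               sid_hash = sid_base | sid_copy1 | sid_copy2;   /* all SID copies ORed together       */
;               vsid = sid_hash ^ ((ea >> 28) << 4);           /* XOR with the left-shifted segment  */
;                                                              /* portion of the EA                  */
;               /* The VPN is then the VSID over the high bits, with the page (P)  */
;               /* and byte (x) offset bits of the EA appended below it, as drawn. */
;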


/* addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping
 *
 *	Maps a page or block into a pmap
 *
 *	Returns 0 if add worked or the vaddr of the first overlap if not
 *
 *	Make mapping - not block or I/O - note: this is low-level, upper should remove duplicates
 *
 *	1) bump mapping busy count
 *	2) lock pmap share
 *	3) find mapping full path - finds all possible list previous elements
 *	4) upgrade pmap to exclusive
 *	5) add mapping to search list
 *	6) find physent
 *	7) lock physent
 *	8) add to physent
 *	9) unlock physent
 *	10) unlock pmap
 *	11) drop mapping busy count
 *
 *
 *	Make mapping - block or I/O - note: this is low-level, upper should remove duplicates
 *
 *	1) bump mapping busy count
 *	2) lock pmap share
 *	3) find mapping full path - finds all possible list previous elements
 *	4) upgrade pmap to exclusive
 *	5) add mapping to search list
 *	6) unlock pmap
 *	7) drop mapping busy count
 *
 */
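
/*
 *	Illustrative C-level sketch of the first (non-block) recipe above. The
 *	helper names mirror routines called in the code below, but the signatures
 *	and error handling shown here are assumptions, not declarations from
 *	this file:
 *
 *		mapBumpBusy(mp);					// 1) bump mapping busy count
 *		if (sxlkShared(&pmap->pmapSXlk)) return mapRtBadLk;	// 2) lock pmap share
 *		if (mapSearchFull(pmap, va)) return overlap_va;		// 3) full search, checking overlap
 *		sxlkPromote(&pmap->pmapSXlk);		// 4) upgrade to exclusive (rescan if dropped)
 *		mapInsert(pmap, mp);				// 5) add mapping to search list
 *		pp = mapPhysFindLock(mp);			// 6/7) find and lock the physent
 *		// 8) chain the mapping onto the physent alias list
 *		mapPhysUnlock(pp);					// 9) unlock physent
 *		sxlkUnlock(&pmap->pmapSXlk);		// 10) unlock pmap
 *		mapDropBusy(mp);					// 11) drop mapping busy count
 */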

            .align  5
            .globl  EXT(hw_add_map)

LEXT(hw_add_map)

            stwu    r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1)   ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r17,FM_ARG0+0x00(r1)        ; Save a register
            stw     r18,FM_ARG0+0x04(r1)        ; Save a register
            stw     r19,FM_ARG0+0x08(r1)        ; Save a register
            mfsprg  r19,2                       ; Get feature flags
            stw     r20,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r21,FM_ARG0+0x10(r1)        ; Save a register
            mtcrf   0x02,r19                    ; move pf64Bit cr6
            stw     r22,FM_ARG0+0x14(r1)        ; Save a register
            stw     r23,FM_ARG0+0x18(r1)        ; Save a register
            stw     r24,FM_ARG0+0x1C(r1)        ; Save a register
            stw     r25,FM_ARG0+0x20(r1)        ; Save a register
            stw     r26,FM_ARG0+0x24(r1)        ; Save a register
            stw     r27,FM_ARG0+0x28(r1)        ; Save a register
            stw     r28,FM_ARG0+0x2C(r1)        ; Save a register
            stw     r29,FM_ARG0+0x30(r1)        ; Save a register
            stw     r30,FM_ARG0+0x34(r1)        ; Save a register
            stw     r31,FM_ARG0+0x38(r1)        ; Save a register
            stw     r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Save the return

            rlwinm  r11,r4,0,0,19               ; Round down to get mapping block address
            mr      r28,r3                      ; Save the pmap
            mr      r31,r4                      ; Save the mapping
            bt++    pf64Bitb,hamSF1             ; skip if 64-bit (only they take the hint)
            lwz     r20,pmapvr+4(r3)            ; Get conversion mask for pmap
            lwz     r21,mbvrswap+4(r11)         ; Get conversion mask for mapping

            b       hamSF1x                     ; Done...

hamSF1:     ld      r20,pmapvr(r3)              ; Get conversion mask for pmap
            ld      r21,mbvrswap(r11)           ; Get conversion mask for mapping

hamSF1x:    bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            mr      r17,r11                     ; Save the MSR
            xor     r28,r28,r20                 ; Convert the pmap to physical addressing
            xor     r31,r31,r21                 ; Convert the mapping to physical addressing

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            lwz     r24,mpFlags(r31)            ; Pick up the flags
            bne--   hamBadLock                  ; Nope...

            li      r21,0                       ; Remember that we have the shared lock

;
;           Note that we do a full search (i.e., no shortcut level skips, etc.)
;           here so that we will know the previous elements so we can dequeue them
;           later.
;

hamRescan:  lwz     r4,mpVAddr(r31)             ; Get the new vaddr top half
            lwz     r5,mpVAddr+4(r31)           ; Get the new vaddr bottom half
            mr      r3,r28                      ; Pass in pmap to search
            lhz     r23,mpBSize(r31)            ; Get the block size for later
            mr      r29,r4                      ; Save top half of vaddr for later
            mr      r30,r5                      ; Save bottom half of vaddr for later

#if INSTRUMENT
            mfspr   r0,pmc1                     ; INSTRUMENT - saveinstr[16] - Take stamp before mapSearchFull
            stw     r0,0x6100+(16*16)+0x0(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc2                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(16*16)+0x4(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc3                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(16*16)+0x8(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc4                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(16*16)+0xC(0)    ; INSTRUMENT - Save it
#endif

            bl      EXT(mapSearchFull)          ; Go see if we can find it

#if INSTRUMENT
            mfspr   r0,pmc1                     ; INSTRUMENT - saveinstr[17] - Take stamp after mapSearchFull
            stw     r0,0x6100+(17*16)+0x0(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc2                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(17*16)+0x4(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc3                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(17*16)+0x8(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc4                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(17*16)+0xC(0)    ; INSTRUMENT - Save it
#endif

            andi.   r0,r24,mpNest               ; See if we are a nest
            rlwinm  r23,r23,12,0,19             ; Convert standard block size to bytes
            lis     r0,0x8000                   ; Get 0xFFFFFFFF80000000
            li      r22,0                       ; Assume high part of size is 0
            beq++   hamNoNest                   ; This is not a nest...

            rlwinm  r22,r23,16,16,31            ; Convert partially converted size to segments
            rlwinm  r23,r23,16,0,3              ; Finish shift

hamNoNest:  add     r0,r0,r0                    ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
            mr.     r3,r3                       ; Did we find a mapping here?
            or      r0,r0,r30                   ; Make sure a carry will propagate all the way in 64-bit
            crmove  cr5_eq,cr0_eq               ; Remember if we found the mapping
            addc    r9,r0,r23                   ; Add size to get last page in new range
            or.     r0,r4,r5                    ; Are we beyond the end?
            adde    r8,r29,r22                  ; Add the rest of the length on
            bne--   cr5,hamOverlay              ; Yeah, this is no good, can not double map...
            rlwinm  r9,r9,0,0,31                ; Clean top half of sum
            beq++   hamFits                     ; We are at the end...

            cmplw   cr1,r9,r5                   ; Is the bottom part of our end less?
            cmplw   r8,r4                       ; Is our end before the next (top part)
            crand   cr0_eq,cr0_eq,cr1_lt        ; Is the second half less and the first half equal?
            cror    cr0_eq,cr0_eq,cr0_lt        ; Or is the top half less

            bf--    cr0_eq,hamOverlay           ; No, we do not fit, there is an overlay...

;
;           Here we try to convert to an exclusive lock.  This will fail if someone else
;           has it shared.
;
hamFits:    mr.     r21,r21                     ; Do we already have the exclusive lock?
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock

            bne--   hamGotX                     ; We already have the exclusive...

            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            beq++   hamGotX                     ; Yeah...

;
;           Since we could not promote our lock, we need to convert it.
;           That means that we drop the shared lock and wait to get it
;           exclusive.  Since we release the lock, we need to do the look up
;           again.
;

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hamBadLock                  ; Nope, we must have timed out...

            li      r21,1                       ; Remember that we have the exclusive lock
            b       hamRescan                   ; Go look again...
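
;
;           In C terms, the retry above is roughly (a sketch only; the sxlk*
;           signatures are assumptions, not declarations from this file):
;
;               if (sxlkPromote(&pmap->pmapSXlk) == 0) goto gotExclusive;
;               if (sxlkConvert(&pmap->pmapSXlk) != 0) return mapRtBadLk;   // timed out
;               haveExclusive = 1;
;               goto rescan;        // the lock was dropped, so search again
;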

            .align  5

hamGotX:
#if INSTRUMENT
            mfspr   r3,pmc1                     ; INSTRUMENT - saveinstr[18] - Take stamp before mapInsert
            stw     r3,0x6100+(18*16)+0x0(0)    ; INSTRUMENT - Save it
            mfspr   r3,pmc2                     ; INSTRUMENT - Get stamp
            stw     r3,0x6100+(18*16)+0x4(0)    ; INSTRUMENT - Save it
            mfspr   r3,pmc3                     ; INSTRUMENT - Get stamp
            stw     r3,0x6100+(18*16)+0x8(0)    ; INSTRUMENT - Save it
            mfspr   r3,pmc4                     ; INSTRUMENT - Get stamp
            stw     r3,0x6100+(18*16)+0xC(0)    ; INSTRUMENT - Save it
#endif
            mr      r3,r28                      ; Get the pmap to insert into
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapInsert)              ; Insert the mapping into the list

#if INSTRUMENT
            mfspr   r4,pmc1                     ; INSTRUMENT - saveinstr[19] - Take stamp after mapInsert
            stw     r4,0x6100+(19*16)+0x0(0)    ; INSTRUMENT - Save it
            mfspr   r4,pmc2                     ; INSTRUMENT - Get stamp
            stw     r4,0x6100+(19*16)+0x4(0)    ; INSTRUMENT - Save it
            mfspr   r4,pmc3                     ; INSTRUMENT - Get stamp
            stw     r4,0x6100+(19*16)+0x8(0)    ; INSTRUMENT - Save it
            mfspr   r4,pmc4                     ; INSTRUMENT - Get stamp
            stw     r4,0x6100+(19*16)+0xC(0)    ; INSTRUMENT - Save it
#endif

            lhz     r8,mpSpace(r31)             ; Get the address space
            mfsdr1  r7                          ; Get the hash table base/bounds
            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            andi.   r0,r24,mpNest|mpBlock       ; Is this a nest or block?

            rlwimi  r8,r8,14,4,17               ; Double address space
            rlwinm  r9,r30,20,16,31             ; Isolate the page number
            rlwinm  r10,r30,18,14,17            ; Shift EA[32:35] down to correct spot in VSID (actually shift up 14)
            rlwimi  r8,r8,28,0,3                ; Get the last nybble of the hash
            rlwimi  r10,r29,18,0,13             ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
            rlwinm  r7,r7,0,16,31               ; Isolate length mask (or count)
            addi    r4,r4,1                     ; Bump up the mapped page count
            xor     r10,r10,r8                  ; Calculate the low 32 bits of the VSID
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            xor     r9,r9,r10                   ; Get the hash to the PTEG

            bne--   hamDoneNP                   ; This is a block or nest, therefore, no physent...

            bl      mapPhysFindLock             ; Go find and lock the physent

            bt++    pf64Bitb,ham64              ; This is 64-bit...

            lwz     r11,ppLink+4(r3)            ; Get the alias chain pointer
            rlwinm  r7,r7,16,0,15               ; Get the PTEG wrap size
            slwi    r9,r9,6                     ; Make PTEG offset
            ori     r7,r7,0xFFC0                ; Stick in the bottom part
            rlwinm  r12,r11,0,0,25              ; Clean it up
            and     r9,r9,r7                    ; Wrap offset into table
            mr      r4,r31                      ; Set the link to install
            stw     r9,mpPte(r31)               ; Point the mapping at the PTEG (exact offset is invalid)
            stw     r12,mpAlias+4(r31)          ; Move to the mapping
            bl      mapPhyCSet32                ; Install the link
            b       hamDone                     ; Go finish up...

            .align  5

ham64:      li      r0,0xFF                     ; Get mask to clean up alias pointer
            subfic  r7,r7,46                    ; Get number of leading zeros
            eqv     r4,r4,r4                    ; Get all ones
            ld      r11,ppLink(r3)              ; Get the alias chain pointer
            rldicl  r0,r0,62,0                  ; Rotate clean up mask to get 0xC00000000000003F
            srd     r4,r4,r7                    ; Get the wrap mask
            sldi    r9,r9,7                     ; Change hash to PTEG offset
            andc    r11,r11,r0                  ; Clean out the lock and flags
            and     r9,r9,r4                    ; Wrap to PTEG
            mr      r4,r31                      ; Set the link to install
            stw     r9,mpPte(r31)               ; Point the mapping at the PTEG (exact offset is invalid)
            std     r11,mpAlias(r31)            ; Set the alias pointer in the mapping

            bl      mapPhyCSet64                ; Install the link

hamDone:    bl      mapPhysUnlock               ; Unlock the physent chain

hamDoneNP:  la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            mr      r3,r31                      ; Get the mapping pointer
            bl      mapDropBusy                 ; Drop the busy count

            li      r3,0                        ; Set successful return
            li      r4,0                        ; Set successful return

hamReturn:  bt++    pf64Bitb,hamR64             ; Skip if 64-bit...

            mtmsr   r17                         ; Restore enables/translation/etc.
            isync
            b       hamReturnC                  ; Join common...

hamR64:     mtmsrd  r17                         ; Restore enables/translation/etc.
            isync

hamReturnC:
#if INSTRUMENT
            mfspr   r0,pmc1                     ; INSTRUMENT - saveinstr[20] - Take stamp before return
            stw     r0,0x6100+(20*16)+0x0(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc2                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(20*16)+0x4(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc3                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(20*16)+0x8(0)    ; INSTRUMENT - Save it
            mfspr   r0,pmc4                     ; INSTRUMENT - Get stamp
            stw     r0,0x6100+(20*16)+0xC(0)    ; INSTRUMENT - Save it
#endif
            lwz     r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Get the return
            lwz     r17,FM_ARG0+0x00(r1)        ; Restore a register
            lwz     r18,FM_ARG0+0x04(r1)        ; Restore a register
            lwz     r19,FM_ARG0+0x08(r1)        ; Restore a register
            lwz     r20,FM_ARG0+0x0C(r1)        ; Restore a register
            mtlr    r0                          ; Restore the return
            lwz     r21,FM_ARG0+0x10(r1)        ; Restore a register
            lwz     r22,FM_ARG0+0x14(r1)        ; Restore a register
            lwz     r23,FM_ARG0+0x18(r1)        ; Restore a register
            lwz     r24,FM_ARG0+0x1C(r1)        ; Restore a register
            lwz     r25,FM_ARG0+0x20(r1)        ; Restore a register
            lwz     r26,FM_ARG0+0x24(r1)        ; Restore a register
            lwz     r27,FM_ARG0+0x28(r1)        ; Restore a register
            lwz     r28,FM_ARG0+0x2C(r1)        ; Restore a register
            lwz     r29,FM_ARG0+0x30(r1)        ; Restore a register
            lwz     r30,FM_ARG0+0x34(r1)        ; Restore a register
            lwz     r31,FM_ARG0+0x38(r1)        ; Restore a register
            lwz     r1,0(r1)                    ; Pop the stack

            blr                                 ; Leave...


            .align  5

hamOverlay: lwz     r22,mpFlags(r3)             ; Get the overlay flags
            li      r0,mpC|mpR                  ; Get a mask to turn off RC bits
            lwz     r23,mpFlags(r31)            ; Get the requested flags
            lwz     r20,mpVAddr(r3)             ; Get the overlay address
            lwz     r8,mpVAddr(r31)             ; Get the requested address
            lwz     r21,mpVAddr+4(r3)           ; Get the overlay address
            lwz     r9,mpVAddr+4(r31)           ; Get the requested address
            lhz     r10,mpBSize(r3)             ; Get the overlay length
            lhz     r11,mpBSize(r31)            ; Get the requested length
            lwz     r24,mpPAddr(r3)             ; Get the overlay physical address
            lwz     r25,mpPAddr(r31)            ; Get the requested physical address
            andc    r21,r21,r0                  ; Clear RC bits
            andc    r9,r9,r0                    ; Clear RC bits

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            rlwinm. r0,r22,0,mpRIPb,mpRIPb      ; Are we in the process of removing this one?
            mr      r3,r20                      ; Save the top of the colliding address
            rlwinm  r4,r21,0,0,19               ; Save the bottom of the colliding address

            bne++   hamRemv                     ; Removing, go say so so we can help...

            cmplw   r20,r8                      ; High part of vaddr the same?
            cmplw   cr1,r21,r9                  ; Low part?
            crand   cr5_eq,cr0_eq,cr1_eq        ; Remember if same

            cmplw   r10,r11                     ; Size the same?
            cmplw   cr1,r24,r25                 ; Physical address?
            crand   cr5_eq,cr5_eq,cr0_eq        ; Remember
            crand   cr5_eq,cr5_eq,cr1_eq        ; Remember if same

            xor     r23,r23,r22                 ; Check for differences in flags
            ori     r23,r23,mpFIP               ; "Fault in Progress" is ok to be different
            xori    r23,r23,mpFIP               ; Force mpFIP off
            rlwinm. r0,r23,0,mpSpecialb,mpListsb-1  ; See if any important flags are different
            crand   cr5_eq,cr5_eq,cr0_eq        ; Merge in final check
            bf--    cr5_eq,hamReturn            ; This is not the same, so we just return a collision...

            ori     r4,r4,mapRtMapDup           ; Set duplicate
            b       hamReturn                   ; And leave...

hamRemv:    ori     r4,r4,mapRtRemove           ; We are in the process of removing the collision
            b       hamReturn                   ; Come back yall...

            .align  5

hamBadLock: li      r3,0                        ; Set lock time out error code
            li      r4,mapRtBadLk               ; Set lock time out error code
            b       hamReturn                   ; Leave....



/*
 *	mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
 *
 *	Upon entry, R3 contains a pointer to a pmap.  Since vaddr is
 *	a 64-bit quantity, it is a long long so it is in R4 and R5.
 *
 *	We return the virtual address of the removed mapping in R3.
 *
 *	Note that this is designed to be called from 32-bit mode with a stack.
 *
 *	We disable translation and all interruptions here.  This keeps us
 *	from having to worry about a deadlock due to having anything locked
 *	and needing it to process a fault.
 *
 *	Note that this must be done with both interruptions off and VM off
 *
 *	Remove mapping via pmap, regular page, no pte
 *
 *	1) lock pmap share
 *	2) find mapping full path - finds all possible list previous elements
 *	3) upgrade pmap to exclusive
 *	4) bump mapping busy count
 *	5) remove mapping from search list
 *	6) unlock pmap
 *	7) lock physent
 *	8) remove from physent
 *	9) unlock physent
 *	10) drop mapping busy count
 *	11) drain mapping busy count
 *
 *
 *	Remove mapping via pmap, regular page, with pte
 *
 *	1) lock pmap share
 *	2) find mapping full path - finds all possible list previous elements
 *	3) upgrade lock to exclusive
 *	4) bump mapping busy count
 *	5) lock PTEG
 *	6) invalidate pte and tlbie
 *	7) atomic merge rc into physent
 *	8) unlock PTEG
 *	9) remove mapping from search list
 *	10) unlock pmap
 *	11) lock physent
 *	12) remove from physent
 *	13) unlock physent
 *	14) drop mapping busy count
 *	15) drain mapping busy count
 *
 *
 *	Remove mapping via pmap, I/O or block
 *
 *	1) lock pmap share
 *	2) find mapping full path - finds all possible list previous elements
 *	3) upgrade lock to exclusive
 *	4) bump mapping busy count
 *	5) mark remove-in-progress
 *	6) check and bump remove chunk cursor if needed
 *	7) unlock pmap
 *	8) if something to invalidate, go to step 11
 *
 *	9) drop busy
 *	10) return with mapRtRemove to force higher level to call again
 *
 *	11) lock PTEG
 *	12) invalidate ptes, no tlbie
 *	13) unlock PTEG
 *	14) repeat 11 - 13 for all pages in chunk
 *	15) if not final chunk, go to step 9
 *	16) invalidate tlb entries for the whole block map but no more than the full tlb
 *	17) lock pmap share
 *	18) find mapping full path - finds all possible list previous elements
 *	19) upgrade lock to exclusive
 *	20) remove mapping from search list
 *	21) drop mapping busy count
 *	22) drain mapping busy count
 *
 */
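
/*
 *	Illustrative sketch of how a caller is expected to drive the block/I-O
 *	case above (C pseudocode; the exact prototype of hw_rem_map and the
 *	encoding of its return value are assumptions here, not declarations
 *	from this file):
 *
 *		addr64_t next;
 *		mapping *mp;
 *
 *		do {
 *			mp = hw_rem_map(pmap, va, &next);	// may invalidate only one chunk per call
 *		} while ((unsigned int)mp == mapRtRemove);	// keep calling until the remove finishes
 */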

            .align  5
            .globl  EXT(hw_rem_map)

LEXT(hw_rem_map)

;
;           NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE
;           THE HW_PURGE_* ROUTINES ALSO
;

#define hrmStackSize ((31-15+1)*4)+4
            stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)   ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r15,FM_ARG0+0x00(r1)        ; Save a register
            stw     r16,FM_ARG0+0x04(r1)        ; Save a register
            stw     r17,FM_ARG0+0x08(r1)        ; Save a register
            stw     r18,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r19,FM_ARG0+0x10(r1)        ; Save a register
            mfsprg  r19,2                       ; Get feature flags
            stw     r20,FM_ARG0+0x14(r1)        ; Save a register
            stw     r21,FM_ARG0+0x18(r1)        ; Save a register
            mtcrf   0x02,r19                    ; move pf64Bit cr6
            stw     r22,FM_ARG0+0x1C(r1)        ; Save a register
            stw     r23,FM_ARG0+0x20(r1)        ; Save a register
            stw     r24,FM_ARG0+0x24(r1)        ; Save a register
            stw     r25,FM_ARG0+0x28(r1)        ; Save a register
            stw     r26,FM_ARG0+0x2C(r1)        ; Save a register
            stw     r27,FM_ARG0+0x30(r1)        ; Save a register
            stw     r28,FM_ARG0+0x34(r1)        ; Save a register
            stw     r29,FM_ARG0+0x38(r1)        ; Save a register
            stw     r30,FM_ARG0+0x3C(r1)        ; Save a register
            stw     r31,FM_ARG0+0x40(r1)        ; Save a register
            stw     r6,FM_ARG0+0x44(r1)         ; Save address to save next mapped vaddr
            stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)   ; Save the return

            bt++    pf64Bitb,hrmSF1             ; skip if 64-bit (only they take the hint)
            lwz     r9,pmapvr+4(r3)             ; Get conversion mask
            b       hrmSF1x                     ; Done...

hrmSF1:     ld      r9,pmapvr(r3)               ; Get conversion mask

hrmSF1x:
            bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            xor     r28,r3,r9                   ; Convert the pmap to physical addressing

;
;           Here is where we join in from the hw_purge_* routines
;

hrmJoin:    mfsprg  r19,2                       ; Get feature flags again (for alternate entries)

            mr      r17,r11                     ; Save the MSR
            mr      r29,r4                      ; Top half of vaddr
            mr      r30,r5                      ; Bottom half of vaddr

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            bne--   hrmBadLock                  ; Nope...

;
;           Note that we do a full search (i.e., no shortcut level skips, etc.)
;           here so that we will know the previous elements so we can dequeue them
;           later.  Note: we get back mpFlags in R7.
;

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Go see if we can find it

            andi.   r0,r7,lo16(mpPerm|mpSpecial|mpNest)   ; Is this nested, special, or a perm mapping?
            mr      r20,r7                      ; Remember mpFlags
            rlwinm  r0,r7,0,mpRemovableb,mpRemovableb   ; Are we allowed to remove it?
            crmove  cr5_eq,cr0_eq               ; Remember if we should remove this
            mr.     r31,r3                      ; Did we? (And remember mapping address for later)
            cmplwi  cr1,r0,0                    ; Are we allowed to remove?
            mr      r15,r4                      ; Save top of next vaddr
            crorc   cr5_eq,cr5_eq,cr1_eq        ; cr5_eq is true if this is not removable
            mr      r16,r5                      ; Save bottom of next vaddr
            beq     hrmNotFound                 ; Nope, not found...

            bf--    cr5_eq,hrmPerm              ; This one can't be removed...
;
;           Here we try to promote to an exclusive lock.  This will fail if someone else
;           has it shared.
;

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            beq++   hrmGotX                     ; Yeah...

;
;           Since we could not promote our lock, we need to convert it.
;           That means that we drop the shared lock and wait to get it
;           exclusive.  Since we release the lock, we need to do the look up
;           again.
;

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hrmBadLock                  ; Nope, we must have timed out...

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Rescan the list

            andi.   r0,r7,lo16(mpPerm|mpSpecial|mpNest)   ; Is this nested, special, or a perm mapping?
            rlwinm  r0,r7,0,mpRemovableb,mpRemovableb   ; Are we allowed to remove it?
            crmove  cr5_eq,cr0_eq               ; Remember if we should remove this
            mr.     r31,r3                      ; Did we lose it when we converted?
            cmplwi  cr1,r0,0                    ; Are we allowed to remove?
            mr      r20,r7                      ; Remember mpFlags
            crorc   cr5_eq,cr5_eq,cr1_eq        ; cr5_eq is true if this is not removable
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq--   hrmNotFound                 ; Yeah, we did, someone tossed it for us...

            bf--    cr5_eq,hrmPerm              ; This one can't be removed...

;
;           We have an exclusive lock on the mapping chain.  And we
;           also have the busy count bumped in the mapping so it can
;           not vanish on us.
;

hrmGotX:    mr      r3,r31                      ; Get the mapping
            bl      mapBumpBusy                 ; Bump up the busy count

;
;           Invalidate any PTEs associated with this
;           mapping (more than one if a block) and accumulate the reference
;           and change bits.
;
;           Here is also where we need to split 32- and 64-bit processing
;

            lwz     r21,mpPte(r31)              ; Grab the offset to the PTE
            rlwinm  r23,r29,0,1,0               ; Copy high order vaddr to high if 64-bit machine
            mfsdr1  r29                         ; Get the hash table base and size
            rlwinm  r0,r20,0,mpBlockb,mpBlockb  ; Is this a block mapping?
            andi.   r2,r20,lo16(mpSpecial|mpNest)   ; Is this nest or special mapping?
            cmplwi  cr5,r0,0                    ; Remember if this is a block mapping
            rlwinm  r0,r21,0,mpHValidb,mpHValidb   ; See if we actually have a PTE
            ori     r2,r2,0xFFFF                ; Get mask to clean out hash table base (works for both 32- and 64-bit)
            cmpwi   cr1,r0,0                    ; Have we made a PTE for this yet?
            rlwinm  r21,r21,0,0,30              ; Clear out valid bit
            crorc   cr0_eq,cr1_eq,cr0_eq        ; No need to look at PTE if none or a special mapping
            rlwimi  r23,r30,0,0,31              ; Insert low under high part of address
            andc    r29,r29,r2                  ; Clean up hash table base
            li      r22,0                       ; Clear this on out (also sets RC to 0 if we bail)
            mr      r30,r23                     ; Move the now merged vaddr to the correct register
            add     r26,r29,r21                 ; Point to the PTEG slot

            bt++    pf64Bitb,hrmSplit64         ; Go do 64-bit version...

            rlwinm  r9,r21,28,4,29              ; Convert PTEG to PCA entry
            bne-    cr5,hrmBlock32              ; Go treat block specially...
            subfic  r9,r9,-4                    ; Get the PCA entry offset
            bt-     cr0_eq,hrmPysDQ32           ; Skip next if no possible PTE...
            add     r7,r9,r29                   ; Point to the PCA slot

            bl      mapLockPteg                 ; Go lock up the PTEG (Note: we need to save R6 to set PCA)

            lwz     r21,mpPte(r31)              ; Get the quick pointer again
            lwz     r5,0(r26)                   ; Get the top of PTE

            rlwinm. r0,r21,0,mpHValidb,mpHValidb   ; See if we actually have a PTE
            rlwinm  r21,r21,0,0,30              ; Clear out valid bit
            rlwinm  r5,r5,0,1,31                ; Turn off valid bit in PTE
            stw     r21,mpPte(r31)              ; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake)
            beq-    hrmUlckPCA32                ; Pte is gone, no need to invalidate...

            stw     r5,0(r26)                   ; Invalidate the PTE

            li      r9,tlbieLock                ; Get the TLBIE lock

            sync                                ; Make sure the invalid PTE is actually in memory

hrmPtlb32:  lwarx   r5,0,r9                     ; Get the TLBIE lock
            mr.     r5,r5                       ; Is it locked?
            li      r5,1                        ; Get locked indicator
            bne-    hrmPtlb32                   ; It is locked, go spin...
            stwcx.  r5,0,r9                     ; Try to get it
            bne-    hrmPtlb32                   ; We was beat...
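;
;           (The five instructions above are the usual lwarx/stwcx. spin to take
;           the global TLBIE lock; in C terms, roughly:
;               while (!__sync_bool_compare_and_swap(&tlbieLock, 0, 1)) ;
;           -- a sketch of the idea, not an actual kernel primitive.)
;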

            rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb   ; Can this processor do SMP?

            tlbie   r30                         ; Invalidate all corresponding TLB entries

            beq-    hrmNTlbs                    ; Jump if we can not do a TLBSYNC....

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up
            sync                                ; Make sure of it all

hrmNTlbs:   li      r0,0                        ; Clear this
            rlwinm  r2,r21,29,29,31             ; Get slot number (8 byte entries)
            stw     r0,tlbieLock(0)             ; Clear the tlbie lock
            lis     r0,0x8000                   ; Get bit for slot 0
            eieio                               ; Make sure those RC bits have been stashed in PTE

            srw     r0,r0,r2                    ; Get the allocation hash mask
            lwz     r22,4(r26)                  ; Get the latest reference and change bits
            or      r6,r6,r0                    ; Show that this slot is free

hrmUlckPCA32:
            eieio                               ; Make sure all updates come first
            stw     r6,0(r7)                    ; Unlock the PTEG

;
;           Now, it is time to remove the mapping and unlock the chain.
;           But first, we need to make sure no one else is using this
;           mapping so we drain the busy now
;

hrmPysDQ32: mr      r3,r31                      ; Point to the mapping
            bl      mapDrainBusy                ; Go wait until mapping is unused

            mr      r3,r28                      ; Get the pmap to remove from
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapRemove)              ; Remove the mapping from the list

            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            andi.   r0,r20,lo16(mpSpecial|mpNest)   ; Is this nest or special mapping?
            cmplwi  cr1,r0,0                    ; Special thingie?
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            subi    r4,r4,1                     ; Drop down the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            bl      sxlkUnlock                  ; Unlock the search list

            bne--   cr1,hrmRetn32               ; This one has no real memory associated with it so we are done...

            bl      mapPhysFindLock             ; Go find and lock the physent

            lwz     r9,ppLink+4(r3)             ; Get first mapping

            mr      r4,r22                      ; Get the RC bits we just got
            bl      mapPhysMerge                ; Go merge the RC bits

            rlwinm  r9,r9,0,0,25                ; Clear the flags from the mapping pointer

            cmplw   r9,r31                      ; Are we the first on the list?
            bne-    hrmNot1st                   ; Nope...

            li      r9,0                        ; Get a 0
            lwz     r4,mpAlias+4(r31)           ; Get our new forward pointer
            stw     r9,mpAlias+4(r31)           ; Make sure we are off the chain
            bl      mapPhyCSet32                ; Go set the physent link and preserve flags

            b       hrmPhyDQd                   ; Join up and unlock it all...

            .align  5

hrmPerm:    li      r8,-4096                    ; Get the value we need to round down to a page
            and     r8,r8,r31                   ; Get back to a page
            lwz     r8,mbvrswap+4(r8)           ; Get last half of virtual to real swap

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            xor     r3,r31,r8                   ; Flip mapping address to virtual
            ori     r3,r3,mapRtPerm             ; Set permanent mapping error
            b       hrmErRtn

hrmBadLock: li      r3,mapRtBadLk               ; Set bad lock
            b       hrmErRtn

hrmEndInSight:
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

hrmDoneChunk:
            mr      r3,r31                      ; Point to the mapping
            bl      mapDropBusy                 ; Drop the busy here since we need to come back
            li      r3,mapRtRemove              ; Say we are still removing this
            b       hrmErRtn

            .align  5

hrmNotFound:
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list
            li      r3,0                        ; Make sure we know we did not find it

hrmErRtn:   bt++    pf64Bitb,hrmSF1z            ; skip if 64-bit (only they take the hint)

            mtmsr   r17                         ; Restore enables/translation/etc.
            isync
            b       hrmRetnCmn                  ; Join the common return code...

hrmSF1z:    mtmsrd  r17                         ; Restore enables/translation/etc.
            isync
            b       hrmRetnCmn                  ; Join the common return code...

            .align  5

hrmNot1st:  mr.     r8,r9                       ; Remember and test current node
            beq-    hrmPhyDQd                   ; Could not find our node, someone must have unmapped us...
            lwz     r9,mpAlias+4(r9)            ; Chain to the next
            cmplw   r9,r31                      ; Is this us?
            bne-    hrmNot1st                   ; Not us...

            lwz     r9,mpAlias+4(r9)            ; Get our forward pointer
            stw     r9,mpAlias+4(r8)            ; Unchain us

            nop                                 ; For alignment

hrmPhyDQd:  bl      mapPhysUnlock               ; Unlock the physent chain

hrmRetn32:  rlwinm  r8,r31,0,0,19               ; Find start of page
            mr      r3,r31                      ; Copy the pointer to the mapping
            lwz     r8,mbvrswap+4(r8)           ; Get last half of virtual to real swap
            bl      mapDrainBusy                ; Go wait until mapping is unused

            xor     r3,r31,r8                   ; Flip mapping address to virtual

            mtmsr   r17                         ; Restore enables/translation/etc.
            isync

hrmRetnCmn: lwz     r6,FM_ARG0+0x44(r1)         ; Get address to save next mapped vaddr
            lwz     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)   ; Restore the return
            lwz     r17,FM_ARG0+0x08(r1)        ; Restore a register
            lwz     r18,FM_ARG0+0x0C(r1)        ; Restore a register
            mr.     r6,r6                       ; Should we pass back the "next" vaddr?
            lwz     r19,FM_ARG0+0x10(r1)        ; Restore a register
            lwz     r20,FM_ARG0+0x14(r1)        ; Restore a register
            mtlr    r0                          ; Restore the return

            rlwinm  r16,r16,0,0,19              ; Clean to a page boundary
            beq     hrmNoNextAdr                ; Do not pass back the next vaddr...
            stw     r15,0(r6)                   ; Pass back the top of the next vaddr
            stw     r16,4(r6)                   ; Pass back the bottom of the next vaddr

hrmNoNextAdr:
            lwz     r15,FM_ARG0+0x00(r1)        ; Restore a register
            lwz     r16,FM_ARG0+0x04(r1)        ; Restore a register
            lwz     r21,FM_ARG0+0x18(r1)        ; Restore a register
            rlwinm  r3,r3,0,0,31                ; Clear top of register if 64-bit
            lwz     r22,FM_ARG0+0x1C(r1)        ; Restore a register
            lwz     r23,FM_ARG0+0x20(r1)        ; Restore a register
            lwz     r24,FM_ARG0+0x24(r1)        ; Restore a register
            lwz     r25,FM_ARG0+0x28(r1)        ; Restore a register
            lwz     r26,FM_ARG0+0x2C(r1)        ; Restore a register
            lwz     r27,FM_ARG0+0x30(r1)        ; Restore a register
            lwz     r28,FM_ARG0+0x34(r1)        ; Restore a register
            lwz     r29,FM_ARG0+0x38(r1)        ; Restore a register
            lwz     r30,FM_ARG0+0x3C(r1)        ; Restore a register
            lwz     r31,FM_ARG0+0x40(r1)        ; Restore a register
            lwz     r1,0(r1)                    ; Pop the stack
            blr                                 ; Leave...

;
;           Here is where we come when all is lost.  Somehow, we failed a mapping function
;           that must work... All hope is gone.  Alas, we die.......
;

hrmPanic:   lis     r0,hi16(Choke)              ; System abend
            ori     r0,r0,lo16(Choke)           ; System abend
            li      r3,failMapping              ; Show that we failed some kind of mapping thing
            sc


;
;           Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
;           in the range.  Then, if we did not finish, return a code indicating that we need to
;           be called again.  Eventually, we will finish and then, we will do a TLBIE for each
;           PTEG up to the point where we have cleared it all (64 for 32-bit architecture)
;
;           A potential speed up is that we stop the invalidate loop once we have walked through
;           the hash table once.  This really is not worth the trouble because we need to have
;           mapped 1/2 of physical RAM in an individual block.  Way unlikely.
;
;           We should rethink this and see if we think it will be faster to check PTE and
;           only invalidate the specific PTE rather than all block map PTEs in the PTEG.
;
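;
;           Roughly, in C (a sketch of the chunking described above, not kernel
;           code; mapRemChunk is the chunk size used below):
;
;               left  = pages - cursor;                             // pages still to do
;               count = (left > mapRemChunk) ? mapRemChunk : left;  // clamp to one chunk
;               cursor += mapRemChunk;                              // may step past the end
;               /* invalidate count pages worth of autogen PTEs */
;               if (cursor < pages) return mapRtRemove;             // ask caller to call again
;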

            .align  5

hrmBlock32:
            lhz     r23,mpSpace(r31)            ; Get the address space hash
            lhz     r25,mpBSize(r31)            ; Get the number of pages in block
            lwz     r9,mpBlkRemCur(r31)         ; Get our current remove position
            ori     r0,r20,mpRIP                ; Turn on the remove in progress flag
            mfsdr1  r29                         ; Get the hash table base and size
            rlwinm  r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb   ; Get high order of hash
            lwz     r27,mpVAddr+4(r31)          ; Get the base vaddr
            sub     r4,r25,r9                   ; Get number of pages left
            cmplw   cr1,r9,r25                  ; Have we already hit the end?
            addi    r10,r9,mapRemChunk          ; Point to the start of the next chunk
            addi    r2,r4,-mapRemChunk          ; See if mapRemChunk or more
            rlwinm  r26,r29,16,7,15             ; Get the hash table size
            srawi   r2,r2,31                    ; We have -1 if less than mapRemChunk or 0 if equal or more
            stb     r0,mpFlags+3(r31)           ; Save the flags with the mpRIP bit on
            subi    r4,r4,mapRemChunk-1         ; Back off for a running start (will be negative for more than mapRemChunk)
            cmpwi   cr7,r2,0                    ; Remember if we have finished
            slwi    r0,r9,12                    ; Make cursor into page offset
            or      r24,r24,r23                 ; Get full hash
            and     r4,r4,r2                    ; If more than a chunk, bring this back to 0
            rlwinm  r29,r29,0,0,15              ; Isolate the hash table base
            add     r27,r27,r0                  ; Adjust vaddr to start of current chunk
            addi    r4,r4,mapRemChunk-1         ; Add mapRemChunk-1 to get max(num left, chunksize)

            bgt-    cr1,hrmEndInSight           ; Someone is already doing the last hunk...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            stw     r10,mpBlkRemCur(r31)        ; Set next chunk to do (note: this may indicate after end)
            bl      sxlkUnlock                  ; Unlock the search list while we are invalidating

            rlwinm  r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb   ; Isolate the segment
            rlwinm  r30,r27,26,6,25             ; Shift vaddr to PTEG offset (and remember VADDR in R27)
            xor     r24,r24,r8                  ; Get the proper VSID
            rlwinm  r21,r27,26,10,25            ; Shift page index to PTEG offset (and remember VADDR in R27)
            ori     r26,r26,lo16(0xFFC0)        ; Stick in the rest of the length
            rlwinm  r22,r4,6,10,25              ; Shift size to PTEG offset
            rlwinm  r24,r24,6,0,25              ; Shift hash to PTEG units
            add     r22,r22,r30                 ; Get end address (in PTEG units)

hrmBInv32:  rlwinm  r23,r30,0,10,25             ; Isolate just the page index
            xor     r23,r23,r24                 ; Hash it
            and     r23,r23,r26                 ; Wrap it into the table
            rlwinm  r3,r23,28,4,29              ; Change to PCA offset
            subfic  r3,r3,-4                    ; Get the PCA entry offset
            add     r7,r3,r29                   ; Point to the PCA slot
            cmplw   cr5,r30,r22                 ; Check if we reached the end of the range
            addi    r30,r30,64                  ; bump to the next vaddr

            bl      mapLockPteg                 ; Lock the PTEG

            rlwinm. r4,r6,16,0,7                ; Position, save, and test block mappings in PCA
            add     r5,r23,r29                  ; Point to the PTEG
            li      r0,0                        ; Set an invalid PTE value
            beq+    hrmBNone32                  ; No block map PTEs in this PTEG...
            mtcrf   0x80,r4                     ; Set CRs to select PTE slots
            mtcrf   0x40,r4                     ; Set CRs to select PTE slots

            bf      0,hrmSlot0                  ; No autogen here
            stw     r0,0x00(r5)                 ; Invalidate PTE

hrmSlot0:   bf      1,hrmSlot1                  ; No autogen here
            stw     r0,0x08(r5)                 ; Invalidate PTE

hrmSlot1:   bf      2,hrmSlot2                  ; No autogen here
            stw     r0,0x10(r5)                 ; Invalidate PTE

hrmSlot2:   bf      3,hrmSlot3                  ; No autogen here
            stw     r0,0x18(r5)                 ; Invalidate PTE

hrmSlot3:   bf      4,hrmSlot4                  ; No autogen here
            stw     r0,0x20(r5)                 ; Invalidate PTE

hrmSlot4:   bf      5,hrmSlot5                  ; No autogen here
            stw     r0,0x28(r5)                 ; Invalidate PTE

hrmSlot5:   bf      6,hrmSlot6                  ; No autogen here
            stw     r0,0x30(r5)                 ; Invalidate PTE

hrmSlot6:   bf      7,hrmSlot7                  ; No autogen here
            stw     r0,0x38(r5)                 ; Invalidate PTE

hrmSlot7:   rlwinm  r0,r4,16,16,23              ; Move in use to autogen
            or      r6,r6,r4                    ; Flip on the free bits that correspond to the autogens we cleared
            andc    r6,r6,r0                    ; Turn off all the old autogen bits

hrmBNone32: eieio                               ; Make sure all updates come first

            stw     r6,0(r7)                    ; Unlock and set the PCA

            bne+    cr5,hrmBInv32               ; Go invalidate the next...

            bge+    cr7,hrmDoneChunk            ; We have not as yet done the last chunk, go tell our caller to call again...

            mr      r3,r31                      ; Copy the pointer to the mapping
            bl      mapDrainBusy                ; Go wait until we are sure all other removers are done with this one

            sync                                ; Make sure memory is consistent

            subi    r5,r25,63                   ; Subtract TLB size from page count (note we are 0 based here)
            li      r6,63                       ; Assume full invalidate for now
            srawi   r5,r5,31                    ; Make 0 if we need a full purge, -1 otherwise
            andc    r6,r6,r5                    ; Clear max if we have less to do
            and     r5,r25,r5                   ; Clear count if we have more than max
            lwz     r27,mpVAddr+4(r31)          ; Get the base vaddr again
            li      r7,tlbieLock                ; Get the TLBIE lock
            or      r5,r5,r6                    ; Get number of TLBIEs needed

hrmBTLBlck: lwarx   r2,0,r7                     ; Get the TLBIE lock
            mr.     r2,r2                       ; Is it locked?
            li      r2,1                        ; Get our lock value
            bne-    hrmBTLBlck                  ; It is locked, go wait...
            stwcx.  r2,0,r7                     ; Try to get it
            bne-    hrmBTLBlck                  ; We was beat...

hrmBTLBi:   addic.  r5,r5,-1                    ; See if we did them all
            tlbie   r27                         ; Invalidate it everywhere
            addi    r27,r27,0x1000              ; Up to the next page
            bge+    hrmBTLBi                    ; Make sure we have done it all...

            rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb   ; Can this processor do SMP?
            li      r2,0                        ; Lock clear value

            sync                                ; Make sure all is quiet
            beq-    hrmBNTlbs                   ; Jump if we can not do a TLBSYNC....

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up
            sync                                ; Wait for quiet again

hrmBNTlbs:  stw     r2,tlbieLock(0)             ; Clear the tlbie lock

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            bne-    hrmPanic                    ; Nope...

            lwz     r4,mpVAddr(r31)             ; High order of address
            lwz     r5,mpVAddr+4(r31)           ; Low order of address
            mr      r3,r28                      ; Pass in pmap to search
            mr      r29,r4                      ; Save this in case we need it (only promote fails)
            mr      r30,r5                      ; Save this in case we need it (only promote fails)
            bl      EXT(mapSearchFull)          ; Go see if we can find it

            mr.     r3,r3                       ; Did we? (And remember mapping address for later)
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq-    hrmPanic                    ; Nope, not found...

            cmplw   r3,r31                      ; Same mapping?
            bne-    hrmPanic                    ; Not good...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            mr      r3,r31                      ; Restore the mapping pointer
            beq+    hrmBDone1                   ; Yeah...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hrmPanic                    ; Nope, we must have timed out...

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Rescan the list

            mr.     r3,r3                       ; Did we lose it when we converted?
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq--   hrmPanic                    ; Yeah, we did, someone tossed it for us...

hrmBDone1:  bl      mapDrainBusy                ; Go wait until mapping is unused

            mr      r3,r28                      ; Get the pmap to remove from
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapRemove)              ; Remove the mapping from the list

            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            subi    r4,r4,1                     ; Drop down the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            bl      sxlkUnlock                  ; Unlock the search list

            b       hrmRetn32                   ; We are all done, get out...

;
;           Here we handle the 64-bit version of hw_rem_map
;

            .align  5

hrmSplit64: rlwinm  r9,r21,27,5,29              ; Convert PTEG to PCA entry
            bne--   cr5,hrmBlock64              ; Go treat block specially...
            subfic  r9,r9,-4                    ; Get the PCA entry offset
            bt--    cr0_eq,hrmPysDQ64           ; Skip next if no possible PTE...
            add     r7,r9,r29                   ; Point to the PCA slot

            bl      mapLockPteg                 ; Go lock up the PTEG

            lwz     r21,mpPte(r31)              ; Get the quick pointer again
            ld      r5,0(r26)                   ; Get the top of PTE

            rlwinm. r0,r21,0,mpHValidb,mpHValidb   ; See if we actually have a PTE
            rlwinm  r21,r21,0,0,30              ; Clear out valid bit
            sldi    r23,r5,16                   ; Shift AVPN up to EA format
            rldicr  r5,r5,0,62                  ; Clear the valid bit
            rldimi  r23,r30,0,36                ; Insert the page portion of the VPN
            stw     r21,mpPte(r31)              ; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake)
            beq--   hrmUlckPCA64                ; Pte is gone, no need to invalidate...

            std     r5,0(r26)                   ; Invalidate the PTE

            li      r9,tlbieLock                ; Get the TLBIE lock

            sync                                ; Make sure the invalid PTE is actually in memory

hrmPtlb64:  lwarx   r5,0,r9                     ; Get the TLBIE lock
            rldicl  r23,r23,0,16                ; Clear bits 0:15 cause they say to
            mr.     r5,r5                       ; Is it locked?
            li      r5,1                        ; Get locked indicator
            bne--   hrmPtlb64w                  ; It is locked, go spin...
            stwcx.  r5,0,r9                     ; Try to get it
            bne--   hrmPtlb64                   ; We was beat...

            tlbie   r23                         ; Invalidate all corresponding TLB entries

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up
            isync

            ptesync                             ; Make sure of it all
            li      r0,0                        ; Clear this
            rlwinm  r2,r21,28,29,31             ; Get slot number (16 byte entries)
            stw     r0,tlbieLock(0)             ; Clear the tlbie lock
            oris    r0,r0,0x8000                ; Assume slot 0
            eieio                               ; Make sure those RC bits have been stashed in PTE
            srw     r0,r0,r2                    ; Get slot mask to deallocate

            lwz     r22,12(r26)                 ; Get the latest reference and change bits
            or      r6,r6,r0                    ; Make the guy we killed free

hrmUlckPCA64:
            eieio                               ; Make sure all updates come first

            stw     r6,0(r7)                    ; Unlock and change the PCA

hrmPysDQ64: mr      r3,r31                      ; Point to the mapping
            bl      mapDrainBusy                ; Go wait until mapping is unused

            mr      r3,r28                      ; Get the pmap to remove from
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapRemove)              ; Remove the mapping from the list

            andi.   r0,r20,lo16(mpSpecial|mpNest)   ; Is this nest or special mapping?
            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            cmplwi  cr1,r0,0                    ; Special thingie?
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            subi    r4,r4,1                     ; Drop down the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            bl      sxlkUnlock                  ; Unlock the search list

            bne--   cr1,hrmRetn64               ; This one has no real memory associated with it so we are done...

            bl      mapPhysFindLock             ; Go find and lock the physent

            li      r0,0xFF                     ; Get mask to clean up mapping pointer
            ld      r9,ppLink(r3)               ; Get first mapping
            rldicl  r0,r0,62,0                  ; Rotate clean up mask to get 0xC00000000000003F
            mr      r4,r22                      ; Get the RC bits we just got

            bl      mapPhysMerge                ; Go merge the RC bits

            andc    r9,r9,r0                    ; Clean up the mapping pointer

            cmpld   r9,r31                      ; Are we the first on the list?
            bne-    hrmNot1st64                 ; Nope...

            li      r9,0                        ; Get a 0
            ld      r4,mpAlias(r31)             ; Get our forward pointer

            std     r9,mpAlias(r31)             ; Make sure we are off the chain
            bl      mapPhyCSet64                ; Go set the physent link and preserve flags

            b       hrmPhyDQd64                 ; Join up and unlock it all...

hrmPtlb64w: li      r5,lgKillResv               ; Point to some spare memory
            stwcx.  r5,0,r5                     ; Clear the pending reservation


hrmPtlb64x: lwz     r5,0(r9)                    ; Do a regular load to avoid taking reservation
            mr.     r5,r5                       ; is it locked?
            beq++   hrmPtlb64                   ; Nope...
            b       hrmPtlb64x                  ; Sniff some more...

            .align  5

hrmNot1st64:
            mr.     r8,r9                       ; Remember and test current node
            beq-    hrmNotFound                 ; Could not find our node...
            ld      r9,mpAlias(r9)              ; Chain to the next
            cmpld   r9,r31                      ; Is this us?
            bne-    hrmNot1st64                 ; Not us...

            ld      r9,mpAlias(r9)              ; Get our forward pointer
            std     r9,mpAlias(r8)              ; Unchain us

            nop                                 ; For alignment

hrmPhyDQd64:
            bl      mapPhysUnlock               ; Unlock the physent chain

hrmRetn64:  rldicr  r8,r31,0,51                 ; Find start of page
            mr      r3,r31                      ; Copy the pointer to the mapping
            lwz     r8,mbvrswap+4(r8)           ; Get last half of virtual to real swap
            bl      mapDrainBusy                ; Go wait until mapping is unused

            xor     r3,r31,r8                   ; Flip mapping address to virtual

            mtmsrd  r17                         ; Restore enables/translation/etc.
            isync

            b       hrmRetnCmn                  ; Join the common return path...


;
;           Check hrmBlock32 for comments.
;

            .align  5

hrmBlock64:
            lhz     r24,mpSpace(r31)            ; Get the address space hash
            lhz     r25,mpBSize(r31)            ; Get the number of pages in block
            lwz     r9,mpBlkRemCur(r31)         ; Get our current remove position
            ori     r0,r20,mpRIP                ; Turn on the remove in progress flag
            mfsdr1  r29                         ; Get the hash table base and size
            ld      r27,mpVAddr(r31)            ; Get the base vaddr
            rlwinm  r5,r29,0,27,31              ; Isolate the size
            sub     r4,r25,r9                   ; Get number of pages left
            cmplw   cr1,r9,r25                  ; Have we already hit the end?
            addi    r10,r9,mapRemChunk          ; Point to the start of the next chunk
            addi    r2,r4,-mapRemChunk          ; See if mapRemChunk or more
            stb     r0,mpFlags+3(r31)           ; Save the flags with the mpRIP bit on
            srawi   r2,r2,31                    ; We have -1 if less than mapRemChunk or 0 if equal or more
            subi    r4,r4,mapRemChunk-1         ; Back off for a running start (will be negative for more than mapRemChunk)
            cmpwi   cr7,r2,0                    ; Remember if we are doing the last chunk
            and     r4,r4,r2                    ; If more than a chunk, bring this back to 0
            srdi    r27,r27,12                  ; Change address into page index
            addi    r4,r4,mapRemChunk-1         ; Add mapRemChunk-1 to get max(num left, chunksize)
            add     r27,r27,r9                  ; Adjust vaddr to start of current chunk

            bgt--   cr1,hrmEndInSight           ; Someone is already doing the last hunk...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            stw     r10,mpBlkRemCur(r31)        ; Set next chunk to do (note: this may indicate after end)
            bl      sxlkUnlock                  ; Unlock the search list while we are invalidating

            rlwimi  r24,r24,14,4,17             ; Insert a copy of space hash
            eqv     r26,r26,r26                 ; Get all foxes here
            rldimi  r24,r24,28,8                ; Make a couple copies up higher
            rldicr  r29,r29,0,47                ; Isolate just the hash table base
            subfic  r5,r5,46                    ; Get number of leading zeros
            srd     r26,r26,r5                  ; Shift the size bits over
            mr      r30,r27                     ; Get start of chunk to invalidate
            rldicr  r26,r26,0,56                ; Make length in PTEG units
            add     r22,r4,r30                  ; Get end page number

hrmBInv64:  srdi    r0,r30,2                    ; Shift page index over to form ESID
            rldicr  r0,r0,0,49                  ; Clean all but segment portion
            rlwinm  r2,r30,0,16,31              ; Get the current page index
            xor     r0,r0,r24                   ; Form VSID
            xor     r8,r2,r0                    ; Hash the vaddr
            sldi    r8,r8,7                     ; Make into PTEG offset
            and     r23,r8,r26                  ; Wrap into the hash table
            rlwinm  r3,r23,27,5,29              ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here)
            subfic  r3,r3,-4                    ; Get the PCA entry offset
            add     r7,r3,r29                   ; Point to the PCA slot

            cmplw   cr5,r30,r22                 ; Have we reached the end of the range?

            bl      mapLockPteg                 ; Lock the PTEG

            rlwinm. r4,r6,16,0,7                ; Extract the block mappings in this here PTEG and see if there are any
            add     r5,r23,r29                  ; Point to the PTEG
            li      r0,0                        ; Set an invalid PTE value
            beq++   hrmBNone64                  ; No block map PTEs in this PTEG...
            mtcrf   0x80,r4                     ; Set CRs to select PTE slots
            mtcrf   0x40,r4                     ; Set CRs to select PTE slots


            bf      0,hrmSlot0s                 ; No autogen here
            std     r0,0x00(r5)                 ; Invalidate PTE

hrmSlot0s:  bf      1,hrmSlot1s                 ; No autogen here
            std     r0,0x10(r5)                 ; Invalidate PTE

hrmSlot1s:  bf      2,hrmSlot2s                 ; No autogen here
            std     r0,0x20(r5)                 ; Invalidate PTE

hrmSlot2s:  bf      3,hrmSlot3s                 ; No autogen here
            std     r0,0x30(r5)                 ; Invalidate PTE

hrmSlot3s:  bf      4,hrmSlot4s                 ; No autogen here
            std     r0,0x40(r5)                 ; Invalidate PTE

hrmSlot4s:  bf      5,hrmSlot5s                 ; No autogen here
            std     r0,0x50(r5)                 ; Invalidate PTE

hrmSlot5s:  bf      6,hrmSlot6s                 ; No autogen here
            std     r0,0x60(r5)                 ; Invalidate PTE

hrmSlot6s:  bf      7,hrmSlot7s                 ; No autogen here
            std     r0,0x70(r5)                 ; Invalidate PTE

hrmSlot7s:  rlwinm  r0,r4,16,16,23              ; Move in use to autogen
            or      r6,r6,r4                    ; Flip on the free bits that correspond to the autogens we cleared
            andc    r6,r6,r0                    ; Turn off all the old autogen bits

hrmBNone64: eieio                               ; Make sure all updates come first
            stw     r6,0(r7)                    ; Unlock and set the PCA

            addi    r30,r30,1                   ; bump to the next PTEG
            bne++   cr5,hrmBInv64               ; Go invalidate the next...

            bge+    cr7,hrmDoneChunk            ; We have not as yet done the last chunk, go tell our caller to call again...

            mr      r3,r31                      ; Copy the pointer to the mapping
            bl      mapDrainBusy                ; Go wait until we are sure all other removers are done with this one

            sync                                ; Make sure memory is consistent

            subi    r5,r25,255                  ; Subtract TLB size from page count (note we are 0 based here)
            li      r6,255                      ; Assume full invalidate for now
            srawi   r5,r5,31                    ; Make 0 if we need a full purge, -1 otherwise
            andc    r6,r6,r5                    ; Clear max if we have less to do
            and     r5,r25,r5                   ; Clear count if we have more than max
            sldi    r24,r24,28                  ; Get the full XOR value over to segment position
            ld      r27,mpVAddr(r31)            ; Get the base vaddr
            li      r7,tlbieLock                ; Get the TLBIE lock
            or      r5,r5,r6                    ; Get number of TLBIEs needed

hrmBTLBlcl: lwarx   r2,0,r7                     ; Get the TLBIE lock
            mr.     r2,r2                       ; Is it locked?
            li      r2,1                        ; Get our lock value
            bne--   hrmBTLBlcm                  ; It is locked, go wait...
            stwcx.  r2,0,r7                     ; Try to get it
            bne--   hrmBTLBlcl                  ; We was beat...

hrmBTLBj:   sldi    r2,r27,maxAdrSpb            ; Move to make room for address space ID
            rldicr  r2,r2,0,35-maxAdrSpb        ; Clear out the extra
            addic.  r5,r5,-1                    ; See if we did them all
            xor     r2,r2,r24                   ; Make the VSID
            rldimi  r2,r27,0,36                 ; Insert the page portion of the VPN
            rldicl  r2,r2,0,16                  ; Clear bits 0:15 cause they say we gotta

            tlbie   r2                          ; Invalidate it everywhere
            addi    r27,r27,0x1000              ; Up to the next page
            bge++   hrmBTLBj                    ; Make sure we have done it all...

            sync                                ; Make sure all is quiet

            eieio                               ; Make sure that the tlbie happens first
            tlbsync                             ; Wait for everyone to catch up
            isync

            li      r2,0                        ; Lock clear value

            ptesync                             ; Wait for quiet again
            sync                                ; Make sure that is done

            stw     r2,tlbieLock(0)             ; Clear the tlbie lock

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            bne-    hrmPanic                    ; Nope...

            lwz     r4,mpVAddr(r31)             ; High order of address
            lwz     r5,mpVAddr+4(r31)           ; Low order of address
            mr      r3,r28                      ; Pass in pmap to search
            mr      r29,r4                      ; Save this in case we need it (only promote fails)
            mr      r30,r5                      ; Save this in case we need it (only promote fails)
            bl      EXT(mapSearchFull)          ; Go see if we can find it

            mr.     r3,r3                       ; Did we? (And remember mapping address for later)
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq-    hrmPanic                    ; Nope, not found...

            cmpld   r3,r31                      ; Same mapping?
            bne-    hrmPanic                    ; Not good...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkPromote                 ; Try to promote shared to exclusive
            mr.     r3,r3                       ; Could we?
            mr      r3,r31                      ; Restore the mapping pointer
            beq+    hrmBDone2                   ; Yeah...

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkConvert                 ; Convert shared to exclusive
            mr.     r3,r3                       ; Could we?
            bne--   hrmPanic                    ; Nope, we must have timed out...

            mr      r3,r28                      ; Pass in pmap to search
            mr      r4,r29                      ; High order of address
            mr      r5,r30                      ; Low order of address
            bl      EXT(mapSearchFull)          ; Rescan the list

            mr.     r3,r3                       ; Did we lose it when we converted?
            mr      r15,r4                      ; Save top of next vaddr
            mr      r16,r5                      ; Save bottom of next vaddr
            beq--   hrmPanic                    ; Yeah, we did, someone tossed it for us...

hrmBDone2:  bl      mapDrainBusy                ; Go wait until mapping is unused

            mr      r3,r28                      ; Get the pmap to remove from
            mr      r4,r31                      ; Point to the mapping
            bl      EXT(mapRemove)              ; Remove the mapping from the list

            lwz     r4,pmapResidentCnt(r28)     ; Get the mapped page count
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            subi    r4,r4,1                     ; Drop down the mapped page count
            stw     r4,pmapResidentCnt(r28)     ; Set the mapped page count
            bl      sxlkUnlock                  ; Unlock the search list

            b       hrmRetn64                   ; We are all done, get out...

hrmBTLBlcm: li      r2,lgKillResv               ; Get space unreserve line
            stwcx.  r2,0,r2                     ; Unreserve it

hrmBTLBlcn: lwz     r2,0(r7)                    ; Get the TLBIE lock
            mr.     r2,r2                       ; Is it held?
            beq++   hrmBTLBlcl                  ; Nope...
            b       hrmBTLBlcn                  ; Yeah...
1c79356b 1468
1c79356b
A
1469
1470
/*
 *			mapping *hw_purge_phys(physent) - remove a mapping from the system
 *
 *			Upon entry, R3 contains a pointer to a physent.
 *
 *			This function removes the first mapping from a physical entry
 *			alias list.  It locks the list, extracts the vaddr and pmap from
 *			the first entry.  It then jumps into the hw_rem_map function.
 *			NOTE: since we jump into rem_map, we need to set up the stack
 *			identically.  Also, we set the next parm to 0 so we do not
 *			try to save a next vaddr.
 *
 *			We return the virtual address of the removed mapping in R3.
 *
 *			Note that this is designed to be called from 32-bit mode with a stack.
 *
 *			We disable translation and all interruptions here.  This keeps us
 *			from having to worry about a deadlock due to having anything locked
 *			and needing it to process a fault.
 *
 *			Note that this must be done with both interruptions off and VM off.
 *
 *			Remove mapping via physical page (mapping_purge)
 *
 *			  1) lock physent
 *			  2) extract vaddr and pmap
 *			  3) unlock physent
 *			  4) do "remove mapping via pmap"
 *
 */

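;
;			A rough C sketch of the purge flow, for orientation only.  The helper
;			names mirror the labels below; the types and field accesses are
;			assumptions, not the real pmap structure definitions:
;
;				mapping *hw_purge_phys(struct phys_entry *pp) {
;					mapPhysLock(pp);                        /* 1) lock physent */
;					mapping *mp = first_alias(pp);          /* first entry on alias chain */
;					if (mp == 0) {
;						mapPhysUnlock(pp);                  /* nothing mapped here */
;						return 0;
;					}
;					pmap_t pmap = pmapTrans[mp->mpSpace];   /* 2) space hash -> pmap */
;					addr64_t va = mp->mpVAddr;
;					mapPhysUnlock(pp);                      /* 3) unlock physent */
;					return hw_rem_map(pmap, va, 0);         /* 4) remove via pmap; no next vaddr */
;				}
;
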
			.align	5
			.globl	EXT(hw_purge_phys)

LEXT(hw_purge_phys)
			stwu	r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)	; Make some space on the stack
			mflr	r0						; Save the link register
			stw	r15,FM_ARG0+0x00(r1)	; Save a register
			stw	r16,FM_ARG0+0x04(r1)	; Save a register
			stw	r17,FM_ARG0+0x08(r1)	; Save a register
			stw	r18,FM_ARG0+0x0C(r1)	; Save a register
			stw	r19,FM_ARG0+0x10(r1)	; Save a register
			stw	r20,FM_ARG0+0x14(r1)	; Save a register
			stw	r21,FM_ARG0+0x18(r1)	; Save a register
			stw	r22,FM_ARG0+0x1C(r1)	; Save a register
			stw	r23,FM_ARG0+0x20(r1)	; Save a register
			stw	r24,FM_ARG0+0x24(r1)	; Save a register
			stw	r25,FM_ARG0+0x28(r1)	; Save a register
			li	r6,0					; Set no next address return
			stw	r26,FM_ARG0+0x2C(r1)	; Save a register
			stw	r27,FM_ARG0+0x30(r1)	; Save a register
			stw	r28,FM_ARG0+0x34(r1)	; Save a register
			stw	r29,FM_ARG0+0x38(r1)	; Save a register
			stw	r30,FM_ARG0+0x3C(r1)	; Save a register
			stw	r31,FM_ARG0+0x40(r1)	; Save a register
			stw	r6,FM_ARG0+0x44(r1)		; Save address in which to return the next mapped vaddr (0 = none)
			stw	r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)	; Save the return

			bl	EXT(mapSetUp)			; Turn off interrupts, translation, and possibly enter 64-bit

			bl	mapPhysLock				; Lock the physent

			bt++	pf64Bitb,hppSF		; skip if 64-bit (only they take the hint)

			lwz	r12,ppLink+4(r3)		; Grab the pointer to the first mapping
			li	r0,0x3F					; Set the bottom stuff to clear
			b	hppJoin					; Join the common...

hppSF:		li	r0,0xFF
			ld	r12,ppLink(r3)			; Get the pointer to the first mapping
			rldicl	r0,r0,62,0			; Rotate the cleanup mask to get 0xC00000000000003F

hppJoin:	andc.	r12,r12,r0			; Clean and test link
			beq--	hppNone				; There are no more mappings on physical page

			lis	r28,hi16(EXT(pmapTrans))	; Get the top of the start of the pmap hash to pmap translate table
			lhz	r7,mpSpace(r12)			; Get the address space hash
			ori	r28,r28,lo16(EXT(pmapTrans))	; Get the bottom of the start of the pmap hash to pmap translate table
			slwi	r0,r7,2				; Multiply space by 4
			lwz	r4,mpVAddr(r12)			; Get the top of the vaddr
			slwi	r7,r7,3				; Multiply space by 8
			lwz	r5,mpVAddr+4(r12)		; and the bottom
			add	r7,r7,r0				; Get correct displacement into translate table
			lwz	r28,0(r28)				; Get the actual translation map

			add	r28,r28,r7				; Point to the pmap translation

			bl	mapPhysUnlock			; Time to unlock the physical entry

			bt++	pf64Bitb,hppSF2		; skip if 64-bit (only they take the hint)

			lwz	r28,pmapPAddr+4(r28)	; Get the physical address of the pmap
			b	hrmJoin					; Go remove the mapping...

hppSF2:		ld	r28,pmapPAddr(r28)		; Get the physical address of the pmap
			b	hrmJoin					; Go remove the mapping...

			.align	5

hppNone:	bl	mapPhysUnlock			; Time to unlock the physical entry

			bt++	pf64Bitb,hppSF3		; skip if 64-bit (only they take the hint)...

			mtmsr	r11					; Restore enables/translation/etc.
			isync
			b	hppRetnCmn				; Join the common return code...

hppSF3:		mtmsrd	r11					; Restore enables/translation/etc.
			isync

;
;			NOTE: we have not used any registers other than the volatiles to this point
;

hppRetnCmn:	lwz	r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)	; Restore the return

			li	r3,0					; Clear the high-order mapping address because we are 32-bit
			mtlr	r12					; Restore the return
			lwz	r1,0(r1)				; Pop the stack
			blr							; Leave...

/*
 *			mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
 *
 *			Upon entry, R3 contains a pointer to a pmap.  Since vaddr is
 *			a 64-bit quantity, it is a long long, so it is in R4 and R5.
 *
 *			We return the virtual address of the removed mapping in R3.
 *
 *			Note that this is designed to be called from 32-bit mode with a stack.
 *
 *			We disable translation and all interruptions here.  This keeps us
 *			from having to worry about a deadlock due to having anything locked
 *			and needing it to process a fault.
 *
 *			Note that this must be done with both interruptions off and VM off.
 *
 *			Remove a mapping which can be reestablished by VM
 *
 */

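;
;			The search loop below behaves roughly like this C sketch (hypothetical
;			helper names; the flag and busy-count tests are the ones coded at hpmGotOne):
;
;				do {
;					mp = mapSearchFull(pmap, &va);          /* va advances to the next vaddr */
;					if (mp && !(mp->mpFlags & (mpSpecial|mpNest|mpPerm|mpBlock))
;						   && busy_count(mp->mpFlags) == 0)
;						return remove(mp);                  /* an ordinary, idle mapping */
;				} while (va != 0);                          /* a zero next vaddr ends the pmap */
;				return not_found;
;
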
			.align	5
			.globl	EXT(hw_purge_map)

LEXT(hw_purge_map)
			stwu	r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)	; Make some space on the stack
			mflr	r0						; Save the link register
			stw	r15,FM_ARG0+0x00(r1)	; Save a register
			stw	r16,FM_ARG0+0x04(r1)	; Save a register
			stw	r17,FM_ARG0+0x08(r1)	; Save a register
			stw	r18,FM_ARG0+0x0C(r1)	; Save a register
			stw	r19,FM_ARG0+0x10(r1)	; Save a register
			mfsprg	r19,2				; Get feature flags
			stw	r20,FM_ARG0+0x14(r1)	; Save a register
			stw	r21,FM_ARG0+0x18(r1)	; Save a register
			mtcrf	0x02,r19			; Move pf64Bit to cr6
			stw	r22,FM_ARG0+0x1C(r1)	; Save a register
			stw	r23,FM_ARG0+0x20(r1)	; Save a register
			stw	r24,FM_ARG0+0x24(r1)	; Save a register
			stw	r25,FM_ARG0+0x28(r1)	; Save a register
			stw	r26,FM_ARG0+0x2C(r1)	; Save a register
			stw	r27,FM_ARG0+0x30(r1)	; Save a register
			stw	r28,FM_ARG0+0x34(r1)	; Save a register
			stw	r29,FM_ARG0+0x38(r1)	; Save a register
			stw	r30,FM_ARG0+0x3C(r1)	; Save a register
			stw	r31,FM_ARG0+0x40(r1)	; Save a register
			stw	r6,FM_ARG0+0x44(r1)		; Save address in which to return the next mapped vaddr
			stw	r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)	; Save the return

			bt++	pf64Bitb,hpmSF1		; skip if 64-bit (only they take the hint)
			lwz	r9,pmapvr+4(r3)			; Get conversion mask
			b	hpmSF1x					; Done...

hpmSF1:		ld	r9,pmapvr(r3)			; Get conversion mask

hpmSF1x:
			bl	EXT(mapSetUp)			; Turn off interrupts, translation, and possibly enter 64-bit

			xor	r28,r3,r9				; Convert the pmap to physical addressing

			mr	r17,r11					; Save the MSR

			la	r3,pmapSXlk(r28)		; Point to the pmap search lock
			bl	sxlkExclusive			; Go get an exclusive lock on the mapping lists
			mr.	r3,r3					; Did we get the lock?
			bne--	hrmBadLock			; Nope...
;
;			Note that we do a full search (i.e., no shortcut level skips, etc.)
;			here so that we will know the previous elements so we can dequeue them
;			later.
;
hpmSearch:
			mr	r3,r28					; Pass in pmap to search
			mr	r29,r4					; Top half of vaddr
			mr	r30,r5					; Bottom half of vaddr
			bl	EXT(mapSearchFull)		; Rescan the list
			mr.	r31,r3					; Did we find one? (And remember mapping address for later)
			or	r0,r4,r5				; Are we beyond the end?
			mr	r15,r4					; Save top of next vaddr
			cmplwi	cr1,r0,0			; See if there is another
			mr	r16,r5					; Save bottom of next vaddr
			bne--	hpmGotOne			; We found one, go check it out...

hpmCNext:	bne++	cr1,hpmSearch		; There is another to check...
			b	hrmNotFound				; No more in pmap to check...

hpmGotOne:	lwz	r20,mpFlags(r3)			; Get the flags
			andi.	r9,r20,lo16(mpSpecial|mpNest|mpPerm|mpBlock)	; Are we allowed to remove it?
			rlwinm	r21,r20,8,24,31		; Extract the busy count
			cmplwi	cr2,r21,0			; Is it busy?
			crand	cr0_eq,cr2_eq,cr0_eq	; Not busy and can be removed?
			beq++	hrmGotX				; Found, branch to remove the mapping...
			b	hpmCNext				; Nope...

/*
 *			mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
 *
 *			Upon entry, R3 contains a pointer to the physent and
 *			R4 contains a pointer to the pmap.
 *
 *			This function removes the first mapping for a specific pmap from a physical entry
 *			alias list.  It locks the list, extracts the vaddr and pmap from
 *			the first appropriate entry.  It then jumps into the hw_rem_map function.
 *			NOTE: since we jump into rem_map, we need to set up the stack
 *			identically.  Also, we set the next parm to 0 so we do not
 *			try to save a next vaddr.
 *
 *			We return the virtual address of the removed mapping in R3.
 *
 *			Note that this is designed to be called from 32-bit mode with a stack.
 *
 *			We disable translation and all interruptions here.  This keeps us
 *			from having to worry about a deadlock due to having anything locked
 *			and needing it to process a fault.
 *
 *			Note that this must be done with both interruptions off and VM off.
 *
 *			Remove mapping via physical page (mapping_purge)
 *
 *			  1) lock physent
 *			  2) extract vaddr and pmap
 *			  3) unlock physent
 *			  4) do "remove mapping via pmap"
 *
 */

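;
;			The alias-chain walk coded below, as a hedged C sketch (assumed names;
;			mpAlias links the mappings that share this physical page):
;
;				space = pmap->pmapSpace;                    /* space hash of the caller */
;				for (mp = first_alias(pp); mp; mp = mp->mpAlias)
;					if (mp->mpSpace == space)
;						break;                              /* first mapping in our space */
;
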
			.align	5
			.globl	EXT(hw_purge_space)

LEXT(hw_purge_space)
			stwu	r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)	; Make some space on the stack
			mflr	r0						; Save the link register
			stw	r15,FM_ARG0+0x00(r1)	; Save a register
			stw	r16,FM_ARG0+0x04(r1)	; Save a register
			stw	r17,FM_ARG0+0x08(r1)	; Save a register
			mfsprg	r2,2				; Get feature flags
			stw	r18,FM_ARG0+0x0C(r1)	; Save a register
			stw	r19,FM_ARG0+0x10(r1)	; Save a register
			stw	r20,FM_ARG0+0x14(r1)	; Save a register
			stw	r21,FM_ARG0+0x18(r1)	; Save a register
			stw	r22,FM_ARG0+0x1C(r1)	; Save a register
			mtcrf	0x02,r2				; Move pf64Bit to cr6
			stw	r23,FM_ARG0+0x20(r1)	; Save a register
			stw	r24,FM_ARG0+0x24(r1)	; Save a register
			stw	r25,FM_ARG0+0x28(r1)	; Save a register
			stw	r26,FM_ARG0+0x2C(r1)	; Save a register
			stw	r27,FM_ARG0+0x30(r1)	; Save a register
			li	r6,0					; Set no next address return
			stw	r28,FM_ARG0+0x34(r1)	; Save a register
			stw	r29,FM_ARG0+0x38(r1)	; Save a register
			stw	r30,FM_ARG0+0x3C(r1)	; Save a register
			stw	r31,FM_ARG0+0x40(r1)	; Save a register
			stw	r6,FM_ARG0+0x44(r1)		; Save address in which to return the next mapped vaddr
			stw	r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)	; Save the return

			bt++	pf64Bitb,hpsSF1		; skip if 64-bit (only they take the hint)

			lwz	r9,pmapvr+4(r4)			; Get conversion mask for pmap

			b	hpsSF1x					; Done...

hpsSF1:		ld	r9,pmapvr(r4)			; Get conversion mask for pmap

hpsSF1x:	bl	EXT(mapSetUp)			; Turn off interrupts, translation, and possibly enter 64-bit

			xor	r4,r4,r9				; Convert the pmap to physical addressing

			bl	mapPhysLock				; Lock the physent

			lwz	r8,pmapSpace(r4)		; Get the space hash

			bt++	pf64Bitb,hpsSF		; skip if 64-bit (only they take the hint)

			lwz	r12,ppLink+4(r3)		; Grab the pointer to the first mapping

hpsSrc32:	rlwinm.	r12,r12,0,0,25		; Clean and test mapping address
			beq	hpsNone					; Did not find one...

			lhz	r10,mpSpace(r12)		; Get the space

			cmplw	r10,r8				; Is this one of ours?
			beq	hpsFnd					; Yes...

			lwz	r12,mpAlias+4(r12)		; Chain on to the next
			b	hpsSrc32				; Check it out...

			.align	5

hpsSF:		li	r0,0xFF
			ld	r12,ppLink(r3)			; Get the pointer to the first mapping
			rldicl	r0,r0,62,0			; Rotate the cleanup mask to get 0xC00000000000003F

hpsSrc64:	andc.	r12,r12,r0			; Clean and test mapping address
			beq	hpsNone					; Did not find one...

			lhz	r10,mpSpace(r12)		; Get the space

			cmplw	r10,r8				; Is this one of ours?
			beq	hpsFnd					; Yes...

			ld	r12,mpAlias(r12)		; Chain on to the next
			b	hpsSrc64				; Check it out...

			.align	5

hpsFnd:		mr	r28,r4					; Set the pmap physical address
			lwz	r4,mpVAddr(r12)			; Get the top of the vaddr
			lwz	r5,mpVAddr+4(r12)		; and the bottom

			bl	mapPhysUnlock			; Time to unlock the physical entry
			b	hrmJoin					; Go remove the mapping...

			.align	5

hpsNone:	bl	mapPhysUnlock			; Time to unlock the physical entry

			bt++	pf64Bitb,hpsSF3		; skip if 64-bit (only they take the hint)...

			mtmsr	r11					; Restore enables/translation/etc.
			isync
			b	hpsRetnCmn				; Join the common return code...

hpsSF3:		mtmsrd	r11					; Restore enables/translation/etc.
			isync

;
;			NOTE: we have not used any registers other than the volatiles to this point
;

hpsRetnCmn:	lwz	r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)	; Restore the return

			li	r3,0					; Set return code
			mtlr	r12					; Restore the return
			lwz	r1,0(r1)				; Pop the stack
			blr							; Leave...


/*
 *			mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
 *
 *			Upon entry, R3 contains a pointer to a physent and
 *			R4 contains the space ID from the pmap in question.
 *
 *			We return the virtual address of the found mapping in
 *			R3.  Note that the mapping busy count is bumped.
 *
 *			Note that this is designed to be called from 32-bit mode with a stack.
 *
 *			We disable translation and all interruptions here.  This keeps us
 *			from having to worry about a deadlock due to having anything locked
 *			and needing it to process a fault.
 *
 */

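;
;			A hedged C sketch of the logic (helper names are assumptions):
;
;				mapPhysLock(pp);
;				for (mp = first_alias(pp); mp; mp = mp->mpAlias)
;					if (mp->mpSpace == space) break;        /* match on the space hash */
;				if (mp) mapBumpBusy(mp);                    /* pin it before dropping the lock */
;				mapPhysUnlock(pp);
;				return mp ? phys_to_virt(mp) : 0;           /* mbvrswap-based conversion */
;
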
			.align	5
			.globl	EXT(hw_find_space)

LEXT(hw_find_space)
			stwu	r1,-(FM_SIZE)(r1)		; Make some space on the stack
			mflr	r0						; Save the link register
			mr	r8,r4						; Remember the space
			stw	r0,(FM_SIZE+FM_LR_SAVE)(r1)	; Save the return

			bl	EXT(mapSetUp)			; Turn off interrupts, translation, and possibly enter 64-bit

			bl	mapPhysLock				; Lock the physent

			bt++	pf64Bitb,hfsSF		; skip if 64-bit (only they take the hint)

			lwz	r12,ppLink+4(r3)		; Grab the pointer to the first mapping

hfsSrc32:	rlwinm.	r12,r12,0,0,25		; Clean and test mapping address
			beq	hfsNone					; Did not find one...

			lhz	r10,mpSpace(r12)		; Get the space

			cmplw	r10,r8				; Is this one of ours?
			beq	hfsFnd					; Yes...

			lwz	r12,mpAlias+4(r12)		; Chain on to the next
			b	hfsSrc32				; Check it out...

			.align	5

hfsSF:		li	r0,0xFF
			ld	r12,ppLink(r3)			; Get the pointer to the first mapping
			rldicl	r0,r0,62,0			; Rotate the cleanup mask to get 0xC00000000000003F

hfsSrc64:	andc.	r12,r12,r0			; Clean and test mapping address
			beq	hfsNone					; Did not find one...

			lhz	r10,mpSpace(r12)		; Get the space

			cmplw	r10,r8				; Is this one of ours?
			beq	hfsFnd					; Yes...

			ld	r12,mpAlias(r12)		; Chain on to the next
			b	hfsSrc64				; Check it out...

			.align	5

hfsFnd:		mr	r8,r3					; Save the physent
			mr	r3,r12					; Point to the mapping
			bl	mapBumpBusy				; If we found it, bump up the busy count so the mapping does not disappear

			mr	r3,r8					; Get back the physical entry
			li	r7,0xFFF				; Get a page size mask
			bl	mapPhysUnlock			; Time to unlock the physical entry

			andc	r3,r12,r7			; Move the mapping back down to a page
			lwz	r3,mbvrswap+4(r3)		; Get last half of virtual to real swap
			xor	r12,r3,r12				; Convert to virtual
			b	hfsRet					; Time to return

			.align	5

hfsNone:	bl	mapPhysUnlock			; Time to unlock the physical entry

hfsRet:		bt++	pf64Bitb,hfsSF3		; skip if 64-bit (only they take the hint)...

			mtmsr	r11					; Restore enables/translation/etc.
			isync
			b	hfsRetnCmn				; Join the common return code...

hfsSF3:		mtmsrd	r11					; Restore enables/translation/etc.
			isync

;
;			NOTE: we have not used any registers other than the volatiles to this point
;

hfsRetnCmn:	mr	r3,r12					; Get the mapping or a 0 if we failed
			lwz	r12,(FM_SIZE+FM_LR_SAVE)(r1)	; Restore the return

			mtlr	r12					; Restore the return
			lwz	r1,0(r1)				; Pop the stack
			blr							; Leave...


;
;			mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap.
;			Returns 0 if not found, or the virtual address of the mapping if
;			it is.  Also, the mapping has the busy count bumped.
;
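;
;			Sketched in C (hypothetical names; mpRIP is the remove-in-progress flag):
;
;				sxlkShared(&pmap->pmapSXlk);                /* shared lock on the lists */
;				mp = mapSearch(pmap, va, &nextva);          /* also yields mpFlags */
;				if (mp && !(mp->mpFlags & mpRIP))
;					mapBumpBusy(mp);                        /* keep it from disappearing */
;				else
;					mp = 0;                                 /* missing or being removed */
;				sxlkUnlock(&pmap->pmapSXlk);
;				*nextvap = nextva;
;				return mp;
;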
			.align	5
			.globl	EXT(hw_find_map)

LEXT(hw_find_map)
			stwu	r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1)	; Make some space on the stack
			mflr	r0						; Save the link register
			stw	r25,FM_ARG0+0x00(r1)	; Save a register
			stw	r26,FM_ARG0+0x04(r1)	; Save a register
			mr	r25,r6					; Remember address of next va
			stw	r27,FM_ARG0+0x08(r1)	; Save a register
			stw	r28,FM_ARG0+0x0C(r1)	; Save a register
			stw	r29,FM_ARG0+0x10(r1)	; Save a register
			stw	r30,FM_ARG0+0x14(r1)	; Save a register
			stw	r31,FM_ARG0+0x18(r1)	; Save a register
			stw	r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Save the return

			lwz	r6,pmapvr(r3)			; Get the first part of the VR translation for pmap
			lwz	r7,pmapvr+4(r3)			; Get the second part

			bl	EXT(mapSetUp)			; Turn off interrupts, translation, and possibly enter 64-bit

			mr	r27,r11					; Remember the old MSR
			mr	r26,r12					; Remember the feature bits

			xor	r28,r3,r7				; Change the common 32- and 64-bit half

			bf--	pf64Bitb,hfmSF1		; skip if 32-bit...

			rldimi	r28,r6,32,0			; Shift the fixed upper part of the physical over and cram in top

hfmSF1:		mr	r29,r4					; Save top half of vaddr
			mr	r30,r5					; Save the bottom half

			la	r3,pmapSXlk(r28)		; Point to the pmap search lock
			bl	sxlkShared				; Go get a shared lock on the mapping lists
			mr.	r3,r3					; Did we get the lock?
			bne--	hfmBadLock			; Nope...

			mr	r3,r28					; Get the pmap address
			mr	r4,r29					; Get bits 0:31 to look for
			mr	r5,r30					; Get bits 32:64

			bl	EXT(mapSearch)			; Go see if we can find it (note: R7 comes back with mpFlags)

			rlwinm	r0,r7,0,mpRIPb,mpRIPb	; Find remove in progress bit
			mr.	r31,r3					; Save the mapping if we found it
			cmplwi	cr1,r0,0			; Are we removing?
			mr	r29,r4					; Save next va high half
			crorc	cr0_eq,cr0_eq,cr1_eq	; Not found or removing
			mr	r30,r5					; Save next va low half
			li	r6,0					; Assume we did not find it
			li	r26,0xFFF				; Get a mask to relocate to start of mapping page

			bt--	cr0_eq,hfmNotFnd	; We did not find it...

			bl	mapBumpBusy				; If we found it, bump up the busy count so the mapping does not disappear

			andc	r4,r31,r26			; Get back to the mapping page start

;			Note: we can treat 32- and 64-bit the same here.  Because we are going from
;			physical to virtual and we only do 32-bit virtual, we only need the low order
;			word of the xor.

			lwz	r4,mbvrswap+4(r4)		; Get last half of virtual to real swap
			li	r6,-1					; Indicate we found it and it is not being removed
			xor	r31,r31,r4				; Flip to virtual

hfmNotFnd:	la	r3,pmapSXlk(r28)		; Point to the pmap search lock
			bl	sxlkUnlock				; Unlock the search list

			rlwinm	r3,r31,0,0,31		; Move mapping to return register and clear top of register if 64-bit
			and	r3,r3,r6				; Clear if not found or removing

hfmReturn:	bt++	pf64Bitb,hfmR64		; Yes...

			mtmsr	r27					; Restore enables/translation/etc.
			isync
			b	hfmReturnC				; Join common...

hfmR64:		mtmsrd	r27					; Restore enables/translation/etc.
			isync

hfmReturnC:	stw	r29,0(r25)				; Save the top of the next va
			stw	r30,4(r25)				; Save the bottom of the next va
			lwz	r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Restore the return
			lwz	r25,FM_ARG0+0x00(r1)	; Restore a register
			lwz	r26,FM_ARG0+0x04(r1)	; Restore a register
			and	r3,r3,r6				; Clear return if the mapping is being removed
			lwz	r27,FM_ARG0+0x08(r1)	; Restore a register
			mtlr	r0					; Restore the return
			lwz	r28,FM_ARG0+0x0C(r1)	; Restore a register
			lwz	r29,FM_ARG0+0x10(r1)	; Restore a register
			lwz	r30,FM_ARG0+0x14(r1)	; Restore a register
			lwz	r31,FM_ARG0+0x18(r1)	; Restore a register
			lwz	r1,0(r1)				; Pop the stack
			blr							; Leave...

			.align	5

hfmBadLock:	li	r3,1					; Set lock time out error code
			b	hfmReturn				; Leave....

/*
 *			unsigned int hw_walk_phys(pp, preop, op, postop, parm)
 *			walks all mappings for a physical page and performs
 *			specified operations on each.
 *
 *			pp is an unlocked physent.
 *			preop is the operation to perform on the physent before the walk.  This would be
 *			used to set the cache attribute or protection.
 *			op is the operation to perform on each mapping during the walk.
 *			postop is the operation to perform on the physent after the walk.  This would be
 *			used to set or reset the RC bits.
 *
 *			We return the RC bits from before postop is run.
 *
 *			Note that this is designed to be called from 32-bit mode with a stack.
 *
 *			We disable translation and all interruptions here.  This keeps us
 *			from having to worry about a deadlock due to having anything locked
 *			and needing it to process a fault.
 *
 *			We lock the physent, execute preop, and then walk each mapping in turn.
 *			If there is a PTE, it is invalidated and the RC merged into the physent.
 *			Then we call the op function.
 *			Then we revalidate the PTE.
 *			Once all mappings are finished, we save the physent RC and call the
 *			postop routine.  Then we unlock the physent and return the RC.
 *
 */

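;
;			The walk, as a rough C sketch (the function table is real; the helper
;			names here are assumptions):
;
;				mapPhysLock(pp);
;				if (preop(pp) != CONTINUE) goto out;        /* preop may bail early */
;				for (mp = first_alias(pp); mp; mp = mp->mpAlias) {
;					pte = mapInvPte(mp);                    /* invalidate; RC merges into pp */
;					more = op(pp, mp);                      /* per-mapping operation */
;					if (pte) revalidate(pte);               /* put the PTE back */
;					if (more != CONTINUE) break;            /* op asked us to stop */
;				}
;			out:
;				rc = pp->ppLink & (ppR|ppC);                /* RC from before postop */
;				postop(pp);
;				mapPhysUnlock(pp);
;				return rc;
;
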
			.align	5
			.globl	EXT(hw_walk_phys)

LEXT(hw_walk_phys)
			stwu	r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1)	; Make some space on the stack
			mflr	r0						; Save the link register
			stw	r25,FM_ARG0+0x00(r1)	; Save a register
			stw	r26,FM_ARG0+0x04(r1)	; Save a register
			stw	r27,FM_ARG0+0x08(r1)	; Save a register
			stw	r28,FM_ARG0+0x0C(r1)	; Save a register
			mr	r25,r7					; Save the parm
			stw	r29,FM_ARG0+0x10(r1)	; Save a register
			stw	r30,FM_ARG0+0x14(r1)	; Save a register
			stw	r31,FM_ARG0+0x18(r1)	; Save a register
			stw	r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Save the return

			bl	EXT(mapSetUp)			; Turn off interrupts, translation, and possibly enter 64-bit

			mr	r26,r11					; Save the old MSR
			lis	r27,hi16(hwpOpBase)		; Get high order of op base
			slwi	r4,r4,7				; Convert preop to displacement
			ori	r27,r27,lo16(hwpOpBase)	; Get low order of op base
			slwi	r5,r5,7				; Convert op to displacement
			add	r12,r4,r27				; Point to the preop routine
			slwi	r28,r6,7			; Convert postop to displacement
			mtctr	r12					; Set preop routine
			add	r28,r28,r27				; Get the address of the postop routine
			add	r27,r5,r27				; Get the address of the op routine

			bl	mapPhysLock				; Lock the physent

			mr	r29,r3					; Save the physent address

			bt++	pf64Bitb,hwp64		; skip if 64-bit (only they take the hint)

			bctrl						; Call preop routine
			bne-	hwpEarly32			; preop says to bail now...

			mtctr	r27					; Set up the op function address
			lwz	r31,ppLink+4(r3)		; Grab the pointer to the first mapping

hwpSrc32:	rlwinm.	r31,r31,0,0,25		; Clean and test mapping address
			beq	hwpNone32				; Did not find one...

;
;			Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4,
;			PTE low in R5.  The PCA address is in R7.  The PTEG comes back locked.
;			If there is no PTE, PTE low is obtained from the mapping.
;
			bl	mapInvPte32				; Invalidate and lock PTE, also merge into physent

			bctrl						; Call the op function

			crmove	cr1_eq,cr0_eq		; Save the return code

			mr.	r3,r3					; Was there a previously valid PTE?
			beq-	hwpNxt32			; Nope...

			stw	r5,4(r3)				; Store second half of PTE
			eieio						; Make sure we do not reorder
			stw	r4,0(r3)				; Revalidate the PTE

			eieio						; Make sure all updates come first
			stw	r6,0(r7)				; Unlock the PCA

hwpNxt32:	bne-	cr1,hwpEarly32		; op says to bail now...
			lwz	r31,mpAlias+4(r31)		; Chain on to the next
			b	hwpSrc32				; Check it out...

			.align	5

hwpNone32:	mtctr	r28					; Get the post routine address

			lwz	r30,ppLink+4(r29)		; Save the old RC
			mr	r3,r29					; Get the physent address
			bctrl						; Call post routine

			bl	mapPhysUnlock			; Unlock the physent

			mtmsr	r26					; Restore translation/mode/etc.
			isync

			b	hwpReturn				; Go restore registers and return...

			.align	5

hwpEarly32:	lwz	r30,ppLink+4(r29)		; Save the old RC
			mr	r3,r29					; Get the physent address
			bl	mapPhysUnlock			; Unlock the physent

			mtmsr	r26					; Restore translation/mode/etc.
			isync

			b	hwpReturn				; Go restore registers and return...

			.align	5

hwp64:		bctrl						; Call preop routine
			bne--	hwpEarly64			; preop says to bail now...

			mtctr	r27					; Set up the op function address

			li	r0,0xFF
			ld	r31,ppLink(r3)			; Get the pointer to the first mapping
			rldicl	r0,r0,62,0			; Rotate the cleanup mask to get 0xC00000000000003F

hwpSrc64:	andc.	r31,r31,r0			; Clean and test mapping address
			beq	hwpNone64				; Did not find one...
;
;			Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4,
;			PTE low in R5.  The PTEG comes back locked if there is one.
;
			bl	mapInvPte64				; Invalidate and lock PTEG, also merge into physent

			bctrl						; Call the op function

			crmove	cr1_eq,cr0_eq		; Save the return code

			mr.	r3,r3					; Was there a previously valid PTE?
			beq--	hwpNxt64			; Nope...

			std	r5,8(r3)				; Save bottom of PTE
			eieio						; Make sure we do not reorder
			std	r4,0(r3)				; Revalidate the PTE

			eieio						; Make sure all updates come first
			stw	r6,0(r7)				; Unlock the PCA

hwpNxt64:	bne--	cr1,hwpEarly64		; op says to bail now...
			ld	r31,mpAlias(r31)		; Chain on to the next
			li	r0,0xFF
			rldicl	r0,r0,62,0			; Rotate the cleanup mask to get 0xC00000000000003F
			b	hwpSrc64				; Check it out...

			.align	5

hwpNone64:	mtctr	r28					; Get the post routine address

			lwz	r30,ppLink+4(r29)		; Save the old RC
			mr	r3,r29					; Get the physent address
			bctrl						; Call post routine

			bl	mapPhysUnlock			; Unlock the physent

			mtmsrd	r26					; Restore translation/mode/etc.
			isync
			b	hwpReturn				; Go restore registers and return...

			.align	5

hwpEarly64:	lwz	r30,ppLink+4(r29)		; Save the old RC
			mr	r3,r29					; Get the physent address
			bl	mapPhysUnlock			; Unlock the physent

			mtmsrd	r26					; Restore translation/mode/etc.
			isync

hwpReturn:	lwz	r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Restore the return
			lwz	r25,FM_ARG0+0x00(r1)	; Restore a register
			lwz	r26,FM_ARG0+0x04(r1)	; Restore a register
			mr	r3,r30					; Pass back the RC
			lwz	r27,FM_ARG0+0x08(r1)	; Restore a register
			lwz	r28,FM_ARG0+0x0C(r1)	; Restore a register
			mtlr	r0					; Restore the return
			lwz	r29,FM_ARG0+0x10(r1)	; Restore a register
			lwz	r30,FM_ARG0+0x14(r1)	; Restore a register
			lwz	r31,FM_ARG0+0x18(r1)	; Restore a register
			lwz	r1,0(r1)				; Pop the stack
			blr							; Leave...


;
;			The preop/op/postop function table.
;			Each function must be 128-byte aligned and be no more than
;			32 instructions (one 128-byte slot, as the .set directives below check).
;			If a function is longer, we must fix the address calculations
;			at the start of hwpOpBase.
;
;			The routine must set CR0_EQ in order to continue the scan.
;			If CR0_EQ is not set, an early return from the function is made.
;

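;
;			Dispatch into the table is plain address arithmetic, done above with
;			slwi/add and called through mtctr/bctrl; as a sketch (constants from the code):
;
;				entry = hwpOpBase + (function_index << 7);  /* 128 bytes per slot */
;				(*(hwpOpFn)entry)();                        /* must leave CR0_EQ set to continue */
;
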
			.align	7

hwpOpBase:

;			Function 0 - No operation

hwpNoop:	cmplw	r0,r0				; Make sure CR0_EQ is set
			blr							; Just return...

			.align	5

;			This is the continuation of function 4 - Set attributes in mapping

;			We changed the attributes of a mapped page.  Make sure there are no cache paradoxes.
;			NOTE: Do we have to deal with i-cache here?

hwpSAM:		li	r11,4096				; Get page size

hwpSAMinvd:	sub.	r11,r11,r9			; Back off a line
			dcbf	r11,r5				; Flush the line in the data cache
			bgt++	hwpSAMinvd			; Go do the rest of it...

			sync						; Make sure it is done

			li	r11,4096				; Get page size

hwpSAMinvi:	sub.	r11,r11,r9			; Back off a line
			icbi	r11,r5				; Flush the line in the icache
			bgt++	hwpSAMinvi			; Go do the rest of it...

			sync						; Make sure it is done

			cmpw	r0,r0				; Make sure we return CR0_EQ
			blr							; Return...


;			Function 1 - Set protection in physent

			.set	.,hwpOpBase+(1*128)	; Generate error if previous function too long

hwpSPrtPhy:	li	r5,ppLink+4				; Get offset for flag part of physent

hwpSPrtPhX:	lwarx	r4,r5,r29			; Get the old flags
			rlwimi	r4,r25,0,ppPPb-32,ppPPe-32	; Stick in the new protection
			stwcx.	r4,r5,r29			; Try to stuff it
			bne--	hwpSPrtPhX			; Try again...
;			Note: CR0_EQ is set because of stwcx.
			blr							; Return...


;			Function 2 - Set protection in mapping

			.set	.,hwpOpBase+(2*128)	; Generate error if previous function too long

hwpSPrtMap:	lwz	r9,mpFlags(r31)			; Get the mapping flags
			lwz	r8,mpVAddr+4(r31)		; Get the protection part of mapping
			rlwinm.	r9,r9,0,mpPermb,mpPermb	; Is the mapping permanent?
			li	r0,lo16(mpPP)			; Get protection bits
			crnot	cr0_eq,cr0_eq		; Change CR0_EQ to true if mapping is permanent
			rlwinm	r2,r25,0,mpPPb-32,mpPPb-32+2	; Position new protection
			beqlr--						; Leave if permanent mapping (before we trash R5)...
			andc	r5,r5,r0			; Clear the old prot bits
			or	r5,r5,r2				; Move in the prot bits
			rlwimi	r8,r5,0,20,31		; Copy into the mapping copy
			cmpw	r0,r0				; Make sure we return CR0_EQ
			stw	r8,mpVAddr+4(r31)		; Set the flag part of mapping
			blr							; Leave...

;			Function 3 - Set attributes in physent

			.set	.,hwpOpBase+(3*128)	; Generate error if previous function too long

hwpSAtrPhy:	li	r5,ppLink+4				; Get offset for flag part of physent

hwpSAtrPhX:	lwarx	r4,r5,r29			; Get the old flags
			rlwimi	r4,r25,0,ppIb-32,ppGb-32	; Stick in the new attributes
			stwcx.	r4,r5,r29			; Try to stuff it
			bne--	hwpSAtrPhX			; Try again...
;			Note: CR0_EQ is set because of stwcx.
			blr							; Return...

;			Function 4 - Set attributes in mapping

			.set	.,hwpOpBase+(4*128)	; Generate error if previous function too long

hwpSAtrMap:	lwz	r9,mpFlags(r31)			; Get the mapping flags
			lwz	r8,mpVAddr+4(r31)		; Get the attribute part of mapping
			li	r2,0x10					; Force on coherent
			rlwinm.	r9,r9,0,mpPermb,mpPermb	; Is the mapping permanent?
			li	r0,lo16(mpWIMG)			; Get wimg mask
			crnot	cr0_eq,cr0_eq		; Change CR0_EQ to true if mapping is permanent
			rlwimi	r2,r2,mpIb-ppIb,mpIb-32,mpIb-32	; Copy in the cache inhibited bit
			beqlr--						; Leave if permanent mapping (before we trash R5)...
			andc	r5,r5,r0			; Clear the old wimg
			rlwimi	r2,r2,32-(mpGb-ppGb),mpGb-32,mpGb-32	; Copy in the guarded bit
			mfsprg	r9,2				; Feature flags
			or	r5,r5,r2				; Move in the new wimg
			rlwimi	r8,r5,0,20,31		; Copy into the mapping copy
			lwz	r2,mpPAddr(r31)			; Get the physical address
			li	r0,0xFFF				; Start a mask
			andi.	r9,r9,pf32Byte+pf128Byte	; Get cache line size
			rlwinm	r5,r0,0,1,0			; Copy to top half
			stw	r8,mpVAddr+4(r31)		; Set the flag part of mapping
			rlwinm	r2,r2,12,1,0		; Copy to top and rotate to make physical address with junk left
			and	r5,r5,r2				; Clean stuff in top 32 bits
			andc	r2,r2,r0			; Clean bottom too
			rlwimi	r5,r2,0,0,31		; Insert the low 32 bits to make the full physical address
			b	hwpSAM					; Join common...

;			NOTE: we moved the remainder of the code out of here because it
;			did not fit in the 128 bytes allotted.  It got stuck into the free space
;			at the end of the no-op function.


;			Function 5 - Clear reference in physent

			.set	.,hwpOpBase+(5*128)	; Generate error if previous function too long

hwpCRefPhy:	li	r5,ppLink+4				; Get offset for flag part of physent

hwpCRefPhX:	lwarx	r4,r5,r29			; Get the old flags
			rlwinm	r4,r4,0,ppRb+1-32,ppRb-1-32	; Clear R
			stwcx.	r4,r5,r29			; Try to stuff it
			bne--	hwpCRefPhX			; Try again...
;			Note: CR0_EQ is set because of stwcx.
			blr							; Return...


;			Function 6 - Clear reference in mapping

			.set	.,hwpOpBase+(6*128)	; Generate error if previous function too long

hwpCRefMap:	li	r0,lo16(mpR)			; Get reference bit
			lwz	r8,mpVAddr+4(r31)		; Get the flag part of mapping
			andc	r5,r5,r0			; Clear in PTE copy
			andc	r8,r8,r0			; and in the mapping
			cmpw	r0,r0				; Make sure we return CR0_EQ
			stw	r8,mpVAddr+4(r31)		; Set the flag part of mapping
			blr							; Return...


;			Function 7 - Clear change in physent

			.set	.,hwpOpBase+(7*128)	; Generate error if previous function too long

hwpCCngPhy:	li	r5,ppLink+4				; Get offset for flag part of physent

hwpCCngPhX:	lwarx	r4,r5,r29			; Get the old flags
			rlwinm	r4,r4,0,ppCb+1-32,ppCb-1-32	; Clear C
			stwcx.	r4,r5,r29			; Try to stuff it
			bne--	hwpCCngPhX			; Try again...
;			Note: CR0_EQ is set because of stwcx.
			blr							; Return...


;			Function 8 - Clear change in mapping

			.set	.,hwpOpBase+(8*128)	; Generate error if previous function too long

hwpCCngMap:	li	r0,lo16(mpC)			; Get change bit
			lwz	r8,mpVAddr+4(r31)		; Get the flag part of mapping
			andc	r5,r5,r0			; Clear in PTE copy
			andc	r8,r8,r0			; and in the mapping
			cmpw	r0,r0				; Make sure we return CR0_EQ
			stw	r8,mpVAddr+4(r31)		; Set the flag part of mapping
			blr							; Return...


;			Function 9 - Set reference in physent

			.set	.,hwpOpBase+(9*128)	; Generate error if previous function too long

hwpSRefPhy:	li	r5,ppLink+4				; Get offset for flag part of physent

hwpSRefPhX:	lwarx	r4,r5,r29			; Get the old flags
			ori	r4,r4,lo16(ppR)			; Set the reference
			stwcx.	r4,r5,r29			; Try to stuff it
			bne--	hwpSRefPhX			; Try again...
;			Note: CR0_EQ is set because of stwcx.
			blr							; Return...


;			Function 10 - Set reference in mapping

			.set	.,hwpOpBase+(10*128)	; Generate error if previous function too long

hwpSRefMap:	lwz	r8,mpVAddr+4(r31)		; Get the flag part of mapping
			ori	r5,r5,lo16(mpR)			; Set reference in PTE low
			ori	r8,r8,lo16(mpR)			; Set reference in mapping
			cmpw	r0,r0				; Make sure we return CR0_EQ
			stw	r8,mpVAddr+4(r31)		; Set the flag part of mapping
			blr							; Return...

;			Function 11 - Set change in physent

			.set	.,hwpOpBase+(11*128)	; Generate error if previous function too long

hwpSCngPhy:	li	r5,ppLink+4				; Get offset for flag part of physent

hwpSCngPhX:	lwarx	r4,r5,r29			; Get the old flags
			ori	r4,r4,lo16(ppC)			; Set the change bit
			stwcx.	r4,r5,r29			; Try to stuff it
			bne--	hwpSCngPhX			; Try again...
;			Note: CR0_EQ is set because of stwcx.
			blr							; Return...

;			Function 12 - Set change in mapping

			.set	.,hwpOpBase+(12*128)	; Generate error if previous function too long

hwpSCngMap:	lwz	r8,mpVAddr+4(r31)		; Get the flag part of mapping
			ori	r5,r5,lo16(mpC)			; Set change in PTE low
			ori	r8,r8,lo16(mpC)			; Set change in mapping
			cmpw	r0,r0				; Make sure we return CR0_EQ
			stw	r8,mpVAddr+4(r31)		; Set the flag part of mapping
			blr							; Return...

;			Function 13 - Test reference in physent

			.set	.,hwpOpBase+(13*128)	; Generate error if previous function too long

hwpTRefPhy:	lwz	r0,ppLink+4(r29)		; Get the flags from physent
			rlwinm.	r0,r0,0,ppRb-32,ppRb-32	; Isolate reference bit and see if 0
			blr							; Return (CR0_EQ set to continue if reference is off)...


;			Function 14 - Test reference in mapping

			.set	.,hwpOpBase+(14*128)	; Generate error if previous function too long

hwpTRefMap:	rlwinm.	r0,r5,0,mpRb-32,mpRb-32	; Isolate reference bit and see if 0
			blr							; Return (CR0_EQ set to continue if reference is off)...

;			Function 15 - Test change in physent

			.set	.,hwpOpBase+(15*128)	; Generate error if previous function too long

hwpTCngPhy:	lwz	r0,ppLink+4(r29)		; Get the flags from physent
			rlwinm.	r0,r0,0,ppCb-32,ppCb-32	; Isolate change bit and see if 0
			blr							; Return (CR0_EQ set to continue if change is off)...


;			Function 16 - Test change in mapping

			.set	.,hwpOpBase+(16*128)	; Generate error if previous function too long

hwpTCngMap:	rlwinm.	r0,r5,0,mpCb-32,mpCb-32	; Isolate change bit and see if 0
			blr							; Return (CR0_EQ set to continue if change is off)...

			.set	.,hwpOpBase+(17*128)	; Generate error if previous function too long



;
;			int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping.
;
;			Returns:
;				mapRtOK     - if all is ok
;				mapRtBadLk  - if mapping lock fails
;				mapRtPerm   - if mapping is permanent
;				mapRtNotFnd - if mapping is not found
;				mapRtBlock  - if mapping is a block
;
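;
;			In rough C, the decision flow coded below (assumed names; the pp bits
;			live in the low byte of the mapping's mpVAddr word):
;
;				mp = mapSearch(pmap, va, &nextva);
;				if (mp == 0) return mapRtNotFnd;
;				if (mp->mpFlags & mpRIP) return mapRtNotFnd;    /* being removed */
;				if (mp->mpFlags & mpBlock) return mapRtBlock;   /* blocks are not changed */
;				if (mp->mpFlags & (mpSpecial|mpNest|mpPerm)) return mapRtPerm;
;				pte = mapInvPte(mp);                            /* take the PTE out of play */
;				set_pp(mp, &pte_low, prot);                     /* insert new protection */
;				if (pte) revalidate(pte);
;				return mapRtOK;
;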
			.align	5
			.globl	EXT(hw_protect)

LEXT(hw_protect)
			stwu	r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1)	; Make some space on the stack
			mflr	r0						; Save the link register
			stw	r24,FM_ARG0+0x00(r1)	; Save a register
			stw	r25,FM_ARG0+0x04(r1)	; Save a register
			mr	r25,r7					; Remember address of next va
			stw	r26,FM_ARG0+0x08(r1)	; Save a register
			stw	r27,FM_ARG0+0x0C(r1)	; Save a register
			stw	r28,FM_ARG0+0x10(r1)	; Save a register
			mr	r24,r6					; Save the new protection flags
			stw	r29,FM_ARG0+0x14(r1)	; Save a register
			stw	r30,FM_ARG0+0x18(r1)	; Save a register
			stw	r31,FM_ARG0+0x1C(r1)	; Save a register
			stw	r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Save the return

			lwz	r6,pmapvr(r3)			; Get the first part of the VR translation for pmap
			lwz	r7,pmapvr+4(r3)			; Get the second part

			bl	EXT(mapSetUp)			; Turn off interrupts, translation, and possibly enter 64-bit

			mr	r27,r11					; Remember the old MSR
			mr	r26,r12					; Remember the feature bits

			xor	r28,r3,r7				; Change the common 32- and 64-bit half

			bf--	pf64Bitb,hpSF1		; skip if 32-bit...

			rldimi	r28,r6,32,0			; Shift the fixed upper part of the physical over and cram in top

hpSF1:		mr	r29,r4					; Save top half of vaddr
			mr	r30,r5					; Save the bottom half

			la	r3,pmapSXlk(r28)		; Point to the pmap search lock
			bl	sxlkShared				; Go get a shared lock on the mapping lists
			mr.	r3,r3					; Did we get the lock?
			bne--	hpBadLock			; Nope...

			mr	r3,r28					; Get the pmap address
			mr	r4,r29					; Get bits 0:31 to look for
			mr	r5,r30					; Get bits 32:64

			bl	EXT(mapSearch)			; Go see if we can find it (note: R7 comes back with mpFlags)

			andi.	r7,r7,lo16(mpSpecial|mpNest|mpPerm|mpBlock|mpRIP)	; Are we allowed to change it or is it being removed?
			mr.	r31,r3					; Save the mapping if we found it
			cmplwi	cr1,r7,0			; Anything special going on?
			mr	r29,r4					; Save next va high half
			mr	r30,r5					; Save next va low half

			beq--	hpNotFound			; Not found...

			bne--	cr1,hpNotAllowed	; Something special is happening...

			bt++	pf64Bitb,hpDo64		; Split for 64-bit

			bl	mapInvPte32				; Invalidate and lock PTEG, also merge into physent

			rlwimi	r5,r24,0,mpPPb-32,mpPPb-32+2	; Stick in the new pp
			mr.	r3,r3					; Was there a previously valid PTE?

			stb	r5,mpVAddr+7(r31)		; Set the new pp field (do not muck with the rest)

			beq--	hpNoOld32			; Nope...

			stw	r5,4(r3)				; Store second half of PTE
			eieio						; Make sure we do not reorder
			stw	r4,0(r3)				; Revalidate the PTE

			eieio						; Make sure all updates come first
			stw	r6,0(r7)				; Unlock PCA

hpNoOld32:	la	r3,pmapSXlk(r28)		; Point to the pmap search lock
			bl	sxlkUnlock				; Unlock the search list

			li	r3,mapRtOK				; Set normal return
			b	hpR32					; Join common...

			.align	5


hpDo64:		bl	mapInvPte64				; Invalidate and lock PTEG, also merge into physent

			rldimi	r5,r24,0,mpPPb		; Stick in the new pp
			mr.	r3,r3					; Was there a previously valid PTE?

			stb	r5,mpVAddr+7(r31)		; Set the new pp field (do not muck with the rest)

			beq--	hpNoOld64			; Nope...

			std	r5,8(r3)				; Store second half of PTE
			eieio						; Make sure we do not reorder
			std	r4,0(r3)				; Revalidate the PTE

			eieio						; Make sure all updates come first
			stw	r6,0(r7)				; Unlock PCA

hpNoOld64:	la	r3,pmapSXlk(r28)		; Point to the pmap search lock
			bl	sxlkUnlock				; Unlock the search list

			li	r3,mapRtOK				; Set normal return
			b	hpR64					; Join common...

			.align	5

hpReturn:	bt++	pf64Bitb,hpR64		; Yes...

hpR32:		mtmsr	r27					; Restore enables/translation/etc.
			isync
			b	hpReturnC				; Join common...

hpR64:		mtmsrd	r27					; Restore enables/translation/etc.
			isync

hpReturnC:	stw	r29,0(r25)				; Save the top of the next va
			stw	r30,4(r25)				; Save the bottom of the next va
			lwz	r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Restore the return
			lwz	r24,FM_ARG0+0x00(r1)	; Restore a register
			lwz	r25,FM_ARG0+0x04(r1)	; Restore a register
			lwz	r26,FM_ARG0+0x08(r1)	; Restore a register
			mtlr	r0					; Restore the return
			lwz	r27,FM_ARG0+0x0C(r1)	; Restore a register
			lwz	r28,FM_ARG0+0x10(r1)	; Restore a register
			lwz	r29,FM_ARG0+0x14(r1)	; Restore a register
			lwz	r30,FM_ARG0+0x18(r1)	; Restore a register
			lwz	r31,FM_ARG0+0x1C(r1)	; Restore a register
			lwz	r1,0(r1)				; Pop the stack
			blr							; Leave...

			.align	5

hpBadLock:	li	r3,mapRtBadLk			; Set lock time out error code
			b	hpReturn				; Leave....

hpNotFound:	la	r3,pmapSXlk(r28)		; Point to the pmap search lock
			bl	sxlkUnlock				; Unlock the search list

			li	r3,mapRtNotFnd			; Set that we did not find the requested page
			b	hpReturn				; Leave....

hpNotAllowed:
			rlwinm.	r0,r7,0,mpRIPb,mpRIPb	; Is it actually being removed?
			la	r3,pmapSXlk(r28)		; Point to the pmap search lock
			bne--	hpNotFound			; Yeah...
			bl	sxlkUnlock				; Unlock the search list

			li	r3,mapRtBlock			; Assume it was a block
			andi.	r7,r7,lo16(mpBlock)	; Is this a block?
			bne++	hpReturn			; Yes, leave...

			li	r3,mapRtPerm			; Set that we hit a permanent page
			b	hpReturn				; Leave....


;
;			int hw_test_rc(pmap, va, reset) - tests RC on a specific va
;
;			Returns the following code ORed with the RC bits from the mapping:
;				mapRtOK     - if all is ok
;				mapRtBadLk  - if mapping lock fails
;				mapRtNotFnd - if mapping is not found
;
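;
;			A hedged C sketch (assumed names; R and C are the reference/change bits):
;
;				mp = mapSearch(pmap, va, &nextva);
;				if (!mp || (mp->mpFlags & (mpSpecial|mpNest|mpPerm|mpBlock|mpRIP)))
;					return mapRtNotFnd;
;				pte = mapInvPte(mp);                  /* latch RC out of the PTE */
;				rc = pte_low & (mpR|mpC);             /* save the RC bits */
;				if (reset) clear_rc(mp, &pte_low);    /* optionally reset them */
;				if (pte) revalidate(pte);
;				return mapRtOK | rc;
;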
			.align	5
			.globl	EXT(hw_test_rc)

LEXT(hw_test_rc)
			stwu	r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1)	; Make some space on the stack
			mflr	r0						; Save the link register
			stw	r24,FM_ARG0+0x00(r1)	; Save a register
			stw	r25,FM_ARG0+0x04(r1)	; Save a register
			stw	r26,FM_ARG0+0x08(r1)	; Save a register
			stw	r27,FM_ARG0+0x0C(r1)	; Save a register
			stw	r28,FM_ARG0+0x10(r1)	; Save a register
			mr	r24,r6					; Save the reset request
			stw	r29,FM_ARG0+0x14(r1)	; Save a register
			stw	r30,FM_ARG0+0x18(r1)	; Save a register
			stw	r31,FM_ARG0+0x1C(r1)	; Save a register
			stw	r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Save the return

			lwz	r6,pmapvr(r3)			; Get the first part of the VR translation for pmap
			lwz	r7,pmapvr+4(r3)			; Get the second part

			bl	EXT(mapSetUp)			; Turn off interrupts, translation, and possibly enter 64-bit

			mr	r27,r11					; Remember the old MSR
			mr	r26,r12					; Remember the feature bits

			xor	r28,r3,r7				; Change the common 32- and 64-bit half

			bf--	pf64Bitb,htrSF1		; skip if 32-bit...

			rldimi	r28,r6,32,0			; Shift the fixed upper part of the physical over and cram in top

htrSF1:		mr	r29,r4					; Save top half of vaddr
			mr	r30,r5					; Save the bottom half

			la	r3,pmapSXlk(r28)		; Point to the pmap search lock
			bl	sxlkShared				; Go get a shared lock on the mapping lists
			mr.	r3,r3					; Did we get the lock?
			li	r25,0					; Clear RC
			bne--	htrBadLock			; Nope...

			mr	r3,r28					; Get the pmap address
			mr	r4,r29					; Get bits 0:31 to look for
			mr	r5,r30					; Get bits 32:64

			bl	EXT(mapSearch)			; Go see if we can find it (R7 comes back with mpFlags)

			andi.	r0,r7,lo16(mpSpecial|mpNest|mpPerm|mpBlock|mpRIP)	; Are we allowed to change it or is it being removed?
			mr.	r31,r3					; Save the mapping if we found it
			cmplwi	cr1,r0,0			; Are we removing it?
			crorc	cr0_eq,cr0_eq,cr1_eq	; Did we not find it or is it being removed?

			bt--	cr0_eq,htrNotFound	; Not found, something special, or being removed...

			bt++	pf64Bitb,htrDo64	; Split for 64-bit

			bl	mapInvPte32				; Invalidate and lock PTEG, also merge into physent

			cmplwi	cr1,r24,0			; Do we want to clear RC?
			lwz	r12,mpVAddr+4(r31)		; Get the bottom of the mapping vaddr field
			mr.	r3,r3					; Was there a previously valid PTE?
			li	r0,lo16(mpR|mpC)		; Get bits to clear

			and	r25,r5,r0				; Save the RC bits
			beq++	cr1,htrNoClr32		; Nope...

			andc	r12,r12,r0			; Clear mapping copy of RC
			andc	r5,r5,r0			; Clear PTE copy of RC
			sth	r12,mpVAddr+6(r31)		; Set the new RC

htrNoClr32:	beq--	htrNoOld32			; No previously valid PTE...

			sth	r5,6(r3)				; Store updated RC
			eieio						; Make sure we do not reorder
			stw	r4,0(r3)				; Revalidate the PTE

			eieio						; Make sure all updates come first
			stw	r6,0(r7)				; Unlock PCA

htrNoOld32:	la	r3,pmapSXlk(r28)		; Point to the pmap search lock
			bl	sxlkUnlock				; Unlock the search list
			li	r3,mapRtOK				; Set normal return
			b	htrR32					; Join common...

			.align	5


htrDo64:	bl	mapInvPte64				; Invalidate and lock PTEG, also merge into physent

			cmplwi	cr1,r24,0			; Do we want to clear RC?
			lwz	r12,mpVAddr+4(r31)		; Get the bottom of the mapping vaddr field
			mr.	r3,r3					; Was there a previously valid PTE?
			li	r0,lo16(mpR|mpC)		; Get bits to clear

			and	r25,r5,r0				; Save the RC bits
			beq++	cr1,htrNoClr64		; Nope...

			andc	r12,r12,r0			; Clear mapping copy of RC
			andc	r5,r5,r0			; Clear PTE copy of RC
			sth	r12,mpVAddr+6(r31)		; Set the new RC

htrNoClr64:	beq--	htrNoOld64			; Nope, no previously valid PTE...

			sth	r5,14(r3)				; Store updated RC
			eieio						; Make sure we do not reorder
			std	r4,0(r3)				; Revalidate the PTE

			eieio						; Make sure all updates come first
			stw	r6,0(r7)				; Unlock PCA

htrNoOld64:	la	r3,pmapSXlk(r28)		; Point to the pmap search lock
			bl	sxlkUnlock				; Unlock the search list
			li	r3,mapRtOK				; Set normal return
			b	htrR64					; Join common...

			.align	5

htrReturn:	bt++	pf64Bitb,htrR64		; Yes...

htrR32:		mtmsr	r27					; Restore enables/translation/etc.
			isync
			b	htrReturnC				; Join common...

htrR64:		mtmsrd	r27					; Restore enables/translation/etc.
			isync

htrReturnC:	lwz	r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Restore the return
			or	r3,r3,r25				; Send the RC bits back
			lwz	r24,FM_ARG0+0x00(r1)	; Restore a register
			lwz	r25,FM_ARG0+0x04(r1)	; Restore a register
			lwz	r26,FM_ARG0+0x08(r1)	; Restore a register
			mtlr	r0					; Restore the return
			lwz	r27,FM_ARG0+0x0C(r1)	; Restore a register
			lwz	r28,FM_ARG0+0x10(r1)	; Restore a register
			lwz	r29,FM_ARG0+0x14(r1)	; Restore a register
			lwz	r30,FM_ARG0+0x18(r1)	; Restore a register
			lwz	r31,FM_ARG0+0x1C(r1)	; Restore a register
			lwz	r1,0(r1)				; Pop the stack
			blr							; Leave...

			.align	5

htrBadLock:	li	r3,mapRtBadLk			; Set lock time out error code
			b	htrReturn				; Leave....

htrNotFound:
			la	r3,pmapSXlk(r28)		; Point to the pmap search lock
			bl	sxlkUnlock				; Unlock the search list

			li	r3,mapRtNotFnd			; Set that we did not find the requested page
			b	htrReturn				; Leave....


;
;			mapPhysFindLock - find the physent list for a mapping and lock it
;			R31 points to mapping
;
			.align	5

mapPhysFindLock:
			lbz	r4,mpFlags+1(r31)		; Get the index into the physent bank table
			lis	r3,ha16(EXT(pmap_mem_regions))	; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part)
			rlwinm	r4,r4,2,0,29		; Change index into byte offset
			addi	r4,r4,lo16(EXT(pmap_mem_regions))	; Get low part of address of entry
			add	r3,r3,r4				; Point to table entry
			lwz	r5,mpPAddr(r31)			; Get physical page number
			lwz	r7,mrStart(r3)			; Get the start of range
			lwz	r3,mrPhysTab(r3)		; Get the start of the entries for this bank
			sub	r6,r5,r7				; Get index to physent
			rlwinm	r6,r6,3,0,28		; Get offset to physent
			add	r3,r3,r6				; Point right at the physent
			b	mapPhysLock				; Join in the lock...

;
;			mapPhysLock - lock a physent list
;			R3 contains list header
;
			.align	5

mapPhysLockS:
			li	r2,lgKillResv			; Get a spot to kill reservation
			stwcx.	r2,0,r2				; Kill it...

mapPhysLockT:
			lwz	r2,ppLink(r3)			; Get physent chain header
			rlwinm.	r2,r2,0,0,0			; Is lock clear?
			bne--	mapPhysLockT		; Nope, still locked...

mapPhysLock:
			lwarx	r2,0,r3				; Get the lock
			rlwinm.	r0,r2,0,0,0			; Is it locked?
			oris	r0,r2,0x8000		; Set the lock bit
			bne--	mapPhysLockS		; It is locked, spin on it...
			stwcx.	r0,0,r3				; Try to stuff it back...
			bne--	mapPhysLock			; Collision, try again...
			isync						; Clear any speculations
			blr							; Leave...

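;
;			The lock protocol above, sketched in C-style pseudocode (not the real
;			interface): spin on plain loads so no reservation is held while the
;			lock is busy, then retry the atomic swap.
;
;				for (;;) {
;					old = lwarx(&pp->ppLink);               /* start reservation */
;					if (old & LOCK_BIT) {                   /* already held? */
;						kill_reservation();
;						while (pp->ppLink & LOCK_BIT)       /* spin with plain loads */
;							;
;						continue;
;					}
;					if (stwcx(&pp->ppLink, old | LOCK_BIT))
;						break;                              /* got it */
;				}
;				isync();                                    /* no speculation past the lock */
;
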
;
;			mapPhysUnlock - unlock a physent list
;			R3 contains list header
;
			.align	5

mapPhysUnlock:
			lwz	r0,ppLink(r3)			; Get physent chain header
			rlwinm	r0,r0,0,1,31		; Clear the lock bit
			eieio						; Make sure unlock comes last
			stw	r0,ppLink(r3)			; Unlock the list
			blr

2900;
2901; mapPhysMerge - merge the RC bits into the master copy
2902; R3 points to the physent
2903; R4 contains the RC bits
2904;
2905; Note: we just return if RC is 0
2906;
2907 .align 5
2908
2909mapPhysMerge:
2910 rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits
2911 la r5,ppLink+4(r3) ; Point to the RC field
2912 beqlr-- ; Leave if RC is 0...
2913
2914mapPhysMergeT:
2915 lwarx r6,0,r5 ; Get the RC part
2916 or r6,r6,r4 ; Merge in the RC
2917 stwcx. r6,0,r5 ; Try to stuff it back...
2918 bne-- mapPhysMergeT ; Collision, try again...
2919 blr ; Leave...
2920
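;
; In C terms, mapPhysMerge is just an atomic OR of the pre-positioned RC
; bits into the master copy (a sketch; rc_word stands for ppLink+4):
;
;	#include <stdatomic.h>
;	#include <stdint.h>
;
;	static void phys_merge_rc(_Atomic uint32_t *rc_word, uint32_t rc_bits) {
;	    if (rc_bits == 0) return;          /* nothing to merge */
;	    atomic_fetch_or_explicit(rc_word, rc_bits, memory_order_relaxed);
;	}
;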
2921;
2922; Sets the physent link pointer and preserves all flags
2923; The list is locked
2924; R3 points to physent
2925; R4 has link to set
2926;
2927
2928 .align 5
2929
2930mapPhyCSet32:
2931 la r5,ppLink+4(r3) ; Point to the link word
2932
2933mapPhyCSetR:
2934 lwarx r2,0,r5 ; Get the link and flags
2935 rlwimi r4,r2,0,26,31 ; Insert the flags
2936 stwcx. r4,0,r5 ; Stick them back
2937 bne-- mapPhyCSetR ; Someone else did something, try again...
2938 blr ; Return...
2939
2940 .align 5
2941
2942mapPhyCSet64:
2943 li r0,0xFF ; Get mask to clean up mapping pointer
2944 rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC00000000000003F
2945
2946mapPhyCSet64x:
2947 ldarx r2,0,r3 ; Get the link and flags
2948 and r5,r2,r0 ; Isolate the flags
2949 or r6,r4,r5 ; Add them to the link
2950 stdcx. r6,0,r3 ; Stick them back
2951 bne-- mapPhyCSet64x ; Someone else did something, try again...
2952 blr ; Return...
2953
2954;
2955; mapBumpBusy - increment the busy count on a mapping
2956; R3 points to mapping
2957;
2958
2959 .align 5
2960
2961mapBumpBusy:
2962 lwarx r4,0,r3 ; Get mpBusy
2963 addis r4,r4,0x0100 ; Bump the busy count
2964 stwcx. r4,0,r3 ; Save it back
2965 bne-- mapBumpBusy ; This did not work, try again...
2966 blr ; Leave...
2967
2968;
2969; mapDropBusy - decrement the busy count on a mapping
2970; R3 points to mapping
2971;
2972
2973 .globl EXT(mapping_drop_busy)
2974 .align 5
2975
2976LEXT(mapping_drop_busy)
2977mapDropBusy:
2978 lwarx r4,0,r3 ; Get mpBusy
2979 addis r4,r4,0xFF00 ; Drop the busy count
2980 stwcx. r4,0,r3 ; Save it back
2981 bne-- mapDropBusy ; This did not work, try again...
2982 blr ; Leave...
2983
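;
; The busy count lives in the top byte of the word at mpFlags, which is why
; bump adds 0x0100 with addis (0x01000000 to the word) and drop adds 0xFF00
; (0xFF000000, i.e. minus one in that byte, letting the carry wrap). A C
; sketch of the same idea:
;
;	#include <stdatomic.h>
;	#include <stdint.h>
;
;	#define MP_BUSY_ONE 0x01000000u        /* one count in the top byte */
;
;	static void map_bump_busy(_Atomic uint32_t *flags) {
;	    atomic_fetch_add_explicit(flags, MP_BUSY_ONE, memory_order_relaxed);
;	}
;
;	static void map_drop_busy(_Atomic uint32_t *flags) {
;	    atomic_fetch_sub_explicit(flags, MP_BUSY_ONE, memory_order_relaxed);
;	}
;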
2984;
2985; mapDrainBusy - drain the busy count on a mapping
2986; R3 points to mapping
2987; Note: we already have a busy for ourselves. Only one
2988; busy per processor is allowed, so we just spin here
2989; waiting for the count to drop to 1.
2990; Also, the mapping cannot be on any lists when we do this
2991; so all we are doing is waiting until it can be released.
2992;
2993
2994 .align 5
2995
2996mapDrainBusy:
2997 lwz r4,mpFlags(r3) ; Get mpBusy
2998 rlwinm r4,r4,8,24,31 ; Clean it up
2999 cmplwi r4,1 ; Is it just our busy?
3000 beqlr++ ; Yeah, it is clear...
3001 b mapDrainBusy ; Try again...
3002
3003
3004
3005;
3006; handleDSeg - handle a data segment fault
3007; handleISeg - handle an instruction segment fault
3008;
3009; All that we do here is to map these to DSI or ISI and ensure
3010; that the hash bit is not set. This forces the fault code
3011; to also handle the missing segment.
3012;
3013; At entry R2 contains per_proc, R13 contains savarea pointer,
3014; and R11 is the exception code.
3015;
3016
3017 .align 5
3018 .globl EXT(handleDSeg)
3019
3020LEXT(handleDSeg)
3021
3022 li r11,T_DATA_ACCESS ; Change fault to DSI
3023 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3024 b EXT(handlePF) ; Join common...
3025
3026 .align 5
3027 .globl EXT(handleISeg)
3028
3029LEXT(handleISeg)
3030
3031 li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI
3032 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3033 b EXT(handlePF) ; Join common...
3034
3035
3036/*
3037 * handlePF - handle a page fault interruption
3038 *
3039 * At entry R2 contains per_proc, R13 contains savarea pointer,
3040 * and R11 is the exception code.
3041 *
3042 * This first part does a quick check to see if we can handle the fault.
 3043 * We cannot handle any kind of protection exceptions here, so we pass
3044 * them up to the next level.
3045 *
3046 * NOTE: In order for a page-fault redrive to work, the translation miss
3047 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
3048 * before we come here.
3049 */
3050
3051 .align 5
3052 .globl EXT(handlePF)
3053
3054LEXT(handlePF)
3055
3056 mfsprg r12,2 ; Get feature flags
3057 cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction
3058 lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode
3059 mtcrf 0x02,r12 ; move pf64Bit to cr6
3060 lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here
3061 lwz r18,SAVflags(r13) ; Get the flags
3062
3063 beq-- gotIfetch ; We have an IFETCH here...
3064
3065 lwz r27,savedsisr(r13) ; Get the DSISR
3066 lwz r29,savedar(r13) ; Get the first half of the DAR
3067 lwz r30,savedar+4(r13) ; And second half
3068
3069 b ckIfProt ; Go check if this is a protection fault...
3070
3071gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value
3072 lwz r29,savesrr0(r13) ; Get the first half of the instruction address
3073 lwz r30,savesrr0+4(r13) ; And second half
3074 stw r27,savedsisr(r13) ; Save the "constructed" DSISR
3075
3076ckIfProt: and. r4,r27,r0 ; Is this a non-handlable exception?
3077 li r20,64 ; Set a limit of 64 nests for sanity check
3078 bne-- hpfExit ; Yes... (probably not though)
3079
3080;
3081; Note: if the RI is on, we are accessing user space from the kernel, therefore we
3082; should be loading the user pmap here.
3083;
3084
3085 andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space?
3086 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
3087 mr r19,r2 ; Remember the per_proc
3088 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
3089 mr r23,r30 ; Save the low part of faulting address
3090 beq-- hpfInKern ; Skip if we are in the kernel
3091 la r8,ppUserPmap(r19) ; Point to the current user pmap
3092
3093hpfInKern: mr r22,r29 ; Save the high part of faulting address
3094
3095 bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit...
3096
3097;
3098; On 32-bit machines we emulate a segment exception by loading unused SRs with a
3099; predefined value that corresponds to no address space. When we see that value
3100; we turn off the PTE miss bit in the DSISR to drive the code later on that will
3101; cause the proper SR to be loaded.
3102;
3103
3104 lwz r28,4(r8) ; Pick up the pmap
3105 rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive?
3106 mr r25,r28 ; Save the original pmap (in case we nest)
3107 bne hpfNest ; Segs are not ours if so...
3108 mfsrin r4,r30 ; Get the SR that was used for translation
3109 cmplwi r4,invalSpace ; Is this a simulated segment fault?
3110 bne++ hpfNest ; No...
3111
3112 rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR
3113 b hpfNest ; Join on up...
3114
3115 .align 5
3116
3117 nop ; Push hpfNest to a 32-byte boundary
3118 nop ; Push hpfNest to a 32-byte boundary
3119 nop ; Push hpfNest to a 32-byte boundary
3120 nop ; Push hpfNest to a 32-byte boundary
3121 nop ; Push hpfNest to a 32-byte boundary
3122 nop ; Push hpfNest to a 32-byte boundary
3123
3124hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit)
3125 mr r25,r28 ; Save the original pmap (in case we nest)
3126
3127;
3128; This is where we loop descending nested pmaps
3129;
3130
3131hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3132 addi r20,r20,-1 ; Count nest try
3133 bl sxlkShared ; Go get a shared lock on the mapping lists
3134 mr. r3,r3 ; Did we get the lock?
3135 bne-- hpfBadLock ; Nope...
3136
3137 mr r3,r28 ; Get the pmap pointer
3138 mr r4,r22 ; Get top of faulting vaddr
3139 mr r5,r23 ; Get bottom of faulting vaddr
3140 bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags)
3141
3142 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one?
3143 mr. r31,r3 ; Save the mapping if we found it
3144 cmplwi cr1,r0,0 ; Check for removal
3145 crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing
3146
3147 bt-- cr0_eq,hpfNotFound ; Not found or removing...
3148
3149 rlwinm. r0,r7,0,mpNestb,mpNestb ; Are we nested?
3150 mr r26,r7 ; Get the flags for this mapping (passed back from search call)
3151
3152 lhz r21,mpSpace(r31) ; Get the space
3153
3154 beq++ hpfFoundIt ; No, we found our guy...
3155
3156
3157#if pmapTransSize != 12
3158#error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize
3159#endif
3160 rlwinm. r0,r26,0,mpSpecialb,mpSpecialb ; Special handling?
3161 cmplwi cr1,r20,0 ; Too many nestings?
3162 bne-- hpfSpclNest ; Do we need to do special handling?
3163
3164hpfCSrch: lhz r21,mpSpace(r31) ; Get the space
3165 lwz r8,mpNestReloc(r31) ; Get the vaddr relocation
3166 lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half
3167 la r3,pmapSXlk(r28) ; Point to the old pmap search lock
3168 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
3169 lis r10,hi16(EXT(pmapTrans)) ; Get the translate table
3170 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
3171 blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop...
3172 or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit
3173 slwi r11,r21,3 ; Multiply space by 8
3174 ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part
3175 addc r23,r23,r9 ; Relocate bottom half of vaddr
3176 lwz r10,0(r10) ; Get the actual translation map
3177 slwi r12,r21,2 ; Multiply space by 4
3178 add r10,r10,r11 ; Add in the higher part of the index
3179 rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit)
3180 adde r22,r22,r8 ; Relocate the top half of the vaddr
3181 add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry
3182 bl sxlkUnlock ; Unlock the search list
3183
3184 lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap
3185 bf-- pf64Bitb,hpfNest ; Done if 32-bit...
3186
3187 ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap
3188 b hpfNest ; Go try the new pmap...
3189
3190;
3191; Error condition. We only allow 64 nestings. This keeps us from having to
3192; check for recursive nests when we install them.
3193;
3194
3195 .align 5
3196
3197hpfNestTooMuch:
3198 lwz r20,savedsisr(r13) ; Get the DSISR
3199 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3200 bl sxlkUnlock ; Unlock the search list (R3 good from above)
3201 ori r20,r20,1 ; Indicate that there was a nesting problem
3202 stw r20,savedsisr(r13) ; Stash it
3203 lwz r11,saveexception(r13) ; Restore the exception code
3204 b EXT(PFSExit) ; Go exit...
3205
3206;
3207; Error condition - lock failed - this is fatal
3208;
3209
3210 .align 5
3211
3212hpfBadLock:
3213 lis r0,hi16(Choke) ; System abend
3214 ori r0,r0,lo16(Choke) ; System abend
3215 li r3,failMapping ; Show mapping failure
3216 sc
3217;
3218; Did not find any kind of mapping
3219;
3220
3221 .align 5
3222
3223hpfNotFound:
3224 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3225 bl sxlkUnlock ; Unlock it
3226 lwz r11,saveexception(r13) ; Restore the exception code
3227
3228hpfExit: ; We need this because we cannot do a relative branch
3229 b EXT(PFSExit) ; Go exit...
3230
3231
3232;
3233; Here is where we handle special mappings. So far, the only use is to load a
3234; processor specific segment register for copy in/out handling.
3235;
3236; The only (so far implemented) special map is used for copyin/copyout.
3237; We keep a mapping of a "linkage" mapping in the per_proc.
3238; The linkage mapping is basically a nested pmap that is switched in
3239; as part of context switch. It relocates the appropriate user address
3240; space slice into the right place in the kernel.
3241;
3242
3243 .align 5
3244
3245hpfSpclNest:
3246 la r31,ppCIOmp(r19) ; Just point to the mapping
3247 oris r27,r27,hi16(dsiSpcNest) ; Show that we had a special nesting here
3248 b hpfCSrch ; Go continue search...
3249
3250
3251;
3252; We have now found a mapping for the address we faulted on.
3253;
3254
3255;
3256; Here we go about calculating what the VSID should be. We concatenate
3257; the space ID (14 bits wide) 3 times. We then slide the vaddr over
3258; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID).
3259; Then we XOR the expanded space ID and the shifted vaddr. This gives us
3260; the VSID.
3261;
3262; This is used both for segment handling and PTE handling
3263;
3264
3265
3266#if maxAdrSpb != 14
3267#error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!!
3268#endif
3269
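;
; For reference, the recipe reads roughly like this in C (a sketch only;
; the exact field positions are in the diagrams at the top of this file):
;
;	#include <stdint.h>
;
;	static uint64_t make_vsid(uint64_t space, uint64_t ea) {
;	    uint64_t sid3 = (space << 28) | (space << 14) | space; /* 3 copies of the 14-bit ID */
;	    uint64_t esid = ea >> 28;          /* EA bits 0:35 */
;	    return (esid << 14) ^ sid3;        /* shifted EA XOR expanded space ID */
;	}
;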
3270 .align 5
3271
3272hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment
3273 rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment?
3274 rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID
3275 rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order
3276 rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over
3277 rlwinm r0,r27,0,dsiSpcNestb,dsiSpcNestb ; Isolate special nest flag
3278 rlwimi r21,r21,14,4,17 ; Make a second copy of space above first
3279 cmplwi cr5,r0,0 ; Did we just do a special nesting?
3280 rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35
3281 crorc cr0_eq,cr0_eq,cr5_eq ; Force ourselves through the seg load code if special nest
3282 rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register
3283 xor r14,r14,r20 ; Calculate the top half of VSID
3284 xor r15,r15,r21 ; Calculate the bottom half of the VSID
3285 rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing)
3286 rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry
3287 rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top
3288 rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position
3289 or r12,r12,r15 ; Add key into the bottom of VSID
3290;
3291; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12
3292
3293 bne++ hpfPteMiss ; Nope, normal PTE miss...
3294
3295;
3296; Here is the only place that we make an entry in the pmap segment cache.
3297;
3298; Note that we do not make an entry in the segment cache for special
3299; nested mappings. This makes the copy in/out segment get refreshed
3300; when switching threads.
3301;
3302; The first thing that we do is to look up the ESID we are going to load
3303; into a segment in the pmap cache. If it is already there, this is
3304; a segment that appeared since the last time we switched address spaces.
3305; If all is correct, then it was another processor that made the cache
3306; entry. If not, well, it is an error that we should die on, but I have
3307; not figured a good way to trap it yet.
3308;
3309; If we get a hit, we just bail, otherwise, lock the pmap cache, select
3310; an entry based on the generation number, update the cache entry, and
3311; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit
3312; entries that correspond to the last 4 bits (32:35 for 64-bit and
3313; 0:3 for 32-bit) of the ESID.
3314;
3315; Then we unlock and bail.
3316;
3317; First lock it. Then select a free slot or steal one based on the generation
3318; number. Then store it, update the allocation flags, and unlock.
3319;
3320; The cache entry contains an image of the ESID/VSID pair we would load for
3321; 64-bit architecture. For 32-bit, it is a simple transform to an SR image.
3322;
3323; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not
3324; the current one, which may have changed because we nested.
3325;
3326; Also remember that we do not store the valid bit in the ESID. If we
3327; do, this will break some other stuff.
3328;
3329
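;
; The slot choice described above amounts to this C sketch (approximate;
; free slots show as 1 bits in the control word and pmapSegCacheUse is
; taken to be a power of two):
;
;	static int pick_seg_cache_slot(uint32_t cctl, int nslots, uint32_t gen) {
;	    int free = cctl ? __builtin_clz(cctl) : 32;    /* first invalid entry */
;	    if (free < nslots) return free;                /* take a free slot */
;	    return (int)(gen & (uint32_t)(nslots - 1));    /* else steal by generation */
;	}
;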
3330 bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault....
3331
3332 mr r3,r25 ; Point to the pmap
3333 mr r4,r22 ; ESID high half
3334 mr r5,r23 ; ESID low half
3335 bl pmapCacheLookup ; Go see if this is in the cache already
3336
3337 mr. r3,r3 ; Did we find it?
3338 mr r4,r11 ; Copy this to a different register
3339
3340 bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry...
3341
3342 lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table
3343 lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table
3344
3345 cntlzw r7,r4 ; Find a free slot
3346
3347 subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one
3348 rlwinm r30,r30,0,0,3 ; Clean up the ESID
3349 srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not
3350 addi r5,r4,1 ; Bump the generation number
3351 and r7,r7,r6 ; Clear bit number if none empty
3352 andc r8,r4,r6 ; Clear generation count if we found an empty
3353 rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word
3354 or r7,r7,r8 ; Select a slot number
3355 li r8,0 ; Clear
3356 andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using
3357 oris r8,r8,0x8000 ; Get the high bit on
3358 la r9,pmapSegCache(r25) ; Point to the segment cache
3359 slwi r6,r7,4 ; Get index into the segment cache
3360 slwi r2,r7,2 ; Get index into the segment cache sub-tag index
3361 srw r8,r8,r7 ; Get the mask
3362 cmplwi r2,32 ; See if we are in the first or second half of sub-tag
3363 li r0,0 ; Clear
3364 rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out
3365 oris r0,r0,0xF000 ; Get the sub-tag mask
3366 add r9,r9,r6 ; Point to the cache slot
3367 srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift work for either half)
3368 srw r5,r30,r2 ; Slide sub-tag to right slot (shift work for either half)
3369
3370 stw r29,sgcESID(r9) ; Save the top of the ESID
3371 andc r10,r10,r0 ; Clear sub-tag slot in case we are in top
3372 andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom
3373 stw r30,sgcESID+4(r9) ; Save the bottom of the ESID
3374 or r10,r10,r5 ; Stick in subtag in case top half
3375 or r11,r11,r5 ; Stick in subtag in case bottom half
3376 stw r14,sgcVSID(r9) ; Save the top of the VSID
3377 andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated
3378 stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key
3379 bge hpfSCSTbottom ; Go save the bottom part of sub-tag
3380
3381 stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag
3382 b hpfNoCacheEnt ; Go finish up...
3383
3384hpfSCSTbottom:
3385 stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag
3386
3387
3388hpfNoCacheEnt:
3389 eieio ; Make sure cache is updated before lock
3390 stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number
3391
3392
3393hpfNoCacheEnt2:
3394 lwz r4,ppMapFlags(r19) ; Get the protection key modifier
3395 bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment...
3396
3397;
3398; Make and enter 32-bit segment register
3399;
3400
3401 lwz r16,validSegs(r19) ; Get the valid SR flags
3402 xor r12,r12,r4 ; Alter the storage key before loading segment register
3403 rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting
3404 rlwinm r6,r12,19,1,3 ; Insert the keys and N bit
3405 lis r0,0x8000 ; Set bit 0
3406 rlwimi r6,r12,20,12,31 ; Insert bits 4:23 of the VSID
3407 srw r0,r0,r2 ; Get bit corresponding to SR
3408 rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents
3409 or r16,r16,r0 ; Show that SR is valid
3410
3411 mtsrin r6,r30 ; Set the actual SR
3412
3413 stw r16,validSegs(r19) ; Set the valid SR flags
3414
3415 b hpfPteMiss ; SR loaded, go do a PTE...
3416
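;
; The segment register image built above looks roughly like this in C
; (a sketch, not the exact rlwimi recipe: T = 0, Ks/Kp/N from the storage
; key, 24-bit VSID in the low bits):
;
;	static uint32_t make_sr_image(uint32_t keys_n, uint32_t vsid24) {
;	    return ((keys_n & 0x7u) << 28)     /* Ks, Kp, N just below the T bit */
;	         | (vsid24 & 0x00FFFFFFu);     /* VSID in the low 24 bits */
;	}
;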
3417;
3418; Make and enter 64-bit segment look-aside buffer entry.
3419; Note that the cache entry is the right format except for valid bit.
3420; We also need to convert from long long to 64-bit register values.
3421;
3422
3423
3424 .align 5
3425
3426hpfLoadSeg64:
3427 ld r16,validSegs(r19) ; Get the valid SLB entry flags
3428 sldi r8,r29,32 ; Move high order address over
3429 sldi r10,r14,32 ; Move high part of VSID over
3430
3431 not r3,r16 ; Make valids be 0s
3432 li r0,1 ; Prepare to set bit 0
3433
3434 cntlzd r17,r3 ; Find a free SLB
3435 xor r12,r12,r4 ; Alter the storage key before loading segment table entry
3436 or r9,r8,r30 ; Form full 64-bit address
3437 cmplwi r17,63 ; Did we find a free SLB entry?
3438 sldi r0,r0,63 ; Get bit 0 set
3439 or r10,r10,r12 ; Move in low part and keys
3440 addi r17,r17,1 ; Skip SLB 0 always
3441 blt++ hpfFreeSeg ; Yes, go load it...
3442
3443;
3444; No free SLB entries, select one that is in use and invalidate it
3445;
3446 lwz r4,ppSegSteal(r19) ; Get the next slot to steal
3447 addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
3448 addi r4,r4,1 ; Set next slot to steal
3449 slbmfee r7,r17 ; Get the entry that is in the selected spot
3450 subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap
3451 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
3452 srawi r2,r2,31 ; Get -1 if steal index still in range
3453 slbie r7 ; Invalidate the in-use SLB entry
3454 and r4,r4,r2 ; Reset steal index when it should wrap
3455 isync ;
3456
3457 stw r4,ppSegSteal(r19) ; Set the next slot to steal
3458;
3459; We are now ready to stick the SLB entry in the SLB and mark it in use
3460;
3461
3462hpfFreeSeg:
3463 subi r4,r17,1 ; Adjust shift to account for skipping slb 0
3464 mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear
3465 srd r0,r0,r4 ; Set bit mask for allocation
3466 oris r9,r9,0x0800 ; Turn on the valid bit
3467 or r16,r16,r0 ; Turn on the allocation flag
3468 rldimi r9,r17,0,58 ; Copy in the SLB entry selector
3469
3470 beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest...
3471 slbie r7 ; Blow away a potential duplicate
3472
3473hpfNoBlow: slbmte r10,r9 ; Make that SLB entry
3474
3475 std r16,validSegs(r19) ; Mark as valid
3476 b hpfPteMiss ; STE loaded, go do a PTE...
3477
3478;
3479; The segment has been set up and loaded if need be. Now we are ready to build the
3480; PTE and get it into the hash table.
3481;
3482; Note that there is actually a race here. If we start fault processing on
3483; a different pmap, i.e., we have descended into a nested pmap, it is possible
3484; that the nest could have been removed from the original pmap. We would
3485; succeed with this translation anyway. I do not think we need to worry
3486; about this (famous last words) because nobody should be unnesting anything
3487; if there are still people actively using them. It should be up to the
3488; higher level VM system to put the kibosh on this.
3489;
3490; There is also another race here: if we fault on the same mapping on more than
3491; one processor at the same time, we could end up with multiple PTEs for the same
3492; mapping. This is not a good thing.... We really only need one of the
3493; fault handlers to finish, so what we do is to set a "fault in progress" flag in
3494; the mapping. If we see that set, we just abandon the handler and hope that by
3495; the time we restore context and restart the interrupted code, the fault has
3496; been resolved by the other guy. If not, we will take another fault.
3497;
3498
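;
; The mpFIP handshake amounts to an atomic test-and-set, roughly (a C
; sketch; the mpFIP and mpHValid bit masks are taken as given from assym):
;
;	#include <stdatomic.h>
;	#include <stdint.h>
;
;	static int claim_fault(_Atomic uint32_t *mp_flags, uint32_t pte_off,
;	                       uint32_t fip_bit, uint32_t valid_bit) {
;	    uint32_t old = atomic_load_explicit(mp_flags, memory_order_relaxed);
;	    do {
;	        if ((old & fip_bit) || (pte_off & valid_bit))
;	            return 0;                  /* someone else is (or was) on it */
;	    } while (!atomic_compare_exchange_weak(mp_flags, &old, old | fip_bit));
;	    return 1;                          /* we own the fault now */
;	}
;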
3499;
3500; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not.
3501; It is required to stay there until after we call mapSelSlot!!!!
3502;
3503
3504 .align 5
3505
3506hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field
3507 lwz r12,mpPte(r31) ; Get the quick pointer to PTE
3508 li r3,mpHValid ; Get the PTE valid bit
3509 andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side?
3510 ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag
3511 crnot cr1_eq,cr0_eq ; Remember if FIP was on
3512 and. r12,r12,r3 ; Isolate the valid bit
3513 crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail...
3514 beq-- hpfAbandon ; Yes, other processor is or already has handled this...
3515 andi. r0,r2,mpBlock ; Is this a block mapping?
3516 crmove cr7_eq,cr0_eq ; Remember if we have a block mapping
3517 stwcx. r2,0,r31 ; Store the flags
3518 bne-- hpfPteMiss ; Collision, try again...
3519
3520 bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff...
3521
3522;
3523; At this point we are about to do the 32-bit PTE generation.
3524;
3525; The following is the R14:R15 pair that contains the "shifted" VSID:
3526;
3527; 0 0 1 2 3 4 4 5 6
3528; 0 8 6 4 2 0 8 6 3
3529; +--------+--------+--------+--------+--------+--------+--------+--------+
3530; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
3531; +--------+--------+--------+--------+--------+--------+--------+--------+
3532;
3533; The 24 bits of the 32-bit architecture VSID is in the following:
3534;
3535; 0 0 1 2 3 4 4 5 6
3536; 0 8 6 4 2 0 8 6 3
3537; +--------+--------+--------+--------+--------+--------+--------+--------+
3538; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
3539; +--------+--------+--------+--------+--------+--------+--------+--------+
3540;
3541
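;
; What follows is the classic 32-bit hashed page table lookup; in C it is
; roughly (a sketch; htabmask is the 9-bit size mask from SDR1):
;
;	static uint32_t pteg_offset32(uint32_t vsid, uint32_t ea, uint32_t htabmask) {
;	    uint32_t pgidx = (ea >> 12) & 0xFFFFu;         /* EA bits 4:19, the page index */
;	    uint32_t hash  = (vsid & 0x7FFFFu) ^ pgidx;    /* primary hash */
;	    uint32_t mask  = (htabmask << 10) | 0x3FFu;    /* wrap into the table */
;	    return (hash & mask) << 6;                     /* 64 bytes per PTEG */
;	}
;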
3542
3543hpfBldPTE32:
3544 lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion)
3545 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
3546
3547 mfsdr1 r27 ; Get the hash table base address
3548
3549 rlwinm r0,r23,0,4,19 ; Isolate just the page index
3550 rlwinm r18,r23,10,26,31 ; Extract the API
3551 xor r19,r15,r0 ; Calculate hash << 12
3552 mr r2,r25 ; Save the flag part of the mapping
3553 rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image
3554 rlwinm r16,r27,16,7,15 ; Extract the hash table size
3555 rlwinm r25,r25,0,0,19 ; Clear out the flags
3556 slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing not supported)
3557 sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map)
3558 ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask
3559 rlwinm r27,r27,0,0,15 ; Extract the hash table base
3560 rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table
3561 add r24,r24,r25 ; Adjust to true physical address
3562 rlwimi r18,r15,27,5,24 ; Move bits 32:51 of the "shifted" VSID into the PTE image
3563 rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot
3564 and r19,r19,r16 ; Wrap hash table offset into the hash table
3565 ori r24,r24,lo16(mpR) ; Turn on the reference bit right now
3566 rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA
3567 add r19,r19,r27 ; Point to the PTEG
3568 subfic r20,r20,-4 ; Get negative offset to PCA
3569 oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
3570 add r20,r20,r27 ; Point to the PCA slot
3571
3572;
3573; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower.
3574; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
3575;
3576; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
3577; that some other processor beat us and stuck in a PTE or that
3578; all we had was a simple segment exception and the PTE was there the whole time.
3579; If we find a pointer, we are done.
3580;
3581
3582 mr r7,r20 ; Copy the PCA pointer
3583 bl mapLockPteg ; Lock the PTEG
3584
3585 lwz r12,mpPte(r31) ; Get the offset to the PTE
3586 mr r17,r6 ; Remember the PCA image
3587 mr r16,r6 ; Prime the post-select PCA image
3588 andi. r0,r12,mpHValid ; Is there a PTE here already?
3589 li r21,8 ; Get the number of slots
3590
3591 bne- cr7,hpfNoPte32 ; Skip this for a block mapping...
3592
3593 bne- hpfBailOut ; Someone already did this for us...
3594
3595;
3596; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a
3597; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
3598; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
3599; R4 returns the slot index.
3600;
3601; REMEMBER: CR7 indicates that we are building a block mapping.
3602;
3603
3604hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots
3605 mr r6,r17 ; Get back the original PCA
3606 rlwimi r6,r16,0,8,15 ; Insert the updated steal slot
3607 blt- hpfBailOut ; Holy Cow, all slots are locked...
3608
3609 bl mapSelSlot ; Go select a slot (note that the PCA image is already set up)
3610
3611 cmplwi cr5,r3,1 ; Did we steal a slot?
3612 rlwinm r5,r4,3,26,28 ; Convert index to slot offset
3613 add r19,r19,r5 ; Point directly to the PTE
3614 mr r16,r6 ; Remember the PCA image after selection
3615 blt+ cr5,hpfInser32 ; Nope, no steal...
3616
3617 lwz r6,0(r19) ; Get the old PTE
3618 lwz r7,4(r19) ; Get the real part of the stealee
3619 rlwinm r6,r6,0,1,31 ; Clear the valid bit
3620 bgt cr5,hpfNipBM ; Do not try to lock a non-existent physent for a block mapping...
3621 srwi r3,r7,12 ; Change phys address to a ppnum
3622 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
3623 cmplwi cr1,r3,0 ; Check if this is in RAM
3624 bne- hpfNoPte32 ; Could not get it, try for another...
3625
3626 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
3627
3628hpfNipBM: stw r6,0(r19) ; Set the invalid PTE
3629
3630 sync ; Make sure the invalid is stored
3631 li r9,tlbieLock ; Get the TLBIE lock
3632 rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part
3633
3634hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock
3635 mfsprg r4,0 ; Get the per_proc
3636 rlwinm r8,r6,25,18,31 ; Extract the space ID
3637 rlwinm r11,r6,25,18,31 ; Extract the space ID
3638 lwz r7,hwSteals(r4) ; Get the steal count
3639 srwi r2,r6,7 ; Align segment number with hash
3640 rlwimi r11,r11,14,4,17 ; Get copy above ourselves
3641 mr. r0,r0 ; Is it locked?
3642 srwi r0,r19,6 ; Align PTEG offset for back hash
3643 xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits)
3644 xor r11,r11,r0 ; Hash backwards to partial vaddr
3645 rlwinm r12,r2,14,0,3 ; Shift segment up
3646 mfsprg r2,2 ; Get feature flags
3647 li r0,1 ; Get our lock word
3648 rlwimi r12,r6,22,4,9 ; Move up the API
3649 bne- hpfTLBIE32 ; It is locked, go wait...
3650 rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr
3651
3652 stwcx. r0,0,r9 ; Try to get it
3653 bne- hpfTLBIE32 ; We were beaten...
3654 addi r7,r7,1 ; Bump the steal count
3655
3656 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
3657 li r0,0 ; Lock clear value
3658
3659 tlbie r12 ; Invalidate it everywhere
3660
3661 stw r0,tlbieLock(0) ; Clear the tlbie lock
3662
3663 beq- hpfNoTS32 ; Can not have MP on this machine...
3664
3665 eieio ; Make sure that the tlbie happens first
3666 tlbsync ; Wait for everyone to catch up
3667 sync ; Make sure of it all
3668
3669hpfNoTS32: stw r7,hwSteals(r4) ; Save the steal count
3670 bgt cr5,hpfInser32 ; We just stole a block mapping...
3671
3672 lwz r4,4(r19) ; Get the RC of the just invalidated PTE
3673
3674 la r11,ppLink+4(r3) ; Point to the master RC copy
3675 lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping
3676 rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC
3677
3678hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC
3679 or r0,r0,r2 ; Merge in the new RC
3680 stwcx. r0,0,r11 ; Try to stick it back
3681 bne- hpfMrgRC32 ; Try again if we collided...
3682
3683
3684hpfFPnch: rlwinm. r7,r7,0,0,25 ; Clean and test mapping address
3685 beq- hpfLostPhys ; We could not find our mapping. Kick the bucket...
3686
3687 lhz r10,mpSpace(r7) ; Get the space
3688 lwz r9,mpVAddr+4(r7) ; And the vaddr
3689 cmplw cr1,r10,r8 ; Is this one of ours?
3690 xor r9,r12,r9 ; Compare virtual address
3691 cmplwi r9,0x1000 ; See if we really match
3692 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
3693 beq+ hpfFPnch2 ; Yes, found ours...
3694
3695 lwz r7,mpAlias+4(r7) ; Chain on to the next
3696 b hpfFPnch ; Check it out...
3697
3698hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG
3699 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG)
3700 bl mapPhysUnlock ; Unlock the physent now
3701
3702hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
3703
3704 stw r24,4(r19) ; Stuff in the real part of the PTE
3705 eieio ; Make sure this gets there first
3706
3707 stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid
3708 mr r17,r16 ; Get the PCA image to save
3709 b hpfFinish ; Go join the common exit code...
3710
3711
3712;
3713; At this point we are about to do the 64-bit PTE generation.
3714;
3715; The following is the R14:R15 pair that contains the "shifted" VSID:
3716;
3717; 0 0 1 2 3 4 4 5 6
3718; 0 8 6 4 2 0 8 6 3
3719; +--------+--------+--------+--------+--------+--------+--------+--------+
3720; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
3721; +--------+--------+--------+--------+--------+--------+--------+--------+
3722;
3723;
3724
3725 .align 5
3726
3727hpfBldPTE64:
3728 ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping
3729 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
3730
3731 mfsdr1 r27 ; Get the hash table base address
3732
3733 sldi r11,r22,32 ; Slide top of adjusted EA over
3734 sldi r14,r14,32 ; Slide top of VSID over
3735 rlwinm r5,r27,0,27,31 ; Isolate the size
3736 eqv r16,r16,r16 ; Get all foxes here
3737 rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN
3738 mr r2,r10 ; Save the flag part of the mapping
3739 or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value
3740 rldicr r27,r27,0,45 ; Clean up the hash table base
3741 or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value
3742 rlwinm r0,r11,0,4,19 ; Clear out everything but the page
3743 subfic r5,r5,46 ; Get number of leading zeros
3744 xor r19,r0,r15 ; Calculate hash
3745 ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE
3746 srd r16,r16,r5 ; Shift over to get length of table
3747 srdi r19,r19,5 ; Convert page offset to hash table offset
3748 rldicr r16,r16,0,56 ; Clean up lower bits in hash table size
3749 rldicr r10,r10,0,51 ; Clear out flags
3750 sldi r24,r24,12 ; Change ppnum to physical address
3751 sub r11,r11,r10 ; Get the offset from the base mapping
3752 and r19,r19,r16 ; Wrap into hash table
3753 add r24,r24,r11 ; Get actual physical address of this page
3754 srdi r20,r19,5 ; Convert PTEG offset to PCA offset
3755 rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc.
3756 subfic r20,r20,-4 ; Get negative offset to PCA
3757 ori r24,r24,lo16(mpR) ; Force on the reference bit
3758 add r20,r20,r27 ; Point to the PCA slot
3759 add r19,r19,r27 ; Point to the PTEG
3760
3761;
3762; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower.
3763; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
3764;
3765; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
3766; that some other processor beat us and stuck in a PTE or that
3767; all we had was a simple segment exception and the PTE was there the whole time.
3768; If we find a pointer, we are done.
3769;
3770
3771 mr r7,r20 ; Copy the PCA pointer
3772 bl mapLockPteg ; Lock the PTEG
3773
3774 lwz r12,mpPte(r31) ; Get the offset to the PTE
3775 mr r17,r6 ; Remember the PCA image
3776 mr r18,r6 ; Prime post-selection PCA image
3777 andi. r0,r12,mpHValid ; See if we have a PTE now
3778 li r21,8 ; Get the number of slots
3779
3780 bne-- cr7,hpfNoPte64 ; Skip this for a block mapping...
3781
3782 bne-- hpfBailOut ; Someone already did this for us...
3783
3784;
3785; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a
3786; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
3787; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
3788; R4 returns the slot index.
3789;
3790; REMEMBER: CR7 indicates that we are building a block mapping.
3791;
3792
3793hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots
3794 mr r6,r17 ; Restore original state of PCA
3795 rlwimi r6,r18,0,8,15 ; Insert the updated steal slot
3796 blt- hpfBailOut ; Holy Cow, all slots are locked...
3797
3798 bl mapSelSlot ; Go select a slot
3799
3800 cmplwi cr5,r3,1 ; Did we steal a slot?
3801 rlwinm r5,r4,4,25,27 ; Convert index to slot offset
3802 mr r18,r6 ; Remember the PCA image after selection
3803 add r19,r19,r5 ; Point directly to the PTE
3804 lwz r10,hwSteals(r2) ; Get the steal count
3805 blt++ cr5,hpfInser64 ; Nope, no steal...
3806
3807 ld r6,0(r19) ; Get the old PTE
3808 ld r7,8(r19) ; Get the real part of the stealee
3809 rldicr r6,r6,0,62 ; Clear the valid bit
3810 bgt cr5,hpfNipBMx ; Do not try to lock a non-existent physent for a block mapping...
3811 srdi r3,r7,12 ; Change physical address to a page number (ppnum)
3812 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
3813 cmplwi cr1,r3,0 ; Check if this is in RAM
3814 bne-- hpfNoPte64 ; Could not get it, try for another...
3815
3816 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
3817
3818hpfNipBMx: std r6,0(r19) ; Set the invalid PTE
3819 li r9,tlbieLock ; Get the TLBIE lock
3820
3821 srdi r11,r6,5 ; Shift VSID over for back hash
3822 mfsprg r4,0 ; Get the per_proc
3823 xor r11,r11,r19 ; Hash backwards to get low bits of VPN
3824 sync ; Make sure the invalid is stored
3825
3826 sldi r12,r6,16 ; Move AVPN to EA position
3827 sldi r11,r11,5 ; Move this to the page position
3828
3829hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock
3830 mr. r0,r0 ; Is it locked?
3831 li r0,1 ; Get our lock word
3832 bne-- hpfTLBIE65 ; It is locked, go wait...
3833
3834 stwcx. r0,0,r9 ; Try to get it
3835 rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN
3836 rldicl r8,r6,52,50 ; Isolate the address space ID
3837 bne-- hpfTLBIE64 ; We were beaten...
3838 addi r10,r10,1 ; Bump the steal count
3839
3840 rldicl r11,r12,0,16 ; Clear because the book says so
3841 li r0,0 ; Lock clear value
3842
3843 tlbie r11 ; Invalidate it everywhere
3844
3845 stw r0,tlbieLock(0) ; Clear the tlbie lock
3846
3847 mr r7,r8 ; Get a copy of the space ID
3848 eieio ; Make sure that the tlbie happens first
3849 rldimi r7,r7,14,36 ; Copy address space to make hash value
3850 tlbsync ; Wait for everyone to catch up
3851 rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top
3852 isync
3853 srdi r2,r6,26 ; Shift original segment down to bottom
3854
3855 ptesync ; Make sure of it all
3856 xor r7,r7,r2 ; Compute original segment
3857
3858 stw r10,hwSteals(r4) ; Save the steal count
3859 bgt cr5,hpfInser64 ; We just stole a block mapping...
3860
3861 rldimi r12,r7,28,0 ; Insert decoded segment
3862 rldicl r4,r12,0,13 ; Trim to max supported address
3863
3864 ld r12,8(r19) ; Get the RC of the just invalidated PTE
3865
3866 la r11,ppLink+4(r3) ; Point to the master RC copy
3867 ld r7,ppLink(r3) ; Grab the pointer to the first mapping
3868 rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC
3869
3870hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC
3871 li r12,0xFF ; Get mask to clean up alias pointer
3872 or r0,r0,r2 ; Merge in the new RC
3873 rldicl r12,r12,62,0 ; Rotate clean up mask to get 0xC00000000000003F
3874 stwcx. r0,0,r11 ; Try to stick it back
3875 bne-- hpfMrgRC64 ; Try again if we collided...
3876
3877hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address
3878 beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket...
3879
3880 lhz r10,mpSpace(r7) ; Get the space
3881 ld r9,mpVAddr(r7) ; And the vaddr
3882 cmplw cr1,r10,r8 ; Is this one of ours?
3883 xor r9,r4,r9 ; Compare virtual address
3884 cmpldi r9,0x1000 ; See if we really match
3885 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
3886 beq++ hpfFPnch2x ; Yes, found ours...
3887
3888 ld r7,mpAlias(r7) ; Chain on to the next
3889 b hpfFPnchx ; Check it out...
3890
3891 .align 5
3892
3893hpfTLBIE65: li r7,lgKillResv ; Point to the reservation kill area
3894 stwcx. r7,0,r7 ; Kill reservation
3895
3896hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock
3897 mr. r0,r0 ; Is it locked?
3898 beq++ hpfTLBIE64 ; Not locked anymore, go try to grab it...
3899 b hpfTLBIE63 ; Still locked, keep waiting...
3900
3901
3902
3903hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG
3904 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though)
3905 bl mapPhysUnlock ; Unlock the physent now
3906
3907
3908hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE
3909 eieio ; Make sure this gets there first
3910 std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid
3911 mr r17,r18 ; Get the PCA image to set
3912 b hpfFinish ; Go join the common exit code...
3913
3914hpfLostPhys:
3915 lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead
3916 ori r0,r0,lo16(Choke) ; System abend
3917 sc
3918
3919;
3920; This is the common code we execute when we are finished setting up the PTE.
3921;
3922
3923 .align 5
3924
3925hpfFinish: sub r4,r19,r27 ; Get offset of PTE
3926 ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset
3927 bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map
3928 stw r4,mpPte(r31) ; Remember our PTE
3929
3930hpfBailOut: eieio ; Make sure all updates come first
3931 stw r17,0(r20) ; Unlock and set the final PCA
3932
3933;
3934; This is where we go if we have started processing the fault, but find that someone
3935; else has taken care of it.
3936;
3937
3938hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags
3939 rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag
3940 sth r2,mpFlags+2(r31) ; Set it
3941
3942 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3943 bl sxlkUnlock ; Unlock the search list
3944
3945 li r11,T_IN_VAIN ; Say that it was handled
3946 b EXT(PFSExit) ; Leave...
3947
3948;
3949; This is where we go when we find that someone else
3950; is in the process of handling the fault.
3951;
3952
3953hpfAbandon: li r3,lgKillResv ; Kill off any reservation
3954 stwcx. r3,0,r3 ; Do it
3955
3956 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3957 bl sxlkUnlock ; Unlock the search list
3958
3959 li r11,T_IN_VAIN ; Say that it was handled
3960 b EXT(PFSExit) ; Leave...
3961
3962
3963
3964/*
3965 * hw_set_user_space(pmap)
3966 * hw_set_user_space_dis(pmap)
3967 *
3968 * Indicate whether memory space needs to be switched.
3969 * We really need to turn off interrupts here, because we need to be non-preemptable
3970 *
3971 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
3972 * register usage here. The VMM switch code in vmachmon.s that calls this
 3973 * knows what registers are in use. Check that code if these change.
3974 */
3975
3976
3977
3978 .align 5
3979 .globl EXT(hw_set_user_space)
3980
3981LEXT(hw_set_user_space)
3982
3983 lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable
3984 mfmsr r10 ; Get the current MSR
3985 ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP
3986 ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE
3987 andc r10,r10,r8 ; Turn off VEC, FP for good
3988 andc r9,r10,r9 ; Turn off EE also
3989 mtmsr r9 ; Disable them
3990 isync ; Make sure FP and vec are off
3991 mfsprg r6,0 ; Get the per_proc_info address
3992 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
3993 mfsprg r4,2 ; Get the feature flags
3994 lwz r7,pmapvr(r3) ; Get the v to r translation
3995 lwz r8,pmapvr+4(r3) ; Get the v to r translation
3996 mtcrf 0x80,r4 ; Get the Altivec flag
3997 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
3998 cmplw cr1,r3,r2 ; Same address space as before?
3999 stw r7,ppUserPmap(r6) ; Show our real pmap address
4000 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4001 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4002 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4003 mtmsr r10 ; Restore interruptions
4004 beqlr-- cr1 ; Leave if the same address space or not Altivec
4005
4006 dssall ; Need to kill all data streams if adrsp changed
4007 sync
4008 blr ; Return...
4009
4010 .align 5
4011 .globl EXT(hw_set_user_space_dis)
4012
4013LEXT(hw_set_user_space_dis)
4014
4015 lwz r7,pmapvr(r3) ; Get the v to r translation
4016 mfsprg r4,2 ; Get the feature flags
4017 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4018 mfsprg r6,0 ; Get the per_proc_info address
4019 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4020 mtcrf 0x80,r4 ; Get the Altivec flag
4021 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4022 cmplw cr1,r3,r2 ; Same address space as before?
4023 stw r7,ppUserPmap(r6) ; Show our real pmap address
4024 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4025 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4026 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4027 beqlr-- cr1 ; Leave if the same
4028
4029 dssall ; Need to kill all data streams if adrsp changed
4030 sync
4031 blr ; Return...
4032
4033/* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry
4034 *
4035 * Lock must already be held on mapping block list
4036 * returns 0 if all slots filled.
4037 * returns n if a slot is found and it is not the last
4038 * returns -n if a slot is found and it is the last
4039 * when n and -n are returned, the corresponding bit is cleared
4040 * the mapping is zeroed out before return
4041 *
4042 */
4043
4044 .align 5
4045 .globl EXT(mapalc1)
4046
4047LEXT(mapalc1)
4048 lwz r4,mbfree(r3) ; Get the 1st mask
4049 lis r0,0x8000 ; Get the mask to clear the first free bit
4050 lwz r5,mbfree+4(r3) ; Get the 2nd mask
4051 mr r12,r3 ; Save the block ptr
4052 cntlzw r3,r4 ; Get first 1-bit in 1st word
4053 srw. r9,r0,r3 ; Get bit corresponding to first free one
4054 cntlzw r10,r5 ; Get first free field in second word
4055 andc r4,r4,r9 ; Turn 1-bit off in 1st word
4056 bne mapalc1f ; Found one in 1st word
4057
4058 srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word
4059 li r3,0 ; assume failure return
4060 andc r5,r5,r9 ; Turn it off
4061 beqlr-- ; There are no 1 bits left...
4062 addi r3,r10,32 ; set the correct number
4063
4064mapalc1f:
4065 or. r0,r4,r5 ; any more bits set?
4066 stw r4,mbfree(r12) ; update bitmasks
4067 stw r5,mbfree+4(r12)
4068
4069 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4070 addi r7,r6,32
4071 dcbz r6,r12 ; clear the 64-byte mapping
4072 dcbz r7,r12
4073
4074 bnelr++ ; return if another bit remains set
4075
4076 neg r3,r3 ; indicate we just returned the last bit
4077 blr
4078
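;
; mapalc1 in C terms (a sketch; slot 0 holds the block header, so bit 0 is
; never free and a return value of 0 can safely mean failure):
;
;	static int mapalc1_c(uint32_t mbfree[2]) {
;	    int n;
;	    if (mbfree[0])      n = __builtin_clz(mbfree[0]);
;	    else if (mbfree[1]) n = 32 + __builtin_clz(mbfree[1]);
;	    else return 0;                                 /* all slots filled */
;	    mbfree[n >> 5] &= ~(0x80000000u >> (n & 31));  /* claim the slot */
;	    return (mbfree[0] | mbfree[1]) ? n : -n;       /* -n if it was the last */
;	}
;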
4079
4080/* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zeroes a free 2-bit mapping entry
4081 *
4082 * Lock must already be held on mapping block list
4083 * returns 0 if all slots filled.
4084 * returns n if a slot is found and it is not the last
4085 * returns -n if a slot is found and it is the last
4086 * when n and -n are returned, the corresponding bits are cleared
4087 * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)).
 4088 * the mapping is zeroed out before return
4089 */
4090
4091 .align 5
4092 .globl EXT(mapalc2)
4093LEXT(mapalc2)
4094 lwz r4,mbfree(r3) ; Get the first mask
4095 lis r0,0x8000 ; Get the mask to clear the first free bit
4096 lwz r5,mbfree+4(r3) ; Get the second mask
4097 mr r12,r3 ; Save the block ptr
4098 slwi r6,r4,1 ; shift first word over
4099 and r6,r4,r6 ; lite start of double bit runs in 1st word
4100 slwi r7,r5,1 ; shift 2nd word over
4101 cntlzw r3,r6 ; Get first free 2-bit run in 1st word
4102 and r7,r5,r7 ; lite start of double bit runs in 2nd word
4103 srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word
4104 cntlzw r10,r7 ; Get first free field in second word
4105 srwi r11,r9,1 ; shift over for 2nd bit in 1st word
4106 andc r4,r4,r9 ; Turn off 1st bit in 1st word
4107 andc r4,r4,r11 ; turn off 2nd bit in 1st word
4108 bne mapalc2a ; Found two consecutive free bits in 1st word
4109
4110 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
4111 li r3,0 ; assume failure
4112 srwi r11,r9,1 ; get mask for 2nd bit
4113 andc r5,r5,r9 ; Turn off 1st bit in 2nd word
4114 andc r5,r5,r11 ; turn off 2nd bit in 2nd word
4115 beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either
4116 addi r3,r10,32 ; set the correct number
4117
4118mapalc2a:
4119 or. r0,r4,r5 ; any more bits set?
4120 stw r4,mbfree(r12) ; update bitmasks
4121 stw r5,mbfree+4(r12)
4122 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4123 addi r7,r6,32
4124 addi r8,r6,64
4125 addi r9,r6,96
4126 dcbz r6,r12 ; zero out the 128-byte mapping
4127 dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines
4128 dcbz r8,r12 ; because the mapping may not be 128-byte aligned
4129 dcbz r9,r12
4130
4131 bnelr++ ; return if another bit remains set
4132
4133 neg r3,r3 ; indicate we just returned the last bit
4134 blr
4135
4136mapalc2c:
4137 rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31
4138 and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free?
4139 beqlr ; no, we failed
4140 rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word
4141 rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word
4142 li r3,31 ; get index of this field
4143 b mapalc2a
4144
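;
; The run-of-two trick from the header comment, shown in C: a bit is set
; in n & (n << 1) exactly where a pair of adjacent 1 bits begins (sketch):
;
;	static int first_pair(uint32_t n) {
;	    uint32_t runs = n & (n << 1);          /* light the start of each 11 run */
;	    return runs ? __builtin_clz(runs) : -1; /* index of first pair, or none */
;	}
;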
4145
4146;
4147; This routine initializes the hash table and PCA.
4148; It is done here because we may need to be 64-bit to do it.
4149;
4150
4151 .align 5
4152 .globl EXT(hw_hash_init)
4153
4154LEXT(hw_hash_init)
4155
4156 mfsprg r10,2 ; Get feature flags
4157 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
4158 mtcrf 0x02,r10 ; move pf64Bit to cr6
4159 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
4160 lis r4,0xFF01 ; Set all slots free and start steal at end
4161 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
4162 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
4163
4164 lwz r12,0(r12) ; Get hash table size
4165 li r3,0 ; Get start
4166 bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint)
4167
4168 lwz r11,4(r11) ; Get hash table base
4169
4170hhiNext32: cmplw r3,r12 ; Have we reached the end?
4171 bge- hhiCPCA32 ; Yes...
4172 dcbz r3,r11 ; Clear the line
4173 addi r3,r3,32 ; Next one...
4174 b hhiNext32 ; Go on...
4175
4176hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4
4177 li r3,-4 ; Displacement to first PCA entry
4178 neg r12,r12 ; Get negative end of PCA
4179
4180hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry
4181 subi r3,r3,4 ; Next slot
4182 cmpw r3,r12 ; Have we finished?
4183 bge+ hhiNPCA32 ; Not yet...
4184 blr ; Leave...
4185
4186hhiSF: mfmsr r9 ; Save the MSR
4187 li r8,1 ; Get a 1
4188 mr r0,r9 ; Get a copy of the MSR
4189 ld r11,0(r11) ; Get hash table base
4190 rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0)
4191 mtmsrd r0 ; Turn on SF
4192 isync
4193
4194
4195hhiNext64: cmpld r3,r12 ; Have we reached the end?
4196 bge-- hhiCPCA64 ; Yes...
4197 dcbz128 r3,r11 ; Clear the line
4198 addi r3,r3,128 ; Next one...
4199 b hhiNext64 ; Go on...
4200
4201hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4
4202 li r3,-4 ; Displacement to first PCA entry
4203 neg r12,r12 ; Get negative end of PCA
4204
4205hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry
4206 subi r3,r3,4 ; Next slot
4207 cmpd r3,r12 ; Have we finished?
4208 bge++ hhiNPCA64 ; Not yet...
4209
4210 mtmsrd r9 ; Restore the old MSR (turns SF back off if it was off)
4211 isync
4212 blr ; Leave...
4213
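;
; What the loops above do, in C (a sketch; the PCA sits just below the hash
; table at negative offsets, one word per PTEG of 64 or 128 bytes):
;
;	#include <stddef.h>
;	#include <stdint.h>
;	#include <string.h>
;
;	static void hash_init_c(uint8_t *htab, size_t size, size_t pteg_size) {
;	    memset(htab, 0, size);                         /* clear every PTEG */
;	    uint32_t *pca = (uint32_t *)(void *)htab;
;	    for (size_t i = 1; i <= size / pteg_size; i++)
;	        pca[-(ptrdiff_t)i] = 0xFF010000u;          /* all slots free, steal at end */
;	}
;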
4214
4215;
4216; This routine sets up the hardware to start translation.
4217; Note that we do NOT start translation.
4218;
4219
4220 .align 5
4221 .globl EXT(hw_setup_trans)
4222
4223LEXT(hw_setup_trans)
4224
4225 mfsprg r11,0 ; Get the per_proc block
4226 mfsprg r12,2 ; Get feature flags
4227 li r0,0 ; Get a 0
4228 li r2,1 ; And a 1
4229 mtcrf 0x02,r12 ; Move pf64Bit to cr6
4230 stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid
4231 stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux
4232 sth r2,ppInvSeg(r11) ; Force a reload of the SRs
4233 sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel
4234
4235 bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint)
4236
4237 li r9,0 ; Clear out a register
4238 sync
4239 isync
4240 mtdbatu 0,r9 ; Invalidate maps
4241 mtdbatl 0,r9 ; Invalidate maps
4242 mtdbatu 1,r9 ; Invalidate maps
4243 mtdbatl 1,r9 ; Invalidate maps
4244 mtdbatu 2,r9 ; Invalidate maps
4245 mtdbatl 2,r9 ; Invalidate maps
4246 mtdbatu 3,r9 ; Invalidate maps
4247 mtdbatl 3,r9 ; Invalidate maps
4248
4249 mtibatu 0,r9 ; Invalidate maps
4250 mtibatl 0,r9 ; Invalidate maps
4251 mtibatu 1,r9 ; Invalidate maps
4252 mtibatl 1,r9 ; Invalidate maps
4253 mtibatu 2,r9 ; Invalidate maps
4254 mtibatl 2,r9 ; Invalidate maps
4255 mtibatu 3,r9 ; Invalidate maps
4256 mtibatl 3,r9 ; Invalidate maps
4257
4258 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
4259 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
4260 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
4261 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
4262 lwz r11,4(r11) ; Get hash table base
4263 lwz r12,0(r12) ; Get hash table size
4264 subi r12,r12,1 ; Back off by 1
4265 rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image
4266
4267 mtsdr1 r11 ; Ok, we now have the hash table set up
4268 sync
4269
4270 li r12,invalSpace ; Get the invalid segment value
4271 li r10,0 ; Start low
4272
4273hstsetsr: mtsrin r12,r10 ; Set the SR
4274 addis r10,r10,0x1000 ; Bump the segment
4275 mr. r10,r10 ; Are we finished?
4276 bne+ hstsetsr ; Nope...
4277 sync
4278 blr ; Return...
4279
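;
; The 32-bit SDR1 image assembled above, in C form (a sketch; HTABORG in
; the high half, HTABMASK = (size - 1) >> 16 in the low 9 bits):
;
;	static uint32_t make_sdr1(uint32_t htaborg, uint32_t htabsize) {
;	    return (htaborg & 0xFFFF0000u) | (((htabsize - 1) >> 16) & 0x1FFu);
;	}
;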
4280;
4281; 64-bit version
4282;
4283
4284hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
4285 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
4286 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
4287 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
4288 ld r11,0(r11) ; Get hash table base
4289 lwz r12,0(r12) ; Get hash table size
4290 cntlzw r10,r12 ; Get the number of bits
4291 subfic r10,r10,13 ; Get the extra bits we need
4292 or r11,r11,r10 ; Add the size field to SDR1
4293
4294 mtsdr1 r11 ; Ok, we now have the hash table set up
4295 sync
4296
4297 li r0,0 ; Set an SLB slot index of 0
4298 slbia ; Trash all SLB entries (except for entry 0 that is)
4299 slbmfee r7,r0 ; Get the entry that is in SLB index 0
4300 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
4301 slbie r7 ; Invalidate it
4302
4303 blr ; Return...
4304
4305
4306;
4307; This routine turns on translation for the first time on a processor
4308;
4309
4310 .align 5
4311 .globl EXT(hw_start_trans)
4312
4313LEXT(hw_start_trans)
4314
4315
4316 mfmsr r10 ; Get the msr
4317 ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation
4318
4319 mtmsr r10 ; Everything falls apart here
4320 isync
4321
4322 blr ; Back to it.
4323
4324
4325
4326;
4327; This routine validates a segment register.
4328; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va)
4329;
4330; r3 = virtual pmap
4331; r4 = segment[0:31]
4332; r5 = segment[32:63]
4333; r6 = va[0:31]
4334; r7 = va[32:63]
4335;
4336; Note that we transform the addr64_t (long long) parameters into single 64-bit values.
4337; Note that there is no reason to apply the key modifier here because this is only
4338; used for kernel accesses.
4339;
4340
4341 .align 5
4342 .globl EXT(hw_map_seg)
4343
4344LEXT(hw_map_seg)
4345
4346 lwz r0,pmapSpace(r3) ; Get the space, we will need it soon
4347 lwz r9,pmapFlags(r3) ; Get the flags for the keys now
4348 mfsprg r10,2 ; Get feature flags
4349 mfsprg r12,0 ; Get the per_proc
4350
4351;
4352; Note: the following code would probably be easier to follow if I split it,
4353; but I just wanted to see if I could write this to work on both 32- and 64-bit
4354; machines combined.
4355;
4356
4357;
4358; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines)
4359; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines)
4360
4361 rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0:31] - no-op for 32-bit
4362 rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID
4363 mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6
4364 srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest
4365 rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:35]
4366 rlwimi r0,r0,14,4,17 ; Dup address space ID above itself
4367 rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines)
4368 rlwinm r2,r0,28,0,31 ; Rotate low nybble to top of low half
4369 rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32
4370 rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines)
4371
4372 rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space
4373 ; concatenated together. There is garbage
4374 ; at the top for 64-bit but we will clean
4375 ; that out later.
4376 rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit
4377
4378
4379;
4380; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or
4381; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines
4382;
4383
4384;
4385; What we have now is:
4386;
4387; 0 0 1 2 3 4 4 5 6
4388; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
4389; +--------+--------+--------+--------+--------+--------+--------+--------+
4390; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value
4391; +--------+--------+--------+--------+--------+--------+--------+--------+
4392; 0 0 1 2 3 - for 32-bit machines
4393; 0 8 6 4 1
4394;
4395; 0 0 1 2 3 4 4 5 6
4396; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
4397; +--------+--------+--------+--------+--------+--------+--------+--------+
4398; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA
4399; +--------+--------+--------+--------+--------+--------+--------+--------+
4400; 0 0 1 2 3 - for 32-bit machines
4401; 0 8 6 4 1
4402;
4403; 0 0 1 2 3 4 4 5 6
4404; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
4405; +--------+--------+--------+--------+--------+--------+--------+--------+
4406; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment
4407; +--------+--------+--------+--------+--------+--------+--------+--------+
4408; 0 0 1 2 3 - for 32-bit machines
4409; 0 8 6 4 1
4410
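;
; A hedged C sketch (illustrative only) of the hash pictured in r2 and the
; VSID formation that follows, assuming a 14-bit space ID per the r2
; picture; make_vsid and its parameters are inventions for this sketch:
;
;   #include <stdint.h>
;
;   uint64_t make_vsid(uint64_t esid36, uint64_t space) {
;       uint64_t hash = 0;
;       for (int i = 0; i < 4; i++)      // four overlapping copies of the
;           hash |= space << (14 * i);   // space ID, as in the r2 picture
;       return (esid36 << 14) ^ hash;    // r8 XOR r2; high-order garbage
;   }                                    // gets cleaned out later
;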
4411
4412 xor r8,r8,r2 ; Calculate VSID
4413
4414 bf-- pf64Bitb,hms32bit ; Skip out if 32-bit...
4415
4416 li r0,1 ; Prepare to set bit 0 (also to clear EE)
4417 mfmsr r6 ; Get current MSR
4418 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
4419 mtmsrd r0,1 ; Set only the EE bit to 0
4420 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
4421 mfmsr r11 ; Get the MSR right now, after disabling EE
4422 andc r2,r11,r2 ; Turn off translation now
4423			rldimi	r2,r0,63,0					; Get the 64-bit mode bit turned on
4424 or r11,r11,r6 ; Turn on the EE bit if it was on
4425 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
4426 isync ; Hang out a bit
4427
4428 ld r6,validSegs(r12) ; Get the valid SLB entry flags
4429 sldi r9,r9,9 ; Position the key and noex bit
4430
4431 rldimi r5,r8,12,0 ; Form the VSID/key
4432
4433 not r3,r6 ; Make valids be 0s
4434
4435 cntlzd r7,r3 ; Find a free SLB
4436 cmplwi r7,63 ; Did we find a free SLB entry?
4437
4438 slbie r4 ; Since this ESID may still be in an SLBE, kill it
4439
4440 oris r4,r4,0x0800 ; Turn on the valid bit in ESID
4441 addi r7,r7,1 ; Make sure we skip slb 0
4442 blt++ hmsFreeSeg ; Yes, go load it...
4443
4444;
4445; No free SLB entries, select one that is in use and invalidate it
4446;
4447 lwz r2,ppSegSteal(r12) ; Get the next slot to steal
4448 addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
4449 addi r2,r2,1 ; Set next slot to steal
4450 slbmfee r3,r7 ; Get the entry that is in the selected spot
4451 subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap
4452 rldicr r3,r3,0,35 ; Clear the valid bit and the rest
4453 srawi r8,r8,31 ; Get -1 if steal index still in range
4454 slbie r3 ; Invalidate the in-use SLB entry
4455 and r2,r2,r8 ; Reset steal index when it should wrap
4456 isync ;
4457
4458 stw r2,ppSegSteal(r12) ; Set the next slot to steal
4459;
4460; We are now ready to stick the SLB entry in the SLB and mark it in use
4461;
4462
4463hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0
4464 rldimi r4,r7,0,58 ; Copy in the SLB entry selector
4465 srd r0,r0,r2 ; Set bit mask for allocation
4466 rldicl r5,r5,0,15 ; Clean out the unsupported bits
4467 or r6,r6,r0 ; Turn on the allocation flag
4468
4469 slbmte r5,r4 ; Make that SLB entry
4470
4471 std r6,validSegs(r12) ; Mark as valid
4472 mtmsrd r11 ; Restore the MSR
4473 isync
4474 blr ; Back to it...
4475
4476 .align 5
4477
4478hms32bit: rlwinm r8,r8,0,8,31 ; Clean up the VSID
4479 rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting
4480 lis r0,0x8000 ; Set bit 0
4481 rlwimi r8,r9,28,1,3 ; Insert the keys and N bit
4482 srw r0,r0,r2 ; Get bit corresponding to SR
4483 addi r7,r12,validSegs ; Point to the valid segment flags directly
4484
4485 mtsrin r8,r4 ; Set the actual SR
4486 isync ; Need to make sure this is done
4487
4488hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags
4489 or r6,r6,r0 ; Show that SR is valid
4490 stwcx. r6,0,r7 ; Set the valid SR flags
4491 bne-- hmsrupt ; Had an interrupt, need to get flags again...
4492
4493 blr ; Back to it...
4494
4495
4496;
4497; This routine invalidates a segment register.
4498;
4499
4500 .align 5
4501 .globl EXT(hw_blow_seg)
4502
4503LEXT(hw_blow_seg)
4504
4505 mfsprg r10,2 ; Get feature flags
4506 mfsprg r12,0 ; Get the per_proc
4507 mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6
4508
4509 addi r7,r12,validSegs ; Point to the valid segment flags directly
4510 rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean
4511
4512 bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit...
4513
4514 li r0,1 ; Prepare to set bit 0 (also to clear EE)
4515 mfmsr r6 ; Get current MSR
4516 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
4517 mtmsrd r0,1 ; Set only the EE bit to 0
4518 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
4519 mfmsr r11 ; Get the MSR right now, after disabling EE
4520 andc r2,r11,r2 ; Turn off translation now
4521			rldimi	r2,r0,63,0					; Get the 64-bit mode bit turned on
4522 or r11,r11,r6 ; Turn on the EE bit if it was on
4523 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
4524 isync ; Hang out a bit
4525
4526 rldimi r9,r3,32,0 ; Insert the top part of the ESID
4527
4528 slbie r9 ; Invalidate the associated SLB entry
4529
4530 mtmsrd r11 ; Restore the MSR
4531 isync
4532 blr ; Back to it.
4533
4534 .align 5
4535
4536hbs32bit: lwarx r4,0,r7 ; Get and reserve the valid segment flags
4537 rlwinm r6,r9,4,28,31 ; Convert segment to number
4538 lis r2,0x8000 ; Set up a mask
4539 srw r2,r2,r6 ; Make a mask
4540 and. r0,r4,r2 ; See if this is even valid
4541 li r5,invalSpace ; Set the invalid address space VSID
4542 beqlr ; Leave if already invalid...
4543
4544 mtsrin r5,r9 ; Slam the segment register
4545 isync ; Need to make sure this is done
4546
4547hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment
4548 stwcx. r4,0,r7 ; Set the valid SR flags
4549 beqlr++ ; Stored ok, no interrupt, time to leave...
4550
4551 lwarx r4,0,r7 ; Get and reserve the valid segment flags again
4552 b hbsrupt ; Try again...
4553
4554;
4555;		This routine invalidates the entire pmap segment cache
4556;
4557; Translation is on, interrupts may or may not be enabled.
4558;
4559
4560 .align 5
4561 .globl EXT(invalidateSegs)
4562
4563LEXT(invalidateSegs)
4564
4565 la r10,pmapCCtl(r3) ; Point to the segment cache control
4566 eqv r2,r2,r2 ; Get all foxes
4567
4568isInv: lwarx r4,0,r10 ; Get the segment cache control value
4569 rlwimi r4,r2,0,0,15 ; Slam in all invalid bits
4570 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
4571 bne-- isInv0 ; Yes, try again...
4572
4573 stwcx. r4,0,r10 ; Try to invalidate it
4574 bne-- isInv ; Someone else just stuffed it...
4575 blr ; Leave...
4576
4577
4578isInv0: li r4,lgKillResv ; Get reservation kill zone
4579 stwcx. r4,0,r4 ; Kill reservation
4580
4581isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control
4582 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
4583			beq++	isInv						; Nope, unlocked now, go try the invalidate again...
4584			b		isInv1						; Still locked, do it again...
4585
4586;
4587; This routine switches segment registers between kernel and user.
4588; We have some assumptions and rules:
4589; We are in the exception vectors
4590; pf64Bitb is set up
4591;			R3 contains the MSR we are going to
4592;			We cannot use R4, R13, R20, R21, R29
4593; R13 is the savearea
4594; R29 has the per_proc
4595;
4596; We return R3 as 0 if we did not switch between kernel and user
4597;			We also maintain and apply the user state key modifier used by VMM support:
4598;			if we go to the kernel it is set to 0, otherwise it follows the bit
4599; in spcFlags.
4600;
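;
; A hedged C sketch of the decision below; the helper name is hypothetical
; and the MSR bit extraction is simplified to two flag parameters:
;
;   // Returns nonzero when the segments/SLB must actually be reloaded.
;   int need_segment_switch(int pr_bit, int ri_bit, unsigned inv_cur) {
;       unsigned user = (unsigned)(pr_bit | ri_bit); // 1 if user segments
;       return user != inv_cur;   // EQ means same state, no invalidate pending
;   }
;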
4601
4602			.align	5
4603			.globl	EXT(switchSegs)
4604
4605LEXT(switchSegs)
4606
4607 lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator)
4608 lwz r9,spcFlags(r29) ; Pick up the special user state flags
4609 rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit
4610 rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit
4611 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
4612			or		r2,r2,r3					; This will be 1 if we will be using user segments
4613 li r3,0 ; Get a selection mask
4614 cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg
4615 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
4616 sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user
4617 la r19,ppUserPmap(r29) ; Point to the current user pmap
4618
4619;	The following line is an exercise of a generally unreadable but recompile-friendly programming practice
4620 rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key
4621
4622 andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise
4623 and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise
4624 and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise
4625 or r8,r8,r19 ; Get the pointer to the pmap we are using
4626
4627 beqlr ; We are staying in the same mode, do not touch segs...
4628
4629 lwz r28,0(r8) ; Get top half of pmap address
4630 lwz r10,4(r8) ; Get bottom half
4631
4632 stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg
4633 rlwinm r28,r28,0,1,0 ; Copy top to top
4634 stw r30,ppMapFlags(r29) ; Set the key modifier
4635 rlwimi r28,r10,0,0,31 ; Insert bottom
4636
4637 la r10,pmapCCtl(r28) ; Point to the segment cache control
4638 la r9,pmapSegCache(r28) ; Point to the segment cache
4639
4640ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control
4641 rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
4642 ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit
4643 bne-- ssgLock0 ; Yup, this is in use...
4644
4645 stwcx. r16,0,r10 ; Try to set the lock
4646 bne-- ssgLock ; Did we get contention?
4647
4648 not r11,r15 ; Invert the invalids to valids
4649 li r17,0 ; Set a mask for the SRs we are loading
4650 isync ; Make sure we are all caught up
4651
4652 bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it...
4653
4654 li r0,0 ; Clear
4655 slbia ; Trash all SLB entries (except for entry 0 that is)
4656 li r17,1 ; Get SLB index to load (skip slb 0)
4657 oris r0,r0,0x8000 ; Get set for a mask
4658 b ssg64Enter ; Start on a cache line...
4659
4660 .align 5
4661
4662ssgLock0: li r15,lgKillResv ; Killing field
4663 stwcx. r15,0,r15 ; Kill reservation
4664
4665ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls
4666 rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
4667			beq++	ssgLock						; Nope, unlocked now, try to take it again...
4668			b		ssgLock1					; Yup, still locked, wait some more...
4669;
4670; This is the 32-bit address space switch code.
4671; We take a reservation on the segment cache and walk through.
4672; For each entry, we load the specified entries and remember which
4673; we did with a mask. Then, we figure out which segments should be
4674; invalid and then see which actually are. Then we load those with the
4675; defined invalid VSID.
4676; Afterwards, we unlock the segment cache.
4677;
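;
; A hedged C rendering of that walk; mtsrin_wrap, sgc, and inval_space are
; illustrative stand-ins for mtsrin, a segment cache entry, and invalSpace,
; and the bit packing of the SR image from the VSID words is omitted:
;
;   #include <stdint.h>
;
;   struct sgc { uint32_t esid, vsid_hi, vsid_lo; };
;   extern void mtsrin_wrap(uint32_t sr_image, uint32_t ea);
;   extern uint32_t inval_space;
;
;   void switch_segs_32(uint32_t inuse, struct sgc *cache,
;                       uint32_t *valid_srs, uint32_t key_mod) {
;       uint32_t loaded = 0;
;       while (inuse) {                          // load each cached segment
;           int slot = __builtin_clz(inuse);
;           inuse &= ~(0x80000000u >> slot);
;           uint32_t sr = cache[slot].esid >> 28;       // segment number
;           loaded |= 0x80000000u >> sr;                // remember it
;           mtsrin_wrap(cache[slot].vsid_lo ^ key_mod, cache[slot].esid);
;       }
;       uint32_t stale = *valid_srs & ~loaded;   // valid before, not now
;       while (stale) {
;           int sr = __builtin_clz(stale);
;           stale &= ~(0x80000000u >> sr);
;           mtsrin_wrap(inval_space, (uint32_t)sr << 28); // slam it invalid
;       }
;       *valid_srs = loaded;
;   }
;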
4678
4679 .align 5
4680
4681ssg32Enter: cntlzw r12,r11 ; Find the next slot in use
4682 cmplwi r12,pmapSegCacheUse ; See if we are done
4683 slwi r14,r12,4 ; Index to the cache slot
4684 lis r0,0x8000 ; Get set for a mask
4685 add r14,r14,r9 ; Point to the entry
4686
4687 bge- ssg32Done ; All done...
4688
4689 lwz r5,sgcESID+4(r14) ; Get the ESID part
4690 srw r2,r0,r12 ; Form a mask for the one we are loading
4691 lwz r7,sgcVSID+4(r14) ; And get the VSID bottom
4692
4693 andc r11,r11,r2 ; Clear the bit
4694 lwz r6,sgcVSID(r14) ; And get the VSID top
4695
4696 rlwinm r2,r5,4,28,31 ; Change the segment number to a number
4697
4698 xor r7,r7,r30 ; Modify the key before we actually set it
4699 srw r0,r0,r2 ; Get a mask for the SR we are loading
4700 rlwinm r8,r7,19,1,3 ; Insert the keys and N bit
4701 or r17,r17,r0 ; Remember the segment
4702			rlwimi	r8,r7,20,12,31				; Insert bits 4:23 of the VSID
4703 rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents
4704
4705 mtsrin r8,r5 ; Load the segment
4706 b ssg32Enter ; Go enter the next...
4707
4708 .align 5
4709
4710ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags
4711 stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
4712
4713 lis r0,0x8000 ; Get set for a mask
4714 li r2,invalSpace ; Set the invalid address space VSID
4715
4716 nop ; Align loop
4717 nop ; Align loop
4718 andc r16,r16,r17 ; Get list of SRs that were valid before but not now
4719 nop ; Align loop
4720
4721ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate
4722 cmplwi r18,16 ; Have we finished?
4723 srw r22,r0,r18 ; Get the mask bit
4724 rlwinm r23,r18,28,0,3 ; Get the segment register we need
4725 andc r16,r16,r22 ; Get rid of the guy we just did
4726 bge ssg32Really ; Yes, we are really done now...
4727
4728 mtsrin r2,r23 ; Invalidate the SR
4729 b ssg32Inval ; Do the next...
4730
4731 .align 5
4732
4733ssg32Really:
4734 stw r17,validSegs(r29) ; Set the valid SR flags
4735 li r3,1 ; Set kernel/user transition
4736 blr
4737
4738;
4739; This is the 64-bit address space switch code.
4740;		First we blow away all of the SLB entries. Then we walk through the
4741;		segment cache, loading the SLB.
4742;		Afterwards, we release the cache lock.
4743;
4744;		Note that because we have to treat SLBE 0 specially, we do not ever use it...
4745;		It's a performance thing...
4746;
4747
4748 .align 5
4749
4750ssg64Enter: cntlzw r12,r11 ; Find the next slot in use
4751 cmplwi r12,pmapSegCacheUse ; See if we are done
4752 slwi r14,r12,4 ; Index to the cache slot
4753 srw r16,r0,r12 ; Form a mask for the one we are loading
4754 add r14,r14,r9 ; Point to the entry
4755 andc r11,r11,r16 ; Clear the bit
4756 bge-- ssg64Done ; All done...
4757
4758 ld r5,sgcESID(r14) ; Get the ESID part
4759 ld r6,sgcVSID(r14) ; And get the VSID part
4760 oris r5,r5,0x0800 ; Turn on the valid bit
4761 or r5,r5,r17 ; Insert the SLB slot
4762 xor r6,r6,r30 ; Modify the key before we actually set it
4763 addi r17,r17,1 ; Bump to the next slot
4764 slbmte r6,r5 ; Make that SLB entry
4765 b ssg64Enter ; Go enter the next...
4766
4767			.align	5
4768
4769	ssg64Done:	stw		r15,pmapCCtl(r28)			; Unlock the segment cache controls
4770
4771 eqv r16,r16,r16 ; Load up with all foxes
4772 subfic r17,r17,64 ; Get the number of 1 bits we need
4773
4774 sld r16,r16,r17 ; Get a mask for the used SLB entries
4775 li r3,1 ; Set kernel/user transition
4776 std r16,validSegs(r29) ; Set the valid SR flags
4777 blr
4778
4779;
4780; mapSetUp - this function sets initial state for all mapping functions.
4781; We turn off all translations (physical), disable interruptions, and
4782; enter 64-bit mode if applicable.
4783;
4784; We also return the original MSR in r11, the feature flags in R12,
4785; and CR6 set up so we can do easy branches for 64-bit
4786;
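;
; A hedged C equivalent of the MSR handling (32-bit path); mfmsr_read and
; mtmsr_isync are hypothetical wrappers around mfmsr and mtmsr/isync:
;
;   #include <stdint.h>
;
;   #define MSR_VEC 0x02000000u
;   #define MSR_FP  0x00002000u
;   #define MSR_EE  0x00008000u
;   #define MSR_IR  0x00000020u
;   #define MSR_DR  0x00000010u
;
;   extern uint32_t mfmsr_read(void);
;   extern void mtmsr_isync(uint32_t msr);
;
;   uint32_t map_set_up(void) {
;       uint32_t old = mfmsr_read() & ~(MSR_VEC | MSR_FP); // VEC/FP stay off
;       mtmsr_isync(old & ~(MSR_EE | MSR_IR | MSR_DR));    // physical, no ints
;       return old;            // the asm hands this back in r11 for restore
;   }
;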
4787
4788 .align 5
4789 .globl EXT(mapSetUp)
4790
4791LEXT(mapSetUp)
4792
4793 lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask
4794 mfsprg r12,2 ; Get feature flags
4795 ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well
4796 mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
4797 mfmsr r11 ; Save the MSR
4798 mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
4799 andc r11,r11,r0 ; Clear VEC and FP for good
4800 ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR
4801 li r2,1 ; Prepare for 64 bit
4802 andc r0,r11,r0 ; Clear the rest
4803 bt pfNoMSRirb,msuNoMSR ; No MSR...
4804 bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint)
4805
4806 mtmsr r0 ; Translation and all off
4807 isync ; Toss prefetch
4808 blr ; Return...
4809
4810 .align 5
4811
4812msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
4813 mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR
4814 isync ; synchronize
4815 blr ; Return...
4816
4817 .align 5
4818
4819msuNoMSR: mr r2,r3 ; Save R3 across call
4820 mr r3,r0 ; Get the new MSR value
4821 li r0,loadMSR ; Get the MSR setter SC
4822 sc ; Set it
4823 mr r3,r2 ; Restore R3
4824 blr ; Go back all set up...
4825
4826
4827;
4828; Find the physent based on a physical page and try to lock it (but not too hard)
4829;		Note that this table always has an entry with a 0 table pointer at the end
4830;
4831; R3 contains ppnum on entry
4832; R3 is 0 if no entry was found
4833; R3 is physent if found
4834; cr0_eq is true if lock was obtained or there was no entry to lock
4835;		cr0_eq is false if there was an entry and it was locked
4836;
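;
; A hedged C sketch of the scan below; mem_region mirrors the mrPhysTab/
; mrStart/mrEnd fields, but the names and types here are illustrative
; (the try-lock that follows the scan is not shown):
;
;   #include <stdint.h>
;
;   struct mem_region { uint32_t *phys_tab; uint32_t start, end; };
;
;   uint32_t *find_physent(struct mem_region *mr, uint32_t ppnum) {
;       for (; mr->phys_tab != 0; mr++)      // table ends with a 0 pointer
;           if (ppnum >= mr->start && ppnum <= mr->end)
;               return mr->phys_tab + 2 * (ppnum - mr->start); // 8B physents
;       return 0;                            // no entry found
;   }
;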
4837
4838 .align 5
4839
4840mapFindPhyTry:
4841 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
4842 mr r2,r3 ; Save our target
4843 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
4844
4845mapFindPhz: lwz r3,mrPhysTab(r9) ; Get the actual table address
4846 lwz r5,mrStart(r9) ; Get start of table entry
4847 lwz r0,mrEnd(r9) ; Get end of table entry
4848 addi r9,r9,mrSize ; Point to the next slot
4849 cmplwi cr2,r3,0 ; Are we at the end of the table?
4850 cmplw r2,r5 ; See if we are in this table
4851 cmplw cr1,r2,r0 ; Check end also
4852 sub r4,r2,r5 ; Calculate index to physical entry
4853 beq-- cr2,mapFindNo ; Leave if we did not find an entry...
4854 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
4855 slwi r4,r4,3 ; Get offset to physical entry
4856
4857 blt-- mapFindPhz ; Did not find it...
4858
4859 add r3,r3,r4 ; Point right to the slot
4860
4861mapFindOv: lwz r2,0(r3) ; Get the lock contents right now
4862 rlwinm. r0,r2,0,0,0 ; Is it locked?
4863 bnelr-- ; Yes it is...
4864
4865 lwarx r2,0,r3 ; Get the lock
4866 rlwinm. r0,r2,0,0,0 ; Is it locked?
4867 oris r0,r2,0x8000 ; Set the lock bit
4868 bne-- mapFindKl ; It is locked, go get rid of reservation and leave...
4869 stwcx. r0,0,r3 ; Try to stuff it back...
4870 bne-- mapFindOv ; Collision, try again...
4871 isync ; Clear any speculations
4872 blr ; Leave...
4873
4874mapFindKl: li r2,lgKillResv ; Killing field
4875 stwcx. r2,0,r2 ; Trash reservation...
4876 crclr cr0_eq ; Make sure we do not think we got the lock
4877 blr ; Leave...
4878
4879mapFindNo: crset cr0_eq ; Make sure that we set this
4880 li r3,0 ; Show that we did not find it
4881 blr ; Leave...
4882;
4883; pmapCacheLookup - This function will look up an entry in the pmap segment cache.
4884;
4885; How the pmap cache lookup works:
4886;
4887; We use a combination of three things: a mask of valid entries, a sub-tag, and the
4888; ESID (aka the "tag"). The mask indicates which of the cache slots actually contain
4889; an entry. The sub-tag is a 16 entry 4 bit array that contains the low order 4 bits
4890;		an entry. The sub-tag is a 16-entry, 4-bit array that contains the low-order 4 bits
4891;		of the ESID: bits 32:35 of the effective address for 64-bit and 0:3 for 32-bit. The cache
4892;		entry contains the full 36-bit ESID.
4893; The purpose of the sub-tag is to limit the number of searches necessary when looking
4894; for an existing cache entry. Because there are 16 slots in the cache, we could end up
4895;		searching all 16 if a match is not found.
4896;
4897; Essentially, we will search only the slots that have a valid entry and whose sub-tag
4898; matches. More than likely, we will eliminate almost all of the searches.
4899;
4900; Inputs:
4901; R3 = pmap
4902; R4 = ESID high half
4903; R5 = ESID low half
4904;
4905; Outputs:
4906; R3 = pmap cache slot if found, 0 if not
4907; R10 = pmapCCtl address
4908; R11 = pmapCCtl image
4909; pmapCCtl locked on exit
4910;
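;
; A hedged C model of the sub-tag compare done below with eqv/rlwinm/rlwimi;
; subtag_hits is a made-up name. It XNORs eight replicated copies of the
; 4-bit tag against the packed sub-tags, ANDs each nibble down to its top
; bit, then gathers the 16 flags into one mask (slot 0 at the MSB end):
;
;   #include <stdint.h>
;
;   uint32_t subtag_hits(uint32_t hi, uint32_t lo, uint32_t tag) {
;       uint32_t p = tag & 0xF;
;       p |= p << 4;  p |= p << 8;  p |= p << 16; // eight copies of the tag
;       uint32_t h = ~(hi ^ p), l = ~(lo ^ p); // 0xF nibbles where all match
;       h &= h << 1;  h &= h << 2;  // fold each nibble's AND into its top bit
;       l &= l << 1;  l &= l << 2;
;       h &= 0x88888888u;  l &= 0x88888888u;      // one flag bit per slot
;       uint32_t mask = 0;
;       for (int i = 0; i < 8; i++) {       // compress into a 16-bit mask
;           mask |= ((h >> (31 - 4 * i)) & 1) << (15 - i);
;           mask |= ((l >> (31 - 4 * i)) & 1) << (7 - i);
;       }
;       return mask;       // AND with the valid mask before searching slots
;   }
;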
4911
4912 .align 5
4913
4914pmapCacheLookup:
4915 la r10,pmapCCtl(r3) ; Point to the segment cache control
4916
4917pmapCacheLookuq:
4918 lwarx r11,0,r10 ; Get the segment cache control value
4919 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
4920 ori r0,r11,lo16(pmapCCtlLck) ; Turn on the lock bit
4921 bne-- pmapCacheLookur ; Nope...
4922 stwcx. r0,0,r10 ; Try to take the lock
4923 bne-- pmapCacheLookuq ; Someone else just stuffed it, try again...
4924
4925 isync ; Make sure we get reservation first
4926 lwz r9,pmapSCSubTag(r3) ; Get the high part of the sub-tag
4927 rlwimi r5,r5,28,4,7 ; Copy sub-tag just to right of itself (XX------)
4928 lwz r10,pmapSCSubTag+4(r3) ; And the bottom half
4929 rlwimi r5,r5,24,8,15 ; Copy doubled sub-tag to right of itself (XXXX----)
4930 lis r8,0x8888 ; Get some eights
4931 rlwimi r5,r5,16,16,31 ; Copy quadrupled sub-tags to the right
4932 ori r8,r8,0x8888 ; Fill the rest with eights
4933
4934 eqv r10,r10,r5 ; Get 0xF where we hit in bottom half
4935 eqv r9,r9,r5 ; Get 0xF where we hit in top half
4936
4937 rlwinm r2,r10,1,0,30 ; Shift over 1
4938 rlwinm r0,r9,1,0,30 ; Shift over 1
4939 and r2,r2,r10 ; AND the even/odd pair into the even
4940 and r0,r0,r9 ; AND the even/odd pair into the even
4941 rlwinm r10,r2,2,0,28 ; Shift over 2
4942 rlwinm r9,r0,2,0,28 ; Shift over 2
4943 and r10,r2,r10 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
4944 and r9,r0,r9 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
4945
4946 and r10,r10,r8 ; Clear out extras
4947 and r9,r9,r8 ; Clear out extras
4948
4949 rlwinm r0,r10,3,1,28 ; Slide adjacent next to each other
4950 rlwinm r2,r9,3,1,28 ; Slide adjacent next to each other
4951 or r10,r0,r10 ; Merge them
4952 or r9,r2,r9 ; Merge them
4953 rlwinm r0,r10,6,2,26 ; Slide adjacent pairs next to each other
4954 rlwinm r2,r9,6,2,26 ; Slide adjacent pairs next to each other
4955 or r10,r0,r10 ; Merge them
4956 or r9,r2,r9 ; Merge them
4957 rlwimi r10,r10,12,4,7 ; Stick in the low-order adjacent quad
4958 rlwimi r9,r9,12,4,7 ; Stick in the low-order adjacent quad
4959 not r6,r11 ; Turn invalid into valid
4960 rlwimi r9,r10,24,8,15 ; Merge in the adjacent octs giving a hit mask
4961
4962 la r10,pmapSegCache(r3) ; Point at the cache slots
4963 and. r6,r9,r6 ; Get mask of valid and hit
4964 li r0,0 ; Clear
4965 li r3,0 ; Assume not found
4966 oris r0,r0,0x8000 ; Start a mask
4967 beqlr++ ; Leave, should usually be no hits...
4968
4969pclNextEnt: cntlzw r5,r6 ; Find an in use one
4970 cmplwi cr1,r5,pmapSegCacheUse ; Did we find one?
4971 rlwinm r7,r5,4,0,27 ; Index to the cache entry
4972 srw r2,r0,r5 ; Get validity mask bit
4973 add r7,r7,r10 ; Point to the cache slot
4974 andc r6,r6,r2 ; Clear the validity bit we just tried
4975 bgelr-- cr1 ; Leave if there are no more to check...
4976
4977 lwz r5,sgcESID(r7) ; Get the top half
4978
4979 cmplw r5,r4 ; Only need to check top because sub-tag is the entire other half
4980
4981 bne++ pclNextEnt ; Nope, try again...
4982
4983 mr r3,r7 ; Point to the slot
4984 blr ; Leave....
4985
4986			.align	5
4987
4988pmapCacheLookur:
4989 li r11,lgKillResv ; The killing spot
4990 stwcx. r11,0,r11 ; Kill the reservation
4991
4992pmapCacheLookus:
4993 lwz r11,pmapCCtl(r3) ; Get the segment cache control
4994 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
4995 beq++ pmapCacheLookup ; Nope...
4996 b pmapCacheLookus ; Yup, keep waiting...
4997
4998
4999
5000
5001;
5002; This routine, given a mapping, will find and lock the PTEG
5003; If mpPte does not point to a PTE (checked before and after lock), it will unlock the
5004;		PTEG and return. In this case R4 will be undefined
5005; and the low 12 bits of mpVAddr valid in R5. R3 will contain 0.
5006;
5007; If the mapping is still valid, we will invalidate the PTE and merge
5008; the RC bits into the physent and also save them into the mapping.
5009;
5010; We then return with R3 pointing to the PTE slot, R4 is the
5011; top of the PTE and R5 is the bottom. R6 contains the PCA.
5012; R7 points to the PCA entry.
5013;
5014; Note that we should NEVER be called on a block or special mapping.
5015; We could do many bad things.
5016;
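;
; A hedged C sketch of the core sequence (32-bit case) once the PTEG is
; locked; invalidate_pte32 is a made-up name and tlbie_sync stands in for
; the tlbie/eieio/tlbsync/sync dance done under the tlbie lock below:
;
;   #include <stdint.h>
;
;   extern void tlbie_sync(uint32_t va);
;
;   uint32_t invalidate_pte32(uint32_t pte[2], uint32_t va) {
;       pte[0] &= ~0x80000000u;       // clear V: the PTE is now invalid
;       __asm__ volatile("sync");     // make the invalidate visible first
;       tlbie_sync(va);               // flush any cached translation
;       return pte[1];                // RC bits live here; caller merges them
;   }                                 // into the mapping and the physent
;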
5017
5018 .align 5
5019
5020mapInvPte32:
5021 lwz r0,mpPte(r31) ; Grab the PTE offset
5022 mfsdr1 r7 ; Get the pointer to the hash table
5023 lwz r5,mpVAddr+4(r31) ; Grab the virtual address
5024 rlwinm r10,r7,0,0,15 ; Clean up the hash table base
5025 andi. r3,r0,mpHValid ; Is there a possible PTE?
5026 srwi r7,r0,4 ; Convert to PCA units
5027 rlwinm r7,r7,0,0,29 ; Clean up PCA offset
5028 mflr r2 ; Save the return
5029 subfic r7,r7,-4 ; Convert to -4 based negative index
5030 add r7,r10,r7 ; Point to the PCA directly
5031 beqlr-- ; There was no PTE to start with...
5032
5033 bl mapLockPteg ; Lock the PTEG
5034
5035 lwz r0,mpPte(r31) ; Grab the PTE offset
5036 mtlr r2 ; Restore the LR
5037 andi. r3,r0,mpHValid ; Is there a possible PTE?
5038 beq- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
5039
5040 rlwinm r3,r0,0,0,30 ; Clear the valid bit
5041 add r3,r3,r10 ; Point to actual PTE
5042 lwz r4,0(r3) ; Get the top of the PTE
5043
5044 li r8,tlbieLock ; Get the TLBIE lock
5045 rlwinm r0,r4,0,1,31 ; Clear the valid bit
5046 stw r0,0(r3) ; Invalidate the PTE
5047
5048 sync ; Make sure everyone sees the invalidate
5049
5050mITLBIE32: lwarx r0,0,r8 ; Get the TLBIE lock
5051 mfsprg r2,2 ; Get feature flags
5052 mr. r0,r0 ; Is it locked?
5053 li r0,1 ; Get our lock word
5054 bne- mITLBIE32 ; It is locked, go wait...
5055
5056 stwcx. r0,0,r8 ; Try to get it
5057 bne- mITLBIE32 ; We was beat...
5058
5059 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
5060 li r0,0 ; Lock clear value
5061
5062 tlbie r5 ; Invalidate it everywhere
5063
5064 stw r0,tlbieLock(0) ; Clear the tlbie lock
5065
5066 beq- mINoTS32 ; Can not have MP on this machine...
5067
5068 eieio ; Make sure that the tlbie happens first
5069 tlbsync ; Wait for everyone to catch up
5070 sync ; Make sure of it all
5071
5072mINoTS32: lwz r5,4(r3) ; Get the real part
5073 srwi r10,r5,12 ; Change physical address to a ppnum
5074
5075mINmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table
5076 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
5077 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
5078 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
5079 rlwinm r11,r11,2,0,29 ; Change index into byte offset
5080 add r11,r11,r8 ; Point to the bank table
5081 lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer
5082 lwz r11,mrStart(r11) ; Get the start of bank
5083 rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC
5084 addi r2,r2,4 ; Offset to last half of field
5085 stw r0,mpVAddr+4(r31) ; Set the new RC into the field
5086 sub r11,r10,r11 ; Get the index into the table
5087 rlwinm r11,r11,3,0,28 ; Get offset to the physent
5088
5089
5090mImrgRC: lwarx r10,r11,r2 ; Get the master RC
5091 rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC
5092 or r0,r0,r10 ; Merge in the new RC
5093 stwcx. r0,r11,r2 ; Try to stick it back
5094 bne-- mImrgRC ; Try again if we collided...
5095
5096 blr ; Leave with the PCA still locked up...
5097
5098mIPUnlock: eieio ; Make sure all updates come first
5099
5100 stw r6,0(r7) ; Unlock
5101			blr
5102
5103;
5104; 64-bit version
5105;
5106 .align 5
5107
5108mapInvPte64:
5109 lwz r0,mpPte(r31) ; Grab the PTE offset
5110 ld r5,mpVAddr(r31) ; Grab the virtual address
5111 mfsdr1 r7 ; Get the pointer to the hash table
5112 rldicr r10,r7,0,45 ; Clean up the hash table base
5113 andi. r3,r0,mpHValid ; Is there a possible PTE?
5114 srdi r7,r0,5 ; Convert to PCA units
5115 rldicr r7,r7,0,61 ; Clean up PCA
5116 subfic r7,r7,-4 ; Convert to -4 based negative index
5117 mflr r2 ; Save the return
5118 add r7,r10,r7 ; Point to the PCA directly
5119 beqlr-- ; There was no PTE to start with...
5120
5121 bl mapLockPteg ; Lock the PTEG
5122
5123 lwz r0,mpPte(r31) ; Grab the PTE offset again
5124 mtlr r2 ; Restore the LR
5125 andi. r3,r0,mpHValid ; Is there a possible PTE?
5126 beq-- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
5127
5128 rlwinm r3,r0,0,0,30 ; Clear the valid bit
5129 add r3,r3,r10 ; Point to the actual PTE
5130 ld r4,0(r3) ; Get the top of the PTE
5131
5132 li r8,tlbieLock ; Get the TLBIE lock
5133 rldicr r0,r4,0,62 ; Clear the valid bit
5134 std r0,0(r3) ; Invalidate the PTE
5135
5136 rldicr r2,r4,16,35 ; Shift the AVPN over to match VPN
5137 sync ; Make sure everyone sees the invalidate
5138 rldimi r2,r5,0,36 ; Cram in the page portion of the EA
5139
5140mITLBIE64: lwarx r0,0,r8 ; Get the TLBIE lock
5141 mr. r0,r0 ; Is it locked?
5142 li r0,1 ; Get our lock word
5143 bne-- mITLBIE64a ; It is locked, toss reservation and wait...
5144
5145 stwcx. r0,0,r8 ; Try to get it
5146 bne-- mITLBIE64 ; We was beat...
5147
5148 rldicl r2,r2,0,16 ; Clear bits 0:15 because we are under orders
5149
5150 li r0,0 ; Lock clear value
5151
5152 tlbie r2 ; Invalidate it everywhere
5153
5154 stw r0,tlbieLock(0) ; Clear the tlbie lock
5155
5156 eieio ; Make sure that the tlbie happens first
5157 tlbsync ; Wait for everyone to catch up
5158 isync
5159 ptesync ; Wait for quiet again
5160
5161mINoTS64: sync ; Make sure of it all
5162
5163 ld r5,8(r3) ; Get the real part
5164 srdi r10,r5,12 ; Change physical address to a ppnum
5165 b mINmerge ; Join the common 32-64-bit code...
5166
5167mITLBIE64a: li r5,lgKillResv ; Killing field
5168 stwcx. r5,0,r5 ; Kill reservation
5169
5170mITLBIE64b: lwz r0,0(r8) ; Get the TLBIE lock
5171 mr. r0,r0 ; Is it locked?
5172 beq++ mITLBIE64 ; Nope, try again...
5173 b mITLBIE64b ; Yup, wait for it...
5174
5175;
5176; mapLockPteg - Locks a PTEG
5177; R7 points to PCA entry
5178; R6 contains PCA on return
5179;
5180;
5181
5182 .align 5
5183
5184mapLockPteg:
5185 lwarx r6,0,r7 ; Pick up the PCA
5186 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
5187 ori r0,r6,PCAlock ; Set the lock bit
5188 bne-- mLSkill ; It is locked...
5189
5190 stwcx. r0,0,r7 ; Try to lock the PTEG
5191 bne-- mapLockPteg ; We collided...
5192
5193 isync ; Nostradamus lied
5194 blr ; Leave...
5195
5196mLSkill: li r6,lgKillResv ; Get killing field
5197 stwcx. r6,0,r6 ; Kill it
5198
5199mapLockPteh:
5200 lwz r6,0(r7) ; Pick up the PCA
5201 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
5202 beq++ mapLockPteg ; Nope, try again...
5203 b mapLockPteh ; Yes, wait for it...
5204
5205
5206;
5207; The mapSelSlot function selects a PTEG slot to use. As input, it expects R6
5208; to contain the PCA. When it returns, R3 contains 0 if an unoccupied slot was
5209; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
5210; R4 returns the slot index.
5211;
5212; CR7 also indicates that we have a block mapping
5213;
5214; The PTEG allocation controls are a bit map of the state of the PTEG.
5215; PCAfree indicates that the PTE slot is empty.
5216; PCAauto means that it comes from an autogen area. These
5217; guys do not keep track of reference and change and are actually "wired".
5218; They are easy to maintain. PCAsteal
5219; is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these
5220; fields fit in a single word and are loaded and stored under control of the
5221; PTEG control area lock (PCAlock).
5222;
5223; Note that PCAauto does not contribute to the steal calculations at all. Originally
5224;		it did, autogens were second in priority. This can result in a pathological
5225;		case where an instruction cannot make forward progress, or one PTE slot
5226; thrashes.
5227;
5228; Note that the PCA must be locked when we get here.
5229;
5230; Physically, the fields are arranged:
5231; 0: PCAfree
5232; 1: PCAsteal
5233; 2: PCAauto
5234; 3: PCAmisc
5235;
5236;
5237; At entry, R6 contains new unlocked PCA image (real PCA is locked and untouched)
5238;
5239; At exit:
5240;
5241; R3 = 0 - no steal
5242; R3 = 1 - steal regular
5243; R3 = 2 - steal autogen
5244; R4 contains slot number
5245; R6 contains updated PCA image
5246;
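;
; A hedged C model of the selection; map_sel_slot is a made-up name, the
; PCA is treated as one word laid out as described above, and PCAsteal is
; assumed to always have at least one bit set:
;
;   #include <stdint.h>
;
;   int map_sel_slot(uint32_t *pca, int block, int *slot) {
;       uint32_t v = *pca;
;       int s = __builtin_clz(v) & 7;      // free slot, else steal victim
;       uint32_t fbit = 0x80000000u >> s;  // PCAfree bit for this slot
;       uint32_t abit = 0x00008000u >> s;  // PCAauto bit for this slot
;       int inuse = (v & fbit) == 0;       // stealing if not free
;       int autog = inuse && (v & abit) != 0;
;       v &= ~(fbit | abit);               // allocate slot, clear autogen
;       if (block) v |= abit;              // block mappings stay autogen
;       uint32_t steal = (v >> 16) & 0xFF; // slide the steal mask right,
;       steal = ((steal >> 1) | (steal << 7)) & 0xFF; // wrapping around
;       v = (v & ~0x00FF0000u) | (steal << 16);
;       *pca = v;  *slot = s;
;       return inuse + autog;    // 0 = no steal, 1 = regular, 2 = autogen
;   }
;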
5247
5248 .align 5
5249
5250mapSelSlot: lis r10,0 ; Clear autogen mask
5251 li r9,0 ; Start a mask
5252 beq cr7,mSSnotblk ; Skip if this is not a block mapping
5253 ori r10,r10,lo16(0xFFFF) ; Make sure we mark a block mapping (autogen)
5254
5255mSSnotblk: rlwinm r11,r6,16,24,31 ; Isolate just the steal mask
5256 oris r9,r9,0x8000 ; Get a mask
5257 cntlzw r4,r6 ; Find a slot or steal one
5258 ori r9,r9,lo16(0x8000) ; Insure that we have 0x80008000
5259 rlwinm r4,r4,0,29,31 ; Isolate bit position
5260 rlwimi r11,r11,8,16,23 ; Get set to march a 1 back into top of 8 bit rotate
5261 srw r2,r9,r4 ; Get mask to isolate selected inuse and autogen flags
5262 srwi r11,r11,1 ; Slide steal mask right
5263 and r8,r6,r2 ; Isolate the old in use and autogen bits
5264 andc r6,r6,r2 ; Allocate the slot and also clear autogen flag
5265 addi r0,r8,0x7F00 ; Push autogen flag to bit 16
5266 and r2,r2,r10 ; Keep the autogen part if autogen
5267 addis r8,r8,0xFF00 ; Push in use to bit 0 and invert
5268 or r6,r6,r2 ; Add in the new autogen bit
5269 rlwinm r0,r0,17,31,31 ; Get a 1 if the old was autogenned (always 0 if not in use)
5270 rlwinm r8,r8,1,31,31 ; Isolate old in use
5271 rlwimi r6,r11,16,8,15 ; Stick the new steal slot in
5272
5273 add r3,r0,r8 ; Get 0 if no steal, 1 if steal normal, 2 if steal autogen
5274 blr ; Leave...
5275
5276;
5277; Shared/Exclusive locks
5278;
5279; A shared/exclusive lock allows multiple shares of a lock to be taken
5280; but only one exclusive. A shared lock can be "promoted" to exclusive
5281; when it is the only share. If there are multiple sharers, the lock
5282; must be "converted". A promotion drops the share and gains exclusive as
5283; an atomic operation. If anyone else has a share, the operation fails.
5284; A conversion first drops the share and then takes an exclusive lock.
5285;
5286; We will want to add a timeout to this eventually.
5287;
5288; R3 is set to 0 for success, non-zero for failure
5289;
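;
; A hedged C11 rendering of the protocol (illustrative; the kernel uses the
; lwarx/stwcx. loops below). Bit 0 of the lock word (0x80000000) means
; "held exclusive"; otherwise the word is a share count. sxlkConvert is
; not sketched: it drops its share, then falls into the exclusive path.
;
;   #include <stdatomic.h>
;   #include <stdint.h>
;
;   #define SXLK_EXCL 0x80000000u
;   typedef _Atomic uint32_t sxlk_t;
;
;   int sxlk_promote(sxlk_t *lk) {        // succeeds only for a sole sharer
;       uint32_t one = 1;
;       return atomic_compare_exchange_strong(lk, &one, SXLK_EXCL) ? 0 : 1;
;   }
;
;   void sxlk_exclusive(sxlk_t *lk) {     // spin until free, then take it
;       uint32_t zero = 0;
;       while (!atomic_compare_exchange_weak(lk, &zero, SXLK_EXCL))
;           zero = 0;                     // reset expected value and retry
;   }
;
;   void sxlk_shared(sxlk_t *lk) {        // wait out an exclusive holder
;       uint32_t v = atomic_load(lk) & ~SXLK_EXCL;
;       while (!atomic_compare_exchange_weak(lk, &v, v + 1))
;           v &= ~SXLK_EXCL;              // retry with the fresh count
;   }
;
;   void sxlk_unlock(sxlk_t *lk) {        // drop exclusive or one share
;       uint32_t v = atomic_load(lk);
;       while (!atomic_compare_exchange_weak(lk, &v,
;                  (v & SXLK_EXCL) ? 0 : v - 1)) { }
;   }
;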
5290
5291;
5292; Convert a share into an exclusive
5293;
5294
5295 .align 5
5296
5297sxlkConvert:
5298
5299 lis r0,0x8000 ; Get the locked lock image
5300#if 0
5301 mflr r0 ; (TEST/DEBUG)
5302 oris r0,r0,0x8000 ; (TEST/DEBUG)
5303#endif
5304
5305sxlkCTry: lwarx r2,0,r3 ; Get the lock word
5306 cmplwi r2,1 ; Does it just have our share?
5307 subi r2,r2,1 ; Drop our share in case we do not get it
5308 bne-- sxlkCnotfree ; No, we need to unlock...
5309 stwcx. r0,0,r3 ; Try to take it exclusively
5310 bne-- sxlkCTry ; Collision, try again...
5311
5312 isync
5313 li r3,0 ; Set RC
5314 blr ; Leave...
5315
5316sxlkCnotfree:
5317 stwcx. r2,0,r3 ; Try to drop our share...
5318 bne-- sxlkCTry ; Try again if we collided...
5319 b sxlkExclusive ; Go take it exclusively...
5320
5321;
5322; Promote shared to exclusive
5323;
5324
5325 .align 5
5326
5327sxlkPromote:
5328 lis r0,0x8000 ; Get the locked lock image
5329#if 0
5330 mflr r0 ; (TEST/DEBUG)
5331 oris r0,r0,0x8000 ; (TEST/DEBUG)
5332#endif
5333
5334sxlkPTry: lwarx r2,0,r3 ; Get the lock word
5335 cmplwi r2,1 ; Does it just have our share?
5336 bne-- sxlkPkill ; No, just fail (R3 is non-zero)...
5337 stwcx. r0,0,r3 ; Try to take it exclusively
5338 bne-- sxlkPTry ; Collision, try again...
5339
5340 isync
5341 li r3,0 ; Set RC
5342			blr							; Leave...
5343
5344sxlkPkill: li r2,lgKillResv ; Point to killing field
5345 stwcx. r2,0,r2 ; Kill reservation
5346 blr ; Leave
5347
5348
5349
5350;
5351;		Take lock exclusively
5352;
5353
5354 .align 5
5355
5356sxlkExclusive:
5357 lis r0,0x8000 ; Get the locked lock image
5358#if 0
5359 mflr r0 ; (TEST/DEBUG)
5360 oris r0,r0,0x8000 ; (TEST/DEBUG)
5361#endif
5362
5363sxlkXTry: lwarx r2,0,r3 ; Get the lock word
5364 mr. r2,r2 ; Is it locked?
5365 bne-- sxlkXWait ; Yes...
5366 stwcx. r0,0,r3 ; Try to take it
5367 bne-- sxlkXTry ; Collision, try again...
5368
5369 isync ; Toss anything younger than us
5370 li r3,0 ; Set RC
5371 blr ; Leave...
5372
5373 .align 5
5374
5375sxlkXWait: li r2,lgKillResv ; Point to killing field
5376 stwcx. r2,0,r2 ; Kill reservation
5377
5378sxlkXWaiu: lwz r2,0(r3) ; Get the lock again
5379 mr. r2,r2 ; Is it free yet?
5380 beq++ sxlkXTry ; Yup...
5381 b sxlkXWaiu ; Hang around a bit more...
5382
5383;
5384; Take a share of the lock
5385;
5386
5387 .align 5
5388
5389sxlkShared: lwarx r2,0,r3 ; Get the lock word
5390 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
5391 addi r2,r2,1 ; Up the share count
5392 bne-- sxlkSWait ; Yes...
5393 stwcx. r2,0,r3 ; Try to take it
5394 bne-- sxlkShared ; Collision, try again...
5395
5396 isync ; Toss anything younger than us
5397 li r3,0 ; Set RC
5398 blr ; Leave...
5399
5400 .align 5
5401
5402sxlkSWait: li r2,lgKillResv ; Point to killing field
5403 stwcx. r2,0,r2 ; Kill reservation
5404
5405sxlkSWaiu: lwz r2,0(r3) ; Get the lock again
5406 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
5407 beq++ sxlkShared ; Nope...
5408 b sxlkSWaiu ; Hang around a bit more...
5409
5410;
5411; Unlock either exclusive or shared.
5412;
5413
5414 .align 5
5415
5416sxlkUnlock: eieio ; Make sure we order our stores out
5417
5418sxlkUnTry: lwarx r2,0,r3 ; Get the lock
5419 rlwinm. r0,r2,0,0,0 ; Do we hold it exclusively?
5420 subi r2,r2,1 ; Remove our share if we have one
5421 li r0,0 ; Clear this
5422 bne-- sxlkUExclu ; We hold exclusive...
5423
5424 stwcx. r2,0,r3 ; Try to lose our share
5425 bne-- sxlkUnTry ; Collision...
5426 blr ; Leave...
5427
5428sxlkUExclu: stwcx. r0,0,r3 ; Unlock and release reservation
5429 beqlr++ ; Leave if ok...
5430 b sxlkUnTry ; Could not store, try over...
5431
5432
5433 .align 5
5434 .globl EXT(fillPage)
5435
5436LEXT(fillPage)
5437
5438 mfsprg r0,2 ; Get feature flags
5439 mtcrf 0x02,r0 ; move pf64Bit to cr
5440
5441 rlwinm r4,r4,0,1,0 ; Copy fill to top of 64-bit register
5442 lis r2,0x0200 ; Get vec
5443 mr r6,r4 ; Copy
5444 ori r2,r2,0x2000 ; Get FP
5445 mr r7,r4 ; Copy
5446 mfmsr r5 ; Get MSR
5447 mr r8,r4 ; Copy
5448 andc r5,r5,r2 ; Clear out permanent turn-offs
5449 mr r9,r4 ; Copy
5450 ori r2,r2,0x8030 ; Clear IR, DR and EE
5451 mr r10,r4 ; Copy
5452 andc r0,r5,r2 ; Kill them
5453 mr r11,r4 ; Copy
5454 mr r12,r4 ; Copy
5455 bt++ pf64Bitb,fpSF1 ; skip if 64-bit (only they take the hint)
5456
5457 slwi r3,r3,12 ; Make into a physical address
5458 mtmsr r2 ; Interrupts and translation off
5459 isync
5460
5461 li r2,4096/32 ; Get number of cache lines
5462
5463fp32again: dcbz 0,r3 ; Clear
5464 addic. r2,r2,-1 ; Count down
5465 stw r4,0(r3) ; Fill
5466 stw r6,4(r3) ; Fill
5467 stw r7,8(r3) ; Fill
5468 stw r8,12(r3) ; Fill
5469 stw r9,16(r3) ; Fill
5470 stw r10,20(r3) ; Fill
5471 stw r11,24(r3) ; Fill
5472 stw r12,28(r3) ; Fill
5473 addi r3,r3,32 ; Point next
5474 bgt+ fp32again ; Keep going
5475
5476 mtmsr r5 ; Restore all
5477			isync
5478 blr ; Return...
5479
5480 .align 5
5481
5482fpSF1: li r2,1
5483 sldi r2,r2,63 ; Get 64-bit bit
5484 or r0,r0,r2 ; Turn on 64-bit
5485 sldi r3,r3,12 ; Make into a physical address
5486
5487			mtmsrd	r0							; Interrupts and translation off
5488			isync
5489
5490 li r2,4096/128 ; Get number of cache lines
5491
5492fp64again: dcbz128 0,r3 ; Clear
5493 addic. r2,r2,-1 ; Count down
5494 std r4,0(r3) ; Fill
5495 std r6,8(r3) ; Fill
5496 std r7,16(r3) ; Fill
5497 std r8,24(r3) ; Fill
5498 std r9,32(r3) ; Fill
5499 std r10,40(r3) ; Fill
5500 std r11,48(r3) ; Fill
5501 std r12,56(r3) ; Fill
5502 std r4,64+0(r3) ; Fill
5503 std r6,64+8(r3) ; Fill
5504 std r7,64+16(r3) ; Fill
5505 std r8,64+24(r3) ; Fill
5506 std r9,64+32(r3) ; Fill
5507 std r10,64+40(r3) ; Fill
5508 std r11,64+48(r3) ; Fill
5509 std r12,64+56(r3) ; Fill
5510 addi r3,r3,128 ; Point next
5511 bgt+ fp64again ; Keep going
5512
5513 mtmsrd r5 ; Restore all
5514 isync
5515 blr ; Return...
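;
; In C terms the routine above is just a pattern fill of one 4KB page; the
; asm runs with translation off and uses dcbz/dcbz128 so each cache line is
; established without being read first. fill_page is an illustrative name:
;
;   #include <stdint.h>
;
;   void fill_page(uint32_t *page, uint32_t fill) {  // 4KB, line-aligned
;       for (int i = 0; i < 4096 / 4; i++)
;           page[i] = fill;     // the asm unrolls 32 or 128 bytes per loop
;   }
;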
5516
5517 .align 5
5518 .globl EXT(mapLog)
5519
5520LEXT(mapLog)
5521
5522			mfmsr	r12							; Save the current MSR
5523			lis		r11,hi16(EXT(mapdebug))		; Point to the log cursor
5524			ori		r11,r11,lo16(EXT(mapdebug))
5525			lwz		r10,0(r11)					; Get the current log position
5526			mr.		r10,r10						; Is the log started yet?
5527			bne++	mLxx						; Yes, use the saved position...
5528			mr		r10,r3						; No, start at the caller-supplied buffer
5529	mLxx:	rlwinm	r0,r12,0,MSR_DR_BIT+1,MSR_DR_BIT-1	; Turn off data translation
5530			mtmsr	r0
5531			isync
5532			stw		r4,0(r10)					; Log R4 twice, then R5 and R6
5533			stw		r4,4(r10)
5534			stw		r5,8(r10)
5535			stw		r6,12(r10)
5536			mtmsr	r12							; Restore the MSR
5537			isync
5538			addi	r10,r10,16					; Advance the log position
5539			stw		r10,0(r11)					; Save it for next time
5540			blr
5541
5542#if 1
5543 .align 5
5544 .globl EXT(checkBogus)
5545
5546LEXT(checkBogus)
5547
5548 BREAKPOINT_TRAP
5549 blr ; No-op normally
5550
5551#endif
5552
5553
5554
5555