/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <assym.s>
#include <debug.h>
#include <cpus.h>
#include <db_machine_commands.h>
#include <mach_rt.h>

#include <mach_debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <mach/ppc/vm_param.h>

#define INSTRUMENT 0

            .text

;
;        0        0        1        2        3        4        4        5       6
;        0        8        6        4        2        0        8        6       3
;       +--------+--------+--------+--------+--------+--------+--------+--------+
;       |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - EA
;       +--------+--------+--------+--------+--------+--------+--------+--------+
;
;        0        0        1
;        0        8        6
;       +--------+--------+--------+
;       |//////BB|BBBBBBBB|BBBB////| - SID - base
;       +--------+--------+--------+
;
;        0        0        1
;        0        8        6
;       +--------+--------+--------+
;       |////////|11111111|111111//| - SID - copy 1
;       +--------+--------+--------+
;
;        0        0        1
;        0        8        6
;       +--------+--------+--------+
;       |////////|//222222|22222222| - SID - copy 2
;       +--------+--------+--------+
;
;        0        0        1
;        0        8        6
;       +--------+--------+--------+
;       |//////33|33333333|33//////| - SID - copy 3 - not needed
;       +--------+--------+--------+   for 65 bit VPN
;
;        0        0        1        2        3        4        4  5   5
;        0        8        6        4        2        0        8  1   5
;       +--------+--------+--------+--------+--------+--------+--------+
;       |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////| - SID Hash - this is all
;       +--------+--------+--------+--------+--------+--------+--------+   SID copies ORed
;
;        0        0        1        2        3        4        4  5   5
;        0        8        6        4        2        0        8  1   5
;       +--------+--------+--------+--------+--------+--------+--------+
;       |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////| - Shifted high order EA
;       +--------+--------+--------+--------+--------+--------+--------+   left shifted "segment"
;                                                                          part of EA to make
;                                                                          room for SID base
;
;
;        0        0        1        2        3        4        4  5   5
;        0        8        6        4        2        0        8  1   5
;       +--------+--------+--------+--------+--------+--------+--------+
;       |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////| - VSID - SID Hash XORed
;       +--------+--------+--------+--------+--------+--------+--------+   with shifted EA
;
;        0        0        1        2        3        4        4        5        6        7      7
;        0        8        6        4        2        0        8        6        4        2      9
;       +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;       |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - VPN
;       +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;
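;
;           A hedged C sketch of the VSID construction pictured above (the idea
;           only: SID_COPY1_SHIFT, SID_COPY2_SHIFT, EA_SEG_SHIFT, and
;           SEG_ALIGN_SHIFT are illustrative placeholder names, not the exact
;           field widths and shift amounts the assembly below uses):
;
;               /* OR the base SID with its shifted copies; copy 3 is not
;                * needed for a 65-bit VPN, per the diagrams above. */
;               uint64_t sid_hash(uint64_t sid) {
;                   return sid | (sid << SID_COPY1_SHIFT) | (sid << SID_COPY2_SHIFT);
;               }
;
;               /* XOR the left-shifted "segment" part of the EA into the SID
;                * hash to form the VSID; the VPN is then VSID:page:offset. */
;               uint64_t make_vsid(uint64_t sid, uint64_t ea) {
;                   uint64_t seg = ea >> EA_SEG_SHIFT;   /* high-order EA bits */
;                   return sid_hash(sid) ^ (seg << SEG_ALIGN_SHIFT);
;               }
;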


/* addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping
 *
 * Maps a page or block into a pmap
 *
 * Returns 0 if add worked or the vaddr of the first overlap if not
 *
 * Make mapping - not block or I/O - note: this is low-level, the upper level
 * should remove duplicates (the protocol is sketched in C just after this comment)
 *
 * 1) bump mapping busy count
 * 2) lock pmap share
 * 3) find mapping full path - finds all possible list previous elements
 * 4) upgrade pmap to exclusive
 * 5) add mapping to search list
 * 6) find physent
 * 7) lock physent
 * 8) add to physent
 * 9) unlock physent
 * 10) unlock pmap
 * 11) drop mapping busy count
 *
 *
 * Make mapping - block or I/O - note: this is low-level, the upper level
 * should remove duplicates
 *
 * 1) bump mapping busy count
 * 2) lock pmap share
 * 3) find mapping full path - finds all possible list previous elements
 * 4) upgrade pmap to exclusive
 * 5) add mapping to search list
 * 6) unlock pmap
 * 7) drop mapping busy count
 *
 */
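;
;           The protocol above, rendered loosely as C (a hedged sketch, not the
;           kernel's code: helper names such as sxlk_lock_shared and
;           map_search_full are invented stand-ins for the assembly routines
;           sxlkShared, mapSearchFull, mapInsert, mapPhysFindLock, etc.):
;
;               addr64_t hw_add_map_sketch(pmap_t *pmap, mapping_t *mp) {
;                   mapping_bump_busy(mp);                  /* 1) bump busy count       */
;                   sxlk_lock_shared(&pmap->search_lock);   /* 2) lock pmap share       */
;                   if (map_search_full(pmap, mp->vaddr))   /* 3) full-path search      */
;                       return first_overlap_vaddr(pmap, mp);   /* collision            */
;                   sxlk_promote(&pmap->search_lock);       /* 4) upgrade to exclusive  */
;                   map_insert(pmap, mp);                   /* 5) add to search list    */
;                   if (!(mp->flags & (mpBlock | mpNest))) {    /* block/I/O skip physent */
;                       physent_t *pp = map_phys_find_lock(mp); /* 6-7) find and lock   */
;                       phys_chain_insert(pp, mp);          /* 8) add to physent        */
;                       map_phys_unlock(pp);                /* 9) unlock physent        */
;                   }
;                   sxlk_unlock(&pmap->search_lock);        /* 10) unlock pmap          */
;                   mapping_drop_busy(mp);                  /* 11) drop busy count      */
;                   return 0;                               /* 0 = added, no overlap    */
;               }
;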

            .align 5
            .globl EXT(hw_add_map)

LEXT(hw_add_map)

            stwu r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
            mflr r0 ; Save the link register
            stw r17,FM_ARG0+0x00(r1) ; Save a register
            stw r18,FM_ARG0+0x04(r1) ; Save a register
            stw r19,FM_ARG0+0x08(r1) ; Save a register
            mfsprg r19,2 ; Get feature flags
            stw r20,FM_ARG0+0x0C(r1) ; Save a register
            stw r21,FM_ARG0+0x10(r1) ; Save a register
            mtcrf 0x02,r19 ; Move pf64Bit to cr6
            stw r22,FM_ARG0+0x14(r1) ; Save a register
            stw r23,FM_ARG0+0x18(r1) ; Save a register
            stw r24,FM_ARG0+0x1C(r1) ; Save a register
            stw r25,FM_ARG0+0x20(r1) ; Save a register
            stw r26,FM_ARG0+0x24(r1) ; Save a register
            stw r27,FM_ARG0+0x28(r1) ; Save a register
            stw r28,FM_ARG0+0x2C(r1) ; Save a register
            stw r29,FM_ARG0+0x30(r1) ; Save a register
            stw r30,FM_ARG0+0x34(r1) ; Save a register
            stw r31,FM_ARG0+0x38(r1) ; Save a register
            stw r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

            rlwinm r11,r4,0,0,19 ; Round down to get mapping block address
            mr r28,r3 ; Save the pmap
            mr r31,r4 ; Save the mapping
            bt++ pf64Bitb,hamSF1 ; skip if 64-bit (only they take the hint)
            lwz r20,pmapvr+4(r3) ; Get conversion mask for pmap
            lwz r21,mbvrswap+4(r11) ; Get conversion mask for mapping

            b hamSF1x ; Done...

hamSF1:     ld r20,pmapvr(r3) ; Get conversion mask for pmap
            ld r21,mbvrswap(r11) ; Get conversion mask for mapping

hamSF1x:    bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit

            mr r17,r11 ; Save the MSR
            xor r28,r28,r20 ; Convert the pmap to physical addressing
            xor r31,r31,r21 ; Convert the mapping to physical addressing

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkShared ; Go get a shared lock on the mapping lists
            mr. r3,r3 ; Did we get the lock?
            lwz r24,mpFlags(r31) ; Pick up the flags
            bne-- hamBadLock ; Nope...

            li r21,0 ; Remember that we have the shared lock

;
;           Note that we do a full search (i.e., no shortcut level skips, etc.)
;           here so that we will know the previous elements so we can dequeue them
;           later.
;

hamRescan:  lwz r4,mpVAddr(r31) ; Get the new vaddr top half
            lwz r5,mpVAddr+4(r31) ; Get the new vaddr bottom half
            mr r3,r28 ; Pass in pmap to search
            lhz r23,mpBSize(r31) ; Get the block size for later
            mr r29,r4 ; Save top half of vaddr for later
            mr r30,r5 ; Save bottom half of vaddr for later

#if INSTRUMENT
            mfspr r0,pmc1 ; INSTRUMENT - saveinstr[16] - Take stamp before mapSearchFull
            stw r0,0x6100+(16*16)+0x0(0) ; INSTRUMENT - Save it
            mfspr r0,pmc2 ; INSTRUMENT - Get stamp
            stw r0,0x6100+(16*16)+0x4(0) ; INSTRUMENT - Save it
            mfspr r0,pmc3 ; INSTRUMENT - Get stamp
            stw r0,0x6100+(16*16)+0x8(0) ; INSTRUMENT - Save it
            mfspr r0,pmc4 ; INSTRUMENT - Get stamp
            stw r0,0x6100+(16*16)+0xC(0) ; INSTRUMENT - Save it
#endif

            bl EXT(mapSearchFull) ; Go see if we can find it

#if INSTRUMENT
            mfspr r0,pmc1 ; INSTRUMENT - saveinstr[17] - Take stamp after mapSearchFull
            stw r0,0x6100+(17*16)+0x0(0) ; INSTRUMENT - Save it
            mfspr r0,pmc2 ; INSTRUMENT - Get stamp
            stw r0,0x6100+(17*16)+0x4(0) ; INSTRUMENT - Save it
            mfspr r0,pmc3 ; INSTRUMENT - Get stamp
            stw r0,0x6100+(17*16)+0x8(0) ; INSTRUMENT - Save it
            mfspr r0,pmc4 ; INSTRUMENT - Get stamp
            stw r0,0x6100+(17*16)+0xC(0) ; INSTRUMENT - Save it
#endif

            andi. r0,r24,mpNest ; See if we are a nest
            rlwinm r23,r23,12,0,19 ; Convert standard block size to bytes
            lis r0,0x8000 ; Get 0xFFFFFFFF80000000
            li r22,0 ; Assume high part of size is 0
            beq++ hamNoNest ; This is not a nest...

            rlwinm r22,r23,16,16,31 ; Convert partially converted size to segments
            rlwinm r23,r23,16,0,3 ; Finish shift

hamNoNest:  add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
            mr. r3,r3 ; Did we find a mapping here?
            or r0,r0,r30 ; Make sure a carry will propagate all the way in 64-bit
            crmove cr5_eq,cr0_eq ; Remember whether we found the mapping
            addc r9,r0,r23 ; Add size to get last page in new range
            or. r0,r4,r5 ; Are we beyond the end?
            adde r8,r29,r22 ; Add the rest of the length on
            bne-- cr5,hamOverlay ; Yeah, this is no good, can not double map...
            rlwinm r9,r9,0,0,31 ; Clean top half of sum
            beq++ hamFits ; We are at the end...

            cmplw cr1,r9,r5 ; Is the bottom part of our end less?
            cmplw r8,r4 ; Is our end before the next (top part)?
            crand cr0_eq,cr0_eq,cr1_lt ; Is the second half less and the first half equal?
            cror cr0_eq,cr0_eq,cr0_lt ; Or is the top half less?

            bf-- cr0_eq,hamOverlay ; No, we do not fit; there is an overlay...

;
;           Here we try to promote to an exclusive lock. This will fail if someone else
;           has it shared.
;
hamFits:    mr. r21,r21 ; Do we already have the exclusive lock?
            la r3,pmapSXlk(r28) ; Point to the pmap search lock

            bne-- hamGotX ; We already have the exclusive...

            bl sxlkPromote ; Try to promote shared to exclusive
            mr. r3,r3 ; Could we?
            beq++ hamGotX ; Yeah...

;
;           Since we could not promote our lock, we need to convert it.
;           That means that we drop the shared lock and wait to get it
;           exclusive. Since we release the lock, we need to do the look up
;           again.
;

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkConvert ; Convert shared to exclusive
            mr. r3,r3 ; Could we?
            bne-- hamBadLock ; Nope, we must have timed out...

            li r21,1 ; Remember that we have the exclusive lock
            b hamRescan ; Go look again...

            .align 5

hamGotX:
#if INSTRUMENT
            mfspr r3,pmc1 ; INSTRUMENT - saveinstr[18] - Take stamp before mapInsert
            stw r3,0x6100+(18*16)+0x0(0) ; INSTRUMENT - Save it
            mfspr r3,pmc2 ; INSTRUMENT - Get stamp
            stw r3,0x6100+(18*16)+0x4(0) ; INSTRUMENT - Save it
            mfspr r3,pmc3 ; INSTRUMENT - Get stamp
            stw r3,0x6100+(18*16)+0x8(0) ; INSTRUMENT - Save it
            mfspr r3,pmc4 ; INSTRUMENT - Get stamp
            stw r3,0x6100+(18*16)+0xC(0) ; INSTRUMENT - Save it
#endif
            mr r3,r28 ; Get the pmap to insert into
            mr r4,r31 ; Point to the mapping
            bl EXT(mapInsert) ; Insert the mapping into the list

#if INSTRUMENT
            mfspr r4,pmc1 ; INSTRUMENT - saveinstr[19] - Take stamp after mapInsert
            stw r4,0x6100+(19*16)+0x0(0) ; INSTRUMENT - Save it
            mfspr r4,pmc2 ; INSTRUMENT - Get stamp
            stw r4,0x6100+(19*16)+0x4(0) ; INSTRUMENT - Save it
            mfspr r4,pmc3 ; INSTRUMENT - Get stamp
            stw r4,0x6100+(19*16)+0x8(0) ; INSTRUMENT - Save it
            mfspr r4,pmc4 ; INSTRUMENT - Get stamp
            stw r4,0x6100+(19*16)+0xC(0) ; INSTRUMENT - Save it
#endif

            lhz r8,mpSpace(r31) ; Get the address space
            mfsdr1 r7 ; Get the hash table base/bounds
            lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
            andi. r0,r24,mpNest|mpBlock ; Is this a nest or block?

            rlwimi r8,r8,14,4,17 ; Double address space
            rlwinm r9,r30,20,16,31 ; Isolate the page number
            rlwinm r10,r30,18,14,17 ; Shift EA[32:35] down to correct spot in VSID (actually shift up 14)
            rlwimi r8,r8,28,0,3 ; Get the last nybble of the hash
            rlwimi r10,r29,18,0,13 ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
            rlwinm r7,r7,0,16,31 ; Isolate length mask (or count)
            addi r4,r4,1 ; Bump up the mapped page count
            xor r10,r10,r8 ; Calculate the low 32 bits of the VSID
            stw r4,pmapResidentCnt(r28) ; Set the mapped page count
            xor r9,r9,r10 ; Get the hash to the PTEG

            bne-- hamDoneNP ; This is a block or nest, therefore, no physent...

            bl mapPhysFindLock ; Go find and lock the physent

            bt++ pf64Bitb,ham64 ; This is 64-bit...

            lwz r11,ppLink+4(r3) ; Get the alias chain pointer
            rlwinm r7,r7,16,0,15 ; Get the PTEG wrap size
            slwi r9,r9,6 ; Make PTEG offset
            ori r7,r7,0xFFC0 ; Stick in the bottom part
            rlwinm r12,r11,0,0,25 ; Clean it up
            and r9,r9,r7 ; Wrap offset into table
            mr r4,r31 ; Set the link to install
            stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
            stw r12,mpAlias+4(r31) ; Move to the mapping
            bl mapPhyCSet32 ; Install the link
            b hamDone ; Go finish up...

            .align 5

ham64:      li r0,0xFF ; Get mask to clean up alias pointer
            subfic r7,r7,46 ; Get number of leading zeros
            eqv r4,r4,r4 ; Get all ones
            ld r11,ppLink(r3) ; Get the alias chain pointer
            rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC00000000000003F
            srd r4,r4,r7 ; Get the wrap mask
            sldi r9,r9,7 ; Change hash to PTEG offset
            andc r11,r11,r0 ; Clean out the lock and flags
            and r9,r9,r4 ; Wrap to PTEG
            mr r4,r31 ; Set the link to install
            stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
            std r11,mpAlias(r31) ; Set the alias pointer in the mapping

            bl mapPhyCSet64 ; Install the link

hamDone:    bl mapPhysUnlock ; Unlock the physent chain

hamDoneNP:  la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkUnlock ; Unlock the search list

            mr r3,r31 ; Get the mapping pointer
            bl mapDropBusy ; Drop the busy count

            li r3,0 ; Set successful return
            li r4,0 ; Set successful return

hamReturn:  bt++ pf64Bitb,hamR64 ; 64-bit path...

            mtmsr r17 ; Restore enables/translation/etc.
            isync
            b hamReturnC ; Join common...

hamR64:     mtmsrd r17 ; Restore enables/translation/etc.
            isync

hamReturnC:
#if INSTRUMENT
            mfspr r0,pmc1 ; INSTRUMENT - saveinstr[20] - Take stamp at return
            stw r0,0x6100+(20*16)+0x0(0) ; INSTRUMENT - Save it
            mfspr r0,pmc2 ; INSTRUMENT - Get stamp
            stw r0,0x6100+(20*16)+0x4(0) ; INSTRUMENT - Save it
            mfspr r0,pmc3 ; INSTRUMENT - Get stamp
            stw r0,0x6100+(20*16)+0x8(0) ; INSTRUMENT - Save it
            mfspr r0,pmc4 ; INSTRUMENT - Get stamp
            stw r0,0x6100+(20*16)+0xC(0) ; INSTRUMENT - Save it
#endif
            lwz r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return
            lwz r17,FM_ARG0+0x00(r1) ; Restore a register
            lwz r18,FM_ARG0+0x04(r1) ; Restore a register
            lwz r19,FM_ARG0+0x08(r1) ; Restore a register
            lwz r20,FM_ARG0+0x0C(r1) ; Restore a register
            mtlr r0 ; Restore the return
            lwz r21,FM_ARG0+0x10(r1) ; Restore a register
            lwz r22,FM_ARG0+0x14(r1) ; Restore a register
            lwz r23,FM_ARG0+0x18(r1) ; Restore a register
            lwz r24,FM_ARG0+0x1C(r1) ; Restore a register
            lwz r25,FM_ARG0+0x20(r1) ; Restore a register
            lwz r26,FM_ARG0+0x24(r1) ; Restore a register
            lwz r27,FM_ARG0+0x28(r1) ; Restore a register
            lwz r28,FM_ARG0+0x2C(r1) ; Restore a register
            lwz r29,FM_ARG0+0x30(r1) ; Restore a register
            lwz r30,FM_ARG0+0x34(r1) ; Restore a register
            lwz r31,FM_ARG0+0x38(r1) ; Restore a register
            lwz r1,0(r1) ; Pop the stack

            blr ; Leave...


            .align 5

hamOverlay: lwz r22,mpFlags(r3) ; Get the overlay flags
            li r0,mpC|mpR ; Get a mask to turn off RC bits
            lwz r23,mpFlags(r31) ; Get the requested flags
            lwz r20,mpVAddr(r3) ; Get the overlay address
            lwz r8,mpVAddr(r31) ; Get the requested address
            lwz r21,mpVAddr+4(r3) ; Get the overlay address
            lwz r9,mpVAddr+4(r31) ; Get the requested address
            lhz r10,mpBSize(r3) ; Get the overlay length
            lhz r11,mpBSize(r31) ; Get the requested length
            lwz r24,mpPAddr(r3) ; Get the overlay physical address
            lwz r25,mpPAddr(r31) ; Get the requested physical address
            andc r21,r21,r0 ; Clear RC bits
            andc r9,r9,r0 ; Clear RC bits

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkUnlock ; Unlock the search list

            rlwinm. r0,r22,0,mpRIPb,mpRIPb ; Are we in the process of removing this one?
            mr r3,r20 ; Save the top of the colliding address
            rlwinm r4,r21,0,0,19 ; Save the bottom of the colliding address

            bne++ hamRemv ; Removing, go say so, so we help...

            cmplw r20,r8 ; High part of vaddr the same?
            cmplw cr1,r21,r9 ; Low part?
            crand cr5_eq,cr0_eq,cr1_eq ; Remember if same

            cmplw r10,r11 ; Size the same?
            cmplw cr1,r24,r25 ; Physical address?
            crand cr5_eq,cr5_eq,cr0_eq ; Remember
            crand cr5_eq,cr5_eq,cr1_eq ; Remember if same

            xor r23,r23,r22 ; Check for differences in flags
            ori r23,r23,mpFIP ; "Fault in Progress" is ok to be different
            xori r23,r23,mpFIP ; Force mpFIP off
            rlwinm. r0,r23,0,mpSpecialb,mpListsb-1 ; See if any important flags are different
            crand cr5_eq,cr5_eq,cr0_eq ; Merge in final check
            bf-- cr5_eq,hamReturn ; This is not the same, so we just return a collision...

            ori r4,r4,mapRtMapDup ; Set duplicate
            b hamReturn ; And leave...

hamRemv:    ori r4,r4,mapRtRemove ; We are in the process of removing the collision
            b hamReturn ; Come back, y'all...

            .align 5

hamBadLock: li r3,0 ; Set lock time out error code
            li r4,mapRtBadLk ; Set lock time out error code
            b hamReturn ; Leave....



/*
 * mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
 *
 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
 * a 64-bit quantity, it is a long long so it is in R4 and R5.
 *
 * We return the virtual address of the removed mapping in R3.
 *
 * Note that this is designed to be called from 32-bit mode with a stack.
 *
 * We disable translation and all interruptions here. This keeps us
 * from having to worry about a deadlock due to having anything locked
 * and needing it to process a fault.
 *
 * Note that this must be done with both interruptions off and VM off
 *
 * Remove mapping via pmap, regular page, no pte
 *
 * 1) lock pmap share
 * 2) find mapping full path - finds all possible list previous elements
 * 3) upgrade pmap to exclusive
 * 4) bump mapping busy count
 * 5) remove mapping from search list
 * 6) unlock pmap
 * 7) lock physent
 * 8) remove from physent
 * 9) unlock physent
 * 10) drop mapping busy count
 * 11) drain mapping busy count
 *
 *
 * Remove mapping via pmap, regular page, with pte
 *
 * 1) lock pmap share
 * 2) find mapping full path - finds all possible list previous elements
 * 3) upgrade lock to exclusive
 * 4) bump mapping busy count
 * 5) lock PTEG
 * 6) invalidate pte and tlbie
 * 7) atomic merge rc into physent
 * 8) unlock PTEG
 * 9) remove mapping from search list
 * 10) unlock pmap
 * 11) lock physent
 * 12) remove from physent
 * 13) unlock physent
 * 14) drop mapping busy count
 * 15) drain mapping busy count
 *
 *
 * Remove mapping via pmap, I/O or block (the caller's retry loop is
 * sketched in C just after this comment)
 *
 * 1) lock pmap share
 * 2) find mapping full path - finds all possible list previous elements
 * 3) upgrade lock to exclusive
 * 4) bump mapping busy count
 * 5) mark remove-in-progress
 * 6) check and bump remove chunk cursor if needed
 * 7) unlock pmap
 * 8) if something to invalidate, go to step 11
 *
 * 9) drop busy
 * 10) return with mapRtRemove to force higher level to call again
 *
 * 11) lock PTEG
 * 12) invalidate ptes, no tlbie
 * 13) unlock PTEG
 * 14) repeat 11 - 13 for all pages in chunk
 * 15) if not final chunk, go to step 9
 * 16) invalidate tlb entries for the whole block map but no more than the full tlb
 * 17) lock pmap share
 * 18) find mapping full path - finds all possible list previous elements
 * 19) upgrade lock to exclusive
 * 20) remove mapping from search list
 * 21) drop mapping busy count
 * 22) drain mapping busy count
 *
 */
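;
;           The I/O-or-block retry protocol above, seen from the caller's side,
;           as a hedged C sketch (the loop shape follows steps 9-10; the
;           variable names and loose typing are illustrative, while mapRtRemove
;           is the real return code used below):
;
;               addr64_t next_va = 0;
;               addr64_t rc;
;               do {
;                   /* each call invalidates at most one chunk of the block,
;                    * then returns mapRtRemove so we come back for the rest */
;                   rc = hw_rem_map(pmap, va, &next_va);
;               } while (rc == mapRtRemove);
;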

            .align 5
            .globl EXT(hw_rem_map)

LEXT(hw_rem_map)

;
;           NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE
;           THE HW_PURGE_* ROUTINES ALSO
;

#define hrmStackSize ((31-15+1)*4)+4

            stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
            mflr r0 ; Save the link register
            stw r15,FM_ARG0+0x00(r1) ; Save a register
            stw r16,FM_ARG0+0x04(r1) ; Save a register
            stw r17,FM_ARG0+0x08(r1) ; Save a register
            stw r18,FM_ARG0+0x0C(r1) ; Save a register
            stw r19,FM_ARG0+0x10(r1) ; Save a register
            mfsprg r19,2 ; Get feature flags
            stw r20,FM_ARG0+0x14(r1) ; Save a register
            stw r21,FM_ARG0+0x18(r1) ; Save a register
            mtcrf 0x02,r19 ; Move pf64Bit to cr6
            stw r22,FM_ARG0+0x1C(r1) ; Save a register
            stw r23,FM_ARG0+0x20(r1) ; Save a register
            stw r24,FM_ARG0+0x24(r1) ; Save a register
            stw r25,FM_ARG0+0x28(r1) ; Save a register
            stw r26,FM_ARG0+0x2C(r1) ; Save a register
            stw r27,FM_ARG0+0x30(r1) ; Save a register
            stw r28,FM_ARG0+0x34(r1) ; Save a register
            stw r29,FM_ARG0+0x38(r1) ; Save a register
            stw r30,FM_ARG0+0x3C(r1) ; Save a register
            stw r31,FM_ARG0+0x40(r1) ; Save a register
            stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
            stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

            bt++ pf64Bitb,hrmSF1 ; skip if 64-bit (only they take the hint)
            lwz r9,pmapvr+4(r3) ; Get conversion mask
            b hrmSF1x ; Done...

hrmSF1:     ld r9,pmapvr(r3) ; Get conversion mask

hrmSF1x:    bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit

            xor r28,r3,r9 ; Convert the pmap to physical addressing

;
;           Here is where we join in from the hw_purge_* routines
;

hrmJoin:    mfsprg r19,2 ; Get feature flags again (for alternate entries)

            mr r17,r11 ; Save the MSR
            mr r29,r4 ; Top half of vaddr
            mr r30,r5 ; Bottom half of vaddr

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkShared ; Go get a shared lock on the mapping lists
            mr. r3,r3 ; Did we get the lock?
            bne-- hrmBadLock ; Nope...

;
;           Note that we do a full search (i.e., no shortcut level skips, etc.)
;           here so that we will know the previous elements so we can dequeue them
;           later. Note: we get back mpFlags in R7.
;

            mr r3,r28 ; Pass in pmap to search
            mr r4,r29 ; High order of address
            mr r5,r30 ; Low order of address
            bl EXT(mapSearchFull) ; Go see if we can find it

            andi. r0,r7,lo16(mpPerm|mpSpecial|mpNest) ; Is this nested, special, or a perm mapping?
            mr r20,r7 ; Remember mpFlags
            rlwinm r0,r7,0,mpRemovableb,mpRemovableb ; Are we allowed to remove it?
            crmove cr5_eq,cr0_eq ; Remember if we should remove this
            mr. r31,r3 ; Did we? (And remember mapping address for later)
            cmplwi cr1,r0,0 ; Are we allowed to remove?
            mr r15,r4 ; Save top of next vaddr
            crorc cr5_eq,cr5_eq,cr1_eq ; cr5_eq is true if this is not removable
            mr r16,r5 ; Save bottom of next vaddr
            beq hrmNotFound ; Nope, not found...

            bf-- cr5_eq,hrmPerm ; This one can't be removed...
;
;           Here we try to promote to an exclusive lock. This will fail if someone else
;           has it shared.
;

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkPromote ; Try to promote shared to exclusive
            mr. r3,r3 ; Could we?
            beq++ hrmGotX ; Yeah...

;
;           Since we could not promote our lock, we need to convert it.
;           That means that we drop the shared lock and wait to get it
;           exclusive. Since we release the lock, we need to do the look up
;           again.
;

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkConvert ; Convert shared to exclusive
            mr. r3,r3 ; Could we?
            bne-- hrmBadLock ; Nope, we must have timed out...

            mr r3,r28 ; Pass in pmap to search
            mr r4,r29 ; High order of address
            mr r5,r30 ; Low order of address
            bl EXT(mapSearchFull) ; Rescan the list

            andi. r0,r7,lo16(mpPerm|mpSpecial|mpNest) ; Is this nested, special, or a perm mapping?
            rlwinm r0,r7,0,mpRemovableb,mpRemovableb ; Are we allowed to remove it?
            crmove cr5_eq,cr0_eq ; Remember if we should remove this
            mr. r31,r3 ; Did we lose it when we converted?
            cmplwi cr1,r0,0 ; Are we allowed to remove?
            mr r20,r7 ; Remember mpFlags
            crorc cr5_eq,cr5_eq,cr1_eq ; cr5_eq is true if this is not removable
            mr r15,r4 ; Save top of next vaddr
            mr r16,r5 ; Save bottom of next vaddr
            beq-- hrmNotFound ; Yeah, we did, someone tossed it for us...

            bf-- cr5_eq,hrmPerm ; This one can't be removed...

;
;           We have an exclusive lock on the mapping chain. And we
;           also have the busy count bumped in the mapping so it can
;           not vanish on us.
;

hrmGotX:    mr r3,r31 ; Get the mapping
            bl mapBumpBusy ; Bump up the busy count

;
;           Invalidate any PTEs associated with this
;           mapping (more than one if a block) and accumulate the reference
;           and change bits.
;
;           Here is also where we need to split 32- and 64-bit processing
;

            lwz r21,mpPte(r31) ; Grab the offset to the PTE
            rlwinm r23,r29,0,1,0 ; Copy high order vaddr to high if 64-bit machine
            mfsdr1 r29 ; Get the hash table base and size
            rlwinm r0,r20,0,mpBlockb,mpBlockb ; Is this a block mapping?
            andi. r2,r20,lo16(mpSpecial|mpNest) ; Is this nest or special mapping?
            cmplwi cr5,r0,0 ; Remember if this is a block mapping
            rlwinm r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
            ori r2,r2,0xFFFF ; Get mask to clean out hash table base (works for both 32- and 64-bit)
            cmpwi cr1,r0,0 ; Have we made a PTE for this yet?
            rlwinm r21,r21,0,0,30 ; Clear out valid bit
            crorc cr0_eq,cr1_eq,cr0_eq ; No need to look at PTE if none or a special mapping
            rlwimi r23,r30,0,0,31 ; Insert low under high part of address
            andc r29,r29,r2 ; Clean up hash table base
            li r22,0 ; Clear this on out (also sets RC to 0 if we bail)
            mr r30,r23 ; Move the now merged vaddr to the correct register
            add r26,r29,r21 ; Point to the PTEG slot

            bt++ pf64Bitb,hrmSplit64 ; Go do 64-bit version...

            rlwinm r9,r21,28,4,29 ; Convert PTEG to PCA entry
            bne- cr5,hrmBlock32 ; Go treat block specially...
            subfic r9,r9,-4 ; Get the PCA entry offset
            bt- cr0_eq,hrmPysDQ32 ; Skip next if no possible PTE...
            add r7,r9,r29 ; Point to the PCA slot

            bl mapLockPteg ; Go lock up the PTEG (Note: we need to save R6 to set PCA)

            lwz r21,mpPte(r31) ; Get the quick pointer again
            lwz r5,0(r26) ; Get the top of PTE

            rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
            rlwinm r21,r21,0,0,30 ; Clear out valid bit
            rlwinm r5,r5,0,1,31 ; Turn off valid bit in PTE
            stw r21,mpPte(r31) ; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake)
            beq- hrmUlckPCA32 ; Pte is gone, no need to invalidate...

            stw r5,0(r26) ; Invalidate the PTE

            li r9,tlbieLock ; Get the TLBIE lock

            sync ; Make sure the invalid PTE is actually in memory

hrmPtlb32:  lwarx r5,0,r9 ; Get the TLBIE lock
            mr. r5,r5 ; Is it locked?
            li r5,1 ; Get locked indicator
            bne- hrmPtlb32 ; It is locked, go spin...
            stwcx. r5,0,r9 ; Try to get it
            bne- hrmPtlb32 ; We was beat...

            rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?

            tlbie r30 ; Invalidate all corresponding TLB entries

            beq- hrmNTlbs ; Jump if we can not do a TLBSYNC....

            eieio ; Make sure that the tlbie happens first
            tlbsync ; Wait for everyone to catch up
            sync ; Make sure of it all

hrmNTlbs:   li r0,0 ; Clear this
            rlwinm r2,r21,29,29,31 ; Get slot number (8 byte entries)
            stw r0,tlbieLock(0) ; Clear the tlbie lock
            lis r0,0x8000 ; Get bit for slot 0
            eieio ; Make sure those RC bits have been stashed in PTE

            srw r0,r0,r2 ; Get the allocation hash mask
            lwz r22,4(r26) ; Get the latest reference and change bits
            or r6,r6,r0 ; Show that this slot is free

hrmUlckPCA32:
            eieio ; Make sure all updates come first
            stw r6,0(r7) ; Unlock the PTEG

;
;           Now, it is time to remove the mapping and unlock the chain.
;           But first, we need to make sure no one else is using this
;           mapping, so we drain the busy count now.
;

hrmPysDQ32: mr r3,r31 ; Point to the mapping
            bl mapDrainBusy ; Go wait until mapping is unused

            mr r3,r28 ; Get the pmap to remove from
            mr r4,r31 ; Point to the mapping
            bl EXT(mapRemove) ; Remove the mapping from the list

            lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
            andi. r0,r20,lo16(mpSpecial|mpNest) ; Is this nest or special mapping?
            cmplwi cr1,r0,0 ; Special thingie?
            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            subi r4,r4,1 ; Drop down the mapped page count
            stw r4,pmapResidentCnt(r28) ; Set the mapped page count
            bl sxlkUnlock ; Unlock the search list

            bne-- cr1,hrmRetn32 ; This one has no real memory associated with it so we are done...

            bl mapPhysFindLock ; Go find and lock the physent

            lwz r9,ppLink+4(r3) ; Get first mapping

            mr r4,r22 ; Get the RC bits we just got
            bl mapPhysMerge ; Go merge the RC bits

            rlwinm r9,r9,0,0,25 ; Clear the flags from the mapping pointer

            cmplw r9,r31 ; Are we the first on the list?
            bne- hrmNot1st ; Nope...

            li r9,0 ; Get a 0
            lwz r4,mpAlias+4(r31) ; Get our new forward pointer
            stw r9,mpAlias+4(r31) ; Make sure we are off the chain
            bl mapPhyCSet32 ; Go set the physent link and preserve flags

            b hrmPhyDQd ; Join up and unlock it all...

            .align 5

hrmPerm:    li r8,-4096 ; Get the value we need to round down to a page
            and r8,r8,r31 ; Get back to a page
            lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkUnlock ; Unlock the search list

            xor r3,r31,r8 ; Flip mapping address to virtual
            ori r3,r3,mapRtPerm ; Set permanent mapping error
            b hrmErRtn

hrmBadLock: li r3,mapRtBadLk ; Set bad lock
            b hrmErRtn

hrmEndInSight:
            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkUnlock ; Unlock the search list

hrmDoneChunk:
            mr r3,r31 ; Point to the mapping
            bl mapDropBusy ; Drop the busy here since we need to come back
            li r3,mapRtRemove ; Say we are still removing this
            b hrmErRtn

            .align 5

hrmNotFound:
            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkUnlock ; Unlock the search list
            li r3,0 ; Make sure we know we did not find it

hrmErRtn:   bt++ pf64Bitb,hrmSF1z ; skip if 64-bit (only they take the hint)

            mtmsr r17 ; Restore enables/translation/etc.
            isync
            b hrmRetnCmn ; Join the common return code...

hrmSF1z:    mtmsrd r17 ; Restore enables/translation/etc.
            isync
            b hrmRetnCmn ; Join the common return code...

            .align 5

hrmNot1st:  mr. r8,r9 ; Remember and test current node
            beq- hrmPhyDQd ; Could not find our node, someone must have unmapped us...
            lwz r9,mpAlias+4(r9) ; Chain to the next
            cmplw r9,r31 ; Is this us?
            bne- hrmNot1st ; Not us...

            lwz r9,mpAlias+4(r9) ; Get our forward pointer
            stw r9,mpAlias+4(r8) ; Unchain us

            nop ; For alignment

hrmPhyDQd:  bl mapPhysUnlock ; Unlock the physent chain

hrmRetn32:  rlwinm r8,r31,0,0,19 ; Find start of page
            mr r3,r31 ; Copy the pointer to the mapping
            lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
            bl mapDrainBusy ; Go wait until mapping is unused

            xor r3,r31,r8 ; Flip mapping address to virtual

            mtmsr r17 ; Restore enables/translation/etc.
            isync

hrmRetnCmn: lwz r6,FM_ARG0+0x44(r1) ; Get address to save next mapped vaddr
            lwz r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
            lwz r17,FM_ARG0+0x08(r1) ; Restore a register
            lwz r18,FM_ARG0+0x0C(r1) ; Restore a register
            mr. r6,r6 ; Should we pass back the "next" vaddr?
            lwz r19,FM_ARG0+0x10(r1) ; Restore a register
            lwz r20,FM_ARG0+0x14(r1) ; Restore a register
            mtlr r0 ; Restore the return

            rlwinm r16,r16,0,0,19 ; Clean to a page boundary
            beq hrmNoNextAdr ; Do not pass back the next vaddr...
            stw r15,0(r6) ; Pass back the top of the next vaddr
            stw r16,4(r6) ; Pass back the bottom of the next vaddr

hrmNoNextAdr:
            lwz r15,FM_ARG0+0x00(r1) ; Restore a register
            lwz r16,FM_ARG0+0x04(r1) ; Restore a register
            lwz r21,FM_ARG0+0x18(r1) ; Restore a register
            rlwinm r3,r3,0,0,31 ; Clear top of register if 64-bit
            lwz r22,FM_ARG0+0x1C(r1) ; Restore a register
            lwz r23,FM_ARG0+0x20(r1) ; Restore a register
            lwz r24,FM_ARG0+0x24(r1) ; Restore a register
            lwz r25,FM_ARG0+0x28(r1) ; Restore a register
            lwz r26,FM_ARG0+0x2C(r1) ; Restore a register
            lwz r27,FM_ARG0+0x30(r1) ; Restore a register
            lwz r28,FM_ARG0+0x34(r1) ; Restore a register
            lwz r29,FM_ARG0+0x38(r1) ; Restore a register
            lwz r30,FM_ARG0+0x3C(r1) ; Restore a register
            lwz r31,FM_ARG0+0x40(r1) ; Restore a register
            lwz r1,0(r1) ; Pop the stack
            blr ; Leave...

;
;           Here is where we come when all is lost. Somehow, we failed a mapping function
;           that must work... All hope is gone. Alas, we die.......
;

hrmPanic:   lis r0,hi16(Choke) ; System abend
            ori r0,r0,lo16(Choke) ; System abend
            li r3,failMapping ; Show that we failed some kind of mapping thing
            sc


;
;           Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
;           in the range. Then, if we did not finish, return a code indicating that we need to
;           be called again. Eventually, we will finish and then, we will do a TLBIE for each
;           PTEG up to the point where we have cleared it all (64 for 32-bit architecture)
;
;           A potential speed up is that we stop the invalidate loop once we have walked through
;           the hash table once. This really is not worth the trouble because we need to have
;           mapped 1/2 of physical RAM in an individual block. Way unlikely.
;
;           We should rethink this and see if we think it will be faster to check PTE and
;           only invalidate the specific PTE rather than all block map PTEs in the PTEG.
;
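;
;           A hedged C sketch of the per-PTEG autogen sweep used by hrmBlock32
;           below (as read from the hrmSlot0..hrmSlot7 code; the PCA field
;           positions are my reading of that code, not authoritative):
;
;               /* move the 8 autogen bits up into the free-bit positions */
;               uint32_t agen = (pca << 16) & 0xFF000000;
;               for (int slot = 0; slot < 8; slot++) {
;                   if (agen & (0x80000000u >> slot))
;                       pteg[slot * 2] = 0;    /* zap top word of that PTE   */
;               }
;               pca |= agen;                   /* freed slots become free    */
;               pca &= ~(agen >> 16);          /* and lose their autogen bit */
;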

            .align 5

hrmBlock32: lhz r23,mpSpace(r31) ; Get the address space hash
            lhz r25,mpBSize(r31) ; Get the number of pages in block
            lwz r9,mpBlkRemCur(r31) ; Get our current remove position
            ori r0,r20,mpRIP ; Turn on the remove in progress flag
            mfsdr1 r29 ; Get the hash table base and size
            rlwinm r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb ; Get high order of hash
            lwz r27,mpVAddr+4(r31) ; Get the base vaddr
            sub r4,r25,r9 ; Get number of pages left
            cmplw cr1,r9,r25 ; Have we already hit the end?
            addi r10,r9,mapRemChunk ; Point to the start of the next chunk
            addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
            rlwinm r26,r29,16,7,15 ; Get the hash table size
            srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
            stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
            subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
            cmpwi cr7,r2,0 ; Remember if we have finished
            slwi r0,r9,12 ; Make cursor into page offset
            or r24,r24,r23 ; Get full hash
            and r4,r4,r2 ; If more than a chunk, bring this back to 0
            rlwinm r29,r29,0,0,15 ; Isolate the hash table base
            add r27,r27,r0 ; Adjust vaddr to start of current chunk
            addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get min(num left, chunksize)

            bgt- cr1,hrmEndInSight ; Someone is already doing the last hunk...

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
            bl sxlkUnlock ; Unlock the search list while we are invalidating

            rlwinm r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb ; Isolate the segment
            rlwinm r30,r27,26,6,25 ; Shift vaddr to PTEG offset (and remember VADDR in R27)
            xor r24,r24,r8 ; Get the proper VSID
            rlwinm r21,r27,26,10,25 ; Shift page index to PTEG offset (and remember VADDR in R27)
            ori r26,r26,lo16(0xFFC0) ; Stick in the rest of the length
            rlwinm r22,r4,6,10,25 ; Shift size to PTEG offset
            rlwinm r24,r24,6,0,25 ; Shift hash to PTEG units
            add r22,r22,r30 ; Get end address (in PTEG units)

hrmBInv32:  rlwinm r23,r30,0,10,25 ; Isolate just the page index
            xor r23,r23,r24 ; Hash it
            and r23,r23,r26 ; Wrap it into the table
            rlwinm r3,r23,28,4,29 ; Change to PCA offset
            subfic r3,r3,-4 ; Get the PCA entry offset
            add r7,r3,r29 ; Point to the PCA slot
            cmplw cr5,r30,r22 ; Check if we reached the end of the range
            addi r30,r30,64 ; Bump to the next vaddr

            bl mapLockPteg ; Lock the PTEG

            rlwinm. r4,r6,16,0,7 ; Position, save, and test block mappings in PCA
            add r5,r23,r29 ; Point to the PTEG
            li r0,0 ; Set an invalid PTE value
            beq+ hrmBNone32 ; No block map PTEs in this PTEG...
            mtcrf 0x80,r4 ; Set CRs to select PTE slots
            mtcrf 0x40,r4 ; Set CRs to select PTE slots

            bf 0,hrmSlot0 ; No autogen here
            stw r0,0x00(r5) ; Invalidate PTE

hrmSlot0:   bf 1,hrmSlot1 ; No autogen here
            stw r0,0x08(r5) ; Invalidate PTE

hrmSlot1:   bf 2,hrmSlot2 ; No autogen here
            stw r0,0x10(r5) ; Invalidate PTE

hrmSlot2:   bf 3,hrmSlot3 ; No autogen here
            stw r0,0x18(r5) ; Invalidate PTE

hrmSlot3:   bf 4,hrmSlot4 ; No autogen here
            stw r0,0x20(r5) ; Invalidate PTE

hrmSlot4:   bf 5,hrmSlot5 ; No autogen here
            stw r0,0x28(r5) ; Invalidate PTE

hrmSlot5:   bf 6,hrmSlot6 ; No autogen here
            stw r0,0x30(r5) ; Invalidate PTE

hrmSlot6:   bf 7,hrmSlot7 ; No autogen here
            stw r0,0x38(r5) ; Invalidate PTE

hrmSlot7:   rlwinm r0,r4,16,16,23 ; Move in use to autogen
            or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
            andc r6,r6,r0 ; Turn off all the old autogen bits

hrmBNone32: eieio ; Make sure all updates come first

            stw r6,0(r7) ; Unlock and set the PCA

            bne+ cr5,hrmBInv32 ; Go invalidate the next...

            bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...

            mr r3,r31 ; Copy the pointer to the mapping
            bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one

            sync ; Make sure memory is consistent

            subi r5,r25,63 ; Subtract TLB size from page count (note we are 0 based here)
            li r6,63 ; Assume full invalidate for now
            srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
            andc r6,r6,r5 ; Clear max if we have less to do
            and r5,r25,r5 ; Clear count if we have more than max
            lwz r27,mpVAddr+4(r31) ; Get the base vaddr again
            li r7,tlbieLock ; Get the TLBIE lock
            or r5,r5,r6 ; Get number of TLBIEs needed

hrmBTLBlck: lwarx r2,0,r7 ; Get the TLBIE lock
            mr. r2,r2 ; Is it locked?
            li r2,1 ; Get our lock value
            bne- hrmBTLBlck ; It is locked, go wait...
            stwcx. r2,0,r7 ; Try to get it
            bne- hrmBTLBlck ; We was beat...

hrmBTLBi:   addic. r5,r5,-1 ; See if we did them all
            tlbie r27 ; Invalidate it everywhere
            addi r27,r27,0x1000 ; Up to the next page
            bge+ hrmBTLBi ; Make sure we have done it all...

            rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
            li r2,0 ; Lock clear value

            sync ; Make sure all is quiet
            beq- hrmBNTlbs ; Jump if we can not do a TLBSYNC....

            eieio ; Make sure that the tlbie happens first
            tlbsync ; Wait for everyone to catch up
            sync ; Wait for quiet again

hrmBNTlbs:  stw r2,tlbieLock(0) ; Clear the tlbie lock

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkShared ; Go get a shared lock on the mapping lists
            mr. r3,r3 ; Did we get the lock?
            bne- hrmPanic ; Nope...

            lwz r4,mpVAddr(r31) ; High order of address
            lwz r5,mpVAddr+4(r31) ; Low order of address
            mr r3,r28 ; Pass in pmap to search
            mr r29,r4 ; Save this in case we need it (only if promote fails)
            mr r30,r5 ; Save this in case we need it (only if promote fails)
            bl EXT(mapSearchFull) ; Go see if we can find it

            mr. r3,r3 ; Did we? (And remember mapping address for later)
            mr r15,r4 ; Save top of next vaddr
            mr r16,r5 ; Save bottom of next vaddr
            beq- hrmPanic ; Nope, not found...

            cmplw r3,r31 ; Same mapping?
            bne- hrmPanic ; Not good...

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkPromote ; Try to promote shared to exclusive
            mr. r3,r3 ; Could we?
            mr r3,r31 ; Restore the mapping pointer
            beq+ hrmBDone1 ; Yeah...

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkConvert ; Convert shared to exclusive
            mr. r3,r3 ; Could we?
            bne-- hrmPanic ; Nope, we must have timed out...

            mr r3,r28 ; Pass in pmap to search
            mr r4,r29 ; High order of address
            mr r5,r30 ; Low order of address
            bl EXT(mapSearchFull) ; Rescan the list

            mr. r3,r3 ; Did we lose it when we converted?
            mr r15,r4 ; Save top of next vaddr
            mr r16,r5 ; Save bottom of next vaddr
            beq-- hrmPanic ; Yeah, we did, someone tossed it for us...

hrmBDone1:  bl mapDrainBusy ; Go wait until mapping is unused

            mr r3,r28 ; Get the pmap to remove from
            mr r4,r31 ; Point to the mapping
            bl EXT(mapRemove) ; Remove the mapping from the list

            lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            subi r4,r4,1 ; Drop down the mapped page count
            stw r4,pmapResidentCnt(r28) ; Set the mapped page count
            bl sxlkUnlock ; Unlock the search list

            b hrmRetn32 ; We are all done, get out...

;
;           Here we handle the 64-bit version of hw_rem_map
;

            .align 5

hrmSplit64: rlwinm r9,r21,27,5,29 ; Convert PTEG to PCA entry
            bne-- cr5,hrmBlock64 ; Go treat block specially...
            subfic r9,r9,-4 ; Get the PCA entry offset
            bt-- cr0_eq,hrmPysDQ64 ; Skip next if no possible PTE...
            add r7,r9,r29 ; Point to the PCA slot

            bl mapLockPteg ; Go lock up the PTEG

            lwz r21,mpPte(r31) ; Get the quick pointer again
            ld r5,0(r26) ; Get the top of PTE

            rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
            rlwinm r21,r21,0,0,30 ; Clear out valid bit
            sldi r23,r5,16 ; Shift AVPN up to EA format
            rldicr r5,r5,0,62 ; Clear the valid bit
            rldimi r23,r30,0,36 ; Insert the page portion of the VPN
            stw r21,mpPte(r31) ; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake)
            beq-- hrmUlckPCA64 ; Pte is gone, no need to invalidate...

            std r5,0(r26) ; Invalidate the PTE

            li r9,tlbieLock ; Get the TLBIE lock

            sync ; Make sure the invalid PTE is actually in memory

hrmPtlb64:  lwarx r5,0,r9 ; Get the TLBIE lock
            rldicl r23,r23,0,16 ; Clear bits 0:15 cause they say to
            mr. r5,r5 ; Is it locked?
            li r5,1 ; Get locked indicator
            bne-- hrmPtlb64w ; It is locked, go spin...
            stwcx. r5,0,r9 ; Try to get it
            bne-- hrmPtlb64 ; We was beat...

            tlbie r23 ; Invalidate all corresponding TLB entries

            eieio ; Make sure that the tlbie happens first
            tlbsync ; Wait for everyone to catch up
            isync

            ptesync ; Make sure of it all
            li r0,0 ; Clear this
            rlwinm r2,r21,28,29,31 ; Get slot number (16 byte entries)
            stw r0,tlbieLock(0) ; Clear the tlbie lock
            oris r0,r0,0x8000 ; Assume slot 0
            eieio ; Make sure those RC bits have been stashed in PTE
            srw r0,r0,r2 ; Get slot mask to deallocate

            lwz r22,12(r26) ; Get the latest reference and change bits
            or r6,r6,r0 ; Make the guy we killed free

hrmUlckPCA64:
            eieio ; Make sure all updates come first

            stw r6,0(r7) ; Unlock and change the PCA

hrmPysDQ64: mr r3,r31 ; Point to the mapping
            bl mapDrainBusy ; Go wait until mapping is unused

            mr r3,r28 ; Get the pmap to remove from
            mr r4,r31 ; Point to the mapping
            bl EXT(mapRemove) ; Remove the mapping from the list

            andi. r0,r20,lo16(mpSpecial|mpNest) ; Is this nest or special mapping?
            lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
            cmplwi cr1,r0,0 ; Special thingie?
            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            subi r4,r4,1 ; Drop down the mapped page count
            stw r4,pmapResidentCnt(r28) ; Set the mapped page count
            bl sxlkUnlock ; Unlock the search list

            bne-- cr1,hrmRetn64 ; This one has no real memory associated with it so we are done...

55e303ae 1194 bl mapPhysFindLock ; Go find and lock the physent
1c79356b 1195
55e303ae
A
1196 li r0,0xFF ; Get mask to clean up mapping pointer
1197 ld r9,ppLink(r3) ; Get first mapping
1198 rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC0000000000000003F
1199 mr r4,r22 ; Get the RC bits we just got
1c79356b 1200
55e303ae 1201 bl mapPhysMerge ; Go merge the RC bits
d7e50217 1202
55e303ae 1203 andc r9,r9,r0 ; Clean up the mapping pointer
d7e50217 1204
55e303ae
A
1205 cmpld r9,r31 ; Are we the first on the list?
1206 bne- hrmNot1st64 ; Nope...
1c79356b 1207
55e303ae
A
1208 li r9,0 ; Get a 0
1209 ld r4,mpAlias(r31) ; Get our forward pointer
1210
1211 std r9,mpAlias(r31) ; Make sure we are off the chain
1212 bl mapPhyCSet64 ; Go set the physent link and preserve flags
de355530 1213
55e303ae
A
1214 b hrmPhyDQd64 ; Join up and unlock it all...
1215
1216hrmPtlb64w: li r5,lgKillResv ; Point to some spare memory
1217 stwcx. r5,0,r5 ; Clear the pending reservation
de355530 1218
d7e50217 1219
55e303ae
A
1220hrmPtlb64x: lwz r5,0(r9) ; Do a regular load to avoid taking reservation
1221 mr. r5,r5 ; is it locked?
1222 beq++ hrmPtlb64 ; Nope...
1223 b hrmPtlb64x ; Sniff some more...
1224
1225 .align 5
1226
1227hrmNot1st64:
1228 mr. r8,r9 ; Remember and test current node
1229 beq- hrmNotFound ; Could not find our node...
1230 ld r9,mpAlias(r9) ; Chain to the next
1231 cmpld r9,r31 ; Is this us?
1232 bne- hrmNot1st64 ; Not us...
1233
1234 ld r9,mpAlias(r9) ; Get our forward pointer
1235 std r9,mpAlias(r8) ; Unchain us
1236
1237 nop ; For alignment
1238
1239hrmPhyDQd64:
1240 bl mapPhysUnlock ; Unlock the physent chain
1c79356b 1241
55e303ae
A
1242hrmRetn64: rldicr r8,r31,0,51 ; Find start of page
1243 mr r3,r31 ; Copy the pointer to the mapping
1244 lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
1245 bl mapDrainBusy ; Go wait until mapping is unused
1c79356b 1246
55e303ae 1247 xor r3,r31,r8 ; Flip mapping address to virtual
d7e50217 1248
55e303ae 1249 mtmsrd r17 ; Restore enables/translation/etc.
de355530 1250 isync
55e303ae
A
1251
1252 b hrmRetnCmn ; Join the common return path...


;
;           Check hrmBlock32 for comments.
;

            .align 5

hrmBlock64: lhz r24,mpSpace(r31) ; Get the address space hash
            lhz r25,mpBSize(r31) ; Get the number of pages in block
            lwz r9,mpBlkRemCur(r31) ; Get our current remove position
            ori r0,r20,mpRIP ; Turn on the remove in progress flag
            mfsdr1 r29 ; Get the hash table base and size
            ld r27,mpVAddr(r31) ; Get the base vaddr
            rlwinm r5,r29,0,27,31 ; Isolate the size
            sub r4,r25,r9 ; Get number of pages left
            cmplw cr1,r9,r25 ; Have we already hit the end?
            addi r10,r9,mapRemChunk ; Point to the start of the next chunk
            addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
            stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
            srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
            subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
            cmpwi cr7,r2,0 ; Remember if we are doing the last chunk
            and r4,r4,r2 ; If more than a chunk, bring this back to 0
            srdi r27,r27,12 ; Change address into page index
            addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get min(num left, chunksize)
            add r27,r27,r9 ; Adjust vaddr to start of current chunk

            bgt-- cr1,hrmEndInSight ; Someone is already doing the last hunk...

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
            bl sxlkUnlock ; Unlock the search list while we are invalidating

            rlwimi r24,r24,14,4,17 ; Insert a copy of space hash
            eqv r26,r26,r26 ; Get all foxes here
            rldimi r24,r24,28,8 ; Make a couple copies up higher
            rldicr r29,r29,0,47 ; Isolate just the hash table base
            subfic r5,r5,46 ; Get number of leading zeros
            srd r26,r26,r5 ; Shift the size bits over
            mr r30,r27 ; Get start of chunk to invalidate
            rldicr r26,r26,0,56 ; Make length in PTEG units
            add r22,r4,r30 ; Get end page number

hrmBInv64:  srdi r0,r30,2 ; Shift page index over to form ESID
            rldicr r0,r0,0,49 ; Clean all but segment portion
            rlwinm r2,r30,0,16,31 ; Get the current page index
            xor r0,r0,r24 ; Form VSID
            xor r8,r2,r0 ; Hash the vaddr
            sldi r8,r8,7 ; Make into PTEG offset
            and r23,r8,r26 ; Wrap into the hash table
            rlwinm r3,r23,27,5,29 ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here)
            subfic r3,r3,-4 ; Get the PCA entry offset
            add r7,r3,r29 ; Point to the PCA slot

            cmplw cr5,r30,r22 ; Have we reached the end of the range?

            bl mapLockPteg ; Lock the PTEG

            rlwinm. r4,r6,16,0,7 ; Extract the block mappings in this here PTEG and see if there are any
            add r5,r23,r29 ; Point to the PTEG
            li r0,0 ; Set an invalid PTE value
            beq++ hrmBNone64 ; No block map PTEs in this PTEG...
            mtcrf 0x80,r4 ; Set CRs to select PTE slots
            mtcrf 0x40,r4 ; Set CRs to select PTE slots

            bf 0,hrmSlot0s ; No autogen here
            std r0,0x00(r5) ; Invalidate PTE

hrmSlot0s:  bf 1,hrmSlot1s ; No autogen here
            std r0,0x10(r5) ; Invalidate PTE

hrmSlot1s:  bf 2,hrmSlot2s ; No autogen here
            std r0,0x20(r5) ; Invalidate PTE

hrmSlot2s:  bf 3,hrmSlot3s ; No autogen here
            std r0,0x30(r5) ; Invalidate PTE

hrmSlot3s:  bf 4,hrmSlot4s ; No autogen here
            std r0,0x40(r5) ; Invalidate PTE

hrmSlot4s:  bf 5,hrmSlot5s ; No autogen here
            std r0,0x50(r5) ; Invalidate PTE

hrmSlot5s:  bf 6,hrmSlot6s ; No autogen here
            std r0,0x60(r5) ; Invalidate PTE

hrmSlot6s:  bf 7,hrmSlot7s ; No autogen here
            std r0,0x70(r5) ; Invalidate PTE

hrmSlot7s:  rlwinm r0,r4,16,16,23 ; Move in use to autogen
            or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
            andc r6,r6,r0 ; Turn off all the old autogen bits

hrmBNone64: eieio ; Make sure all updates come first
            stw r6,0(r7) ; Unlock and set the PCA

            addi r30,r30,1 ; Bump to the next PTEG
            bne++ cr5,hrmBInv64 ; Go invalidate the next...

            bge+ cr7,hrmDoneChunk ; We have not as yet done the last chunk, go tell our caller to call again...

            mr r3,r31 ; Copy the pointer to the mapping
            bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one

            sync ; Make sure memory is consistent

            subi r5,r25,255 ; Subtract TLB size from page count (note we are 0 based here)
            li r6,255 ; Assume full invalidate for now
            srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
            andc r6,r6,r5 ; Clear max if we have less to do
            and r5,r25,r5 ; Clear count if we have more than max
            sldi r24,r24,28 ; Get the full XOR value over to segment position
            ld r27,mpVAddr(r31) ; Get the base vaddr
            li r7,tlbieLock ; Get the TLBIE lock
            or r5,r5,r6 ; Get number of TLBIEs needed

hrmBTLBlcl: lwarx r2,0,r7 ; Get the TLBIE lock
            mr. r2,r2 ; Is it locked?
            li r2,1 ; Get our lock value
            bne-- hrmBTLBlcm ; It is locked, go wait...
            stwcx. r2,0,r7 ; Try to get it
            bne-- hrmBTLBlcl ; We was beat...

hrmBTLBj:   sldi r2,r27,maxAdrSpb ; Move to make room for address space ID
            rldicr r2,r2,0,35-maxAdrSpb ; Clear out the extra
            addic. r5,r5,-1 ; See if we did them all
            xor r2,r2,r24 ; Make the VSID
            rldimi r2,r27,0,36 ; Insert the page portion of the VPN
            rldicl r2,r2,0,16 ; Clear bits 0:15 cause they say we gotta

            tlbie r2 ; Invalidate it everywhere
            addi r27,r27,0x1000 ; Up to the next page
            bge++ hrmBTLBj ; Make sure we have done it all...

            sync ; Make sure all is quiet

            eieio ; Make sure that the tlbie happens first
            tlbsync ; Wait for everyone to catch up
            isync

            li r2,0 ; Lock clear value

            ptesync ; Wait for quiet again
            sync ; Make sure that is done

            stw r2,tlbieLock(0) ; Clear the tlbie lock

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkShared ; Go get a shared lock on the mapping lists
            mr. r3,r3 ; Did we get the lock?
            bne- hrmPanic ; Nope...

            lwz r4,mpVAddr(r31) ; High order of address
            lwz r5,mpVAddr+4(r31) ; Low order of address
            mr r3,r28 ; Pass in pmap to search
            mr r29,r4 ; Save this in case we need it (only if promote fails)
            mr r30,r5 ; Save this in case we need it (only if promote fails)
            bl EXT(mapSearchFull) ; Go see if we can find it

            mr. r3,r3 ; Did we? (And remember mapping address for later)
            mr r15,r4 ; Save top of next vaddr
            mr r16,r5 ; Save bottom of next vaddr
            beq- hrmPanic ; Nope, not found...

            cmpld r3,r31 ; Same mapping?
            bne- hrmPanic ; Not good...

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkPromote ; Try to promote shared to exclusive
            mr. r3,r3 ; Could we?
            mr r3,r31 ; Restore the mapping pointer
            beq+ hrmBDone2 ; Yeah...

            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            bl sxlkConvert ; Convert shared to exclusive
            mr. r3,r3 ; Could we?
            bne-- hrmPanic ; Nope, we must have timed out...

            mr r3,r28 ; Pass in pmap to search
            mr r4,r29 ; High order of address
            mr r5,r30 ; Low order of address
            bl EXT(mapSearchFull) ; Rescan the list

            mr. r3,r3 ; Did we lose it when we converted?
            mr r15,r4 ; Save top of next vaddr
            mr r16,r5 ; Save bottom of next vaddr
            beq-- hrmPanic ; Yeah, we did, someone tossed it for us...

hrmBDone2:  bl mapDrainBusy ; Go wait until mapping is unused

            mr r3,r28 ; Get the pmap to remove from
            mr r4,r31 ; Point to the mapping
            bl EXT(mapRemove) ; Remove the mapping from the list

            lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
            la r3,pmapSXlk(r28) ; Point to the pmap search lock
            subi r4,r4,1 ; Drop down the mapped page count
            stw r4,pmapResidentCnt(r28) ; Set the mapped page count
            bl sxlkUnlock ; Unlock the search list

            b hrmRetn64 ; We are all done, get out...

hrmBTLBlcm: li r2,lgKillResv ; Get space unreserve line
            stwcx. r2,0,r2 ; Unreserve it

hrmBTLBlcn: lwz r2,0(r7) ; Get the TLBIE lock
            mr. r2,r2 ; Is it held?
            beq++ hrmBTLBlcl ; Nope...
            b hrmBTLBlcn ; Yeah...
1c79356b 1465
1c79356b
A
1466
1467
/*
 *          mapping *hw_purge_phys(physent) - remove a mapping from the system
 *
 *          Upon entry, R3 contains a pointer to a physent.
 *
 *          This function removes the first mapping from a physical entry
 *          alias list.  It locks the list, extracts the vaddr and pmap from
 *          the first entry.  It then jumps into the hw_rem_map function.
 *          NOTE: since we jump into rem_map, we need to set up the stack
 *          identically.  Also, we set the next parm to 0 so we do not
 *          try to save a next vaddr.
 *
 *          We return the virtual address of the removed mapping in R3.
 *
 *          Note that this is designed to be called from 32-bit mode with a stack.
 *
 *          We disable translation and all interruptions here.  This keeps us
 *          from having to worry about a deadlock due to having anything locked
 *          and needing it to process a fault.
 *
 *          Note that this must be done with both interruptions off and VM off.
 *
 *
 *          Remove mapping via physical page (mapping_purge):
 *
 *          1) lock physent
 *          2) extract vaddr and pmap
 *          3) unlock physent
 *          4) do "remove mapping via pmap"
 *
 *          (A hedged C sketch of these four steps follows.)
 *
 */

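;
;           A hedged C sketch of the purge-via-physent flow described above
;           (illustrative only; the type and helper names are assumptions,
;           not the kernel's actual declarations):
;
;               struct mapping *hw_purge_phys_sketch(struct phys_entry *pp) {
;                   mapPhysLock(pp);                     // 1) lock physent
;                   struct mapping *mp = first_alias(pp);    // head of alias chain
;                   if (mp == NULL) {
;                       mapPhysUnlock(pp);               // nothing mapped here
;                       return NULL;
;                   }
;                   addr64_t va   = mp->mpVAddr;         // 2) extract vaddr and pmap
;                   pmap_t   pmap = pmap_for_space(mp->mpSpace);
;                   mapPhysUnlock(pp);                   // 3) unlock physent
;                   return hw_rem_map(pmap, va, NULL);   // 4) remove via pmap (next parm 0)
;               }
;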
            .align  5
            .globl  EXT(hw_purge_phys)

LEXT(hw_purge_phys)
            stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)    ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r15,FM_ARG0+0x00(r1)        ; Save a register
            stw     r16,FM_ARG0+0x04(r1)        ; Save a register
            stw     r17,FM_ARG0+0x08(r1)        ; Save a register
            stw     r18,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r19,FM_ARG0+0x10(r1)        ; Save a register
            stw     r20,FM_ARG0+0x14(r1)        ; Save a register
            stw     r21,FM_ARG0+0x18(r1)        ; Save a register
            stw     r22,FM_ARG0+0x1C(r1)        ; Save a register
            stw     r23,FM_ARG0+0x20(r1)        ; Save a register
            stw     r24,FM_ARG0+0x24(r1)        ; Save a register
            stw     r25,FM_ARG0+0x28(r1)        ; Save a register
            li      r6,0                        ; Set no next address return
            stw     r26,FM_ARG0+0x2C(r1)        ; Save a register
            stw     r27,FM_ARG0+0x30(r1)        ; Save a register
            stw     r28,FM_ARG0+0x34(r1)        ; Save a register
            stw     r29,FM_ARG0+0x38(r1)        ; Save a register
            stw     r30,FM_ARG0+0x3C(r1)        ; Save a register
            stw     r31,FM_ARG0+0x40(r1)        ; Save a register
            stw     r6,FM_ARG0+0x44(r1)         ; Save address to save next mapped vaddr
            stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)  ; Save the return

            bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            bl      mapPhysLock                 ; Lock the physent

            bt++    pf64Bitb,hppSF              ; skip if 64-bit (only they take the hint)

            lwz     r12,ppLink+4(r3)            ; Grab the pointer to the first mapping
            li      r0,0x3F                     ; Set the bottom stuff to clear
            b       hppJoin                     ; Join the common...

hppSF:      li      r0,0xFF
            ld      r12,ppLink(r3)              ; Get the pointer to the first mapping
            rldicl  r0,r0,62,0                  ; Rotate clean up mask to get 0xC0000000000000003F

hppJoin:    andc.   r12,r12,r0                  ; Clean and test link
            beq--   hppNone                     ; There are no more mappings on physical page

            lis     r28,hi16(EXT(pmapTrans))    ; Get the top of the start of the pmap hash to pmap translate table
            lhz     r7,mpSpace(r12)             ; Get the address space hash
            ori     r28,r28,lo16(EXT(pmapTrans))    ; Get the bottom of the start of the pmap hash to pmap translate table
            slwi    r0,r7,2                     ; Multiply space by 4
            lwz     r4,mpVAddr(r12)             ; Get the top of the vaddr
            slwi    r7,r7,3                     ; Multiply space by 8
            lwz     r5,mpVAddr+4(r12)           ; and the bottom
            add     r7,r7,r0                    ; Get correct displacement into translate table (space * 12)
            lwz     r28,0(r28)                  ; Get the actual translation map

            add     r28,r28,r7                  ; Point to the pmap translation

            bl      mapPhysUnlock               ; Time to unlock the physical entry

            bt++    pf64Bitb,hppSF2             ; skip if 64-bit (only they take the hint)

            lwz     r28,pmapPAddr+4(r28)        ; Get the physical address of the pmap
            b       hrmJoin                     ; Go remove the mapping...

hppSF2:     ld      r28,pmapPAddr(r28)          ; Get the physical address of the pmap
            b       hrmJoin                     ; Go remove the mapping...

            .align  5

hppNone:    bl      mapPhysUnlock               ; Time to unlock the physical entry

            bt++    pf64Bitb,hppSF3             ; skip if 64-bit (only they take the hint)...

            mtmsr   r11                         ; Restore enables/translation/etc.
            isync
            b       hppRetnCmn                  ; Join the common return code...

hppSF3:     mtmsrd  r11                         ; Restore enables/translation/etc.
            isync

;
;           NOTE: we have not used any registers other than the volatiles to this point
;

hppRetnCmn: lwz     r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return

            li      r3,0                        ; Clear high order mapping address because we are 32-bit
            mtlr    r12                         ; Restore the return
            lwz     r1,0(r1)                    ; Pop the stack
            blr                                 ; Leave...


/*
 *          mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
 *
 *          Upon entry, R3 contains a pointer to a pmap.  Since vaddr is
 *          a 64-bit quantity, it is a long long, so it is in R4 and R5.
 *
 *          We return the virtual address of the removed mapping in R3.
 *
 *          Note that this is designed to be called from 32-bit mode with a stack.
 *
 *          We disable translation and all interruptions here.  This keeps us
 *          from having to worry about a deadlock due to having anything locked
 *          and needing it to process a fault.
 *
 *          Note that this must be done with both interruptions off and VM off.
 *
 *          Remove a mapping which can be reestablished by VM.
 *          (A hedged C sketch of the search loop follows.)
 *
 */
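;
;           A hedged C sketch of hw_purge_map's search loop (illustrative
;           only; field and helper names are assumptions): walk the pmap's
;           mapping list from vaddr, skipping mappings that are special,
;           nested, permanent, block, or busy, and remove the first
;           eligible one.
;
;               struct mapping *hw_purge_map_sketch(pmap_t pmap, addr64_t va,
;                                                   addr64_t *next) {
;                   struct mapping *mp;
;                   while ((mp = mapSearchFull(pmap, &va)) != NULL) {
;                       if ((mp->mpFlags & (mpSpecial | mpNest |
;                                           mpPerm | mpBlock)) == 0 &&
;                           busy_count(mp) == 0)
;                           return remove_mapping(pmap, mp, next);
;                       if (va == 0)            // past the last mapping
;                           break;
;                   }
;                   return NULL;                // nothing removable found
;               }
;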

            .align  5
            .globl  EXT(hw_purge_map)

LEXT(hw_purge_map)
            stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)    ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r15,FM_ARG0+0x00(r1)        ; Save a register
            stw     r16,FM_ARG0+0x04(r1)        ; Save a register
            stw     r17,FM_ARG0+0x08(r1)        ; Save a register
            stw     r18,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r19,FM_ARG0+0x10(r1)        ; Save a register
            mfsprg  r19,2                       ; Get feature flags
            stw     r20,FM_ARG0+0x14(r1)        ; Save a register
            stw     r21,FM_ARG0+0x18(r1)        ; Save a register
            mtcrf   0x02,r19                    ; move pf64Bit cr6
            stw     r22,FM_ARG0+0x1C(r1)        ; Save a register
            stw     r23,FM_ARG0+0x20(r1)        ; Save a register
            stw     r24,FM_ARG0+0x24(r1)        ; Save a register
            stw     r25,FM_ARG0+0x28(r1)        ; Save a register
            stw     r26,FM_ARG0+0x2C(r1)        ; Save a register
            stw     r27,FM_ARG0+0x30(r1)        ; Save a register
            stw     r28,FM_ARG0+0x34(r1)        ; Save a register
            stw     r29,FM_ARG0+0x38(r1)        ; Save a register
            stw     r30,FM_ARG0+0x3C(r1)        ; Save a register
            stw     r31,FM_ARG0+0x40(r1)        ; Save a register
            stw     r6,FM_ARG0+0x44(r1)         ; Save address to save next mapped vaddr
            stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)  ; Save the return

            bt++    pf64Bitb,hpmSF1             ; skip if 64-bit (only they take the hint)
            lwz     r9,pmapvr+4(r3)             ; Get conversion mask
            b       hpmSF1x                     ; Done...

hpmSF1:     ld      r9,pmapvr(r3)               ; Get conversion mask

hpmSF1x:
            bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            xor     r28,r3,r9                   ; Convert the pmap to physical addressing

            mr      r17,r11                     ; Save the MSR

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkExclusive               ; Go get an exclusive lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            bne--   hrmBadLock                  ; Nope...
;
;           Note that we do a full search (i.e., no shortcut level skips, etc.)
;           here so that we will know the previous elements so we can dequeue them
;           later.
;
hpmSearch:
            mr      r3,r28                      ; Pass in pmap to search
            mr      r29,r4                      ; Top half of vaddr
            mr      r30,r5                      ; Bottom half of vaddr
            bl      EXT(mapSearchFull)          ; Rescan the list
            mr.     r31,r3                      ; Did we? (And remember mapping address for later)
            or      r0,r4,r5                    ; Are we beyond the end?
            mr      r15,r4                      ; Save top of next vaddr
            cmplwi  cr1,r0,0                    ; See if there is another
            mr      r16,r5                      ; Save bottom of next vaddr
            bne--   hpmGotOne                   ; We found one, go check it out...

hpmCNext:   bne++   cr1,hpmSearch               ; There is another to check...
            b       hrmNotFound                 ; No more in pmap to check...

hpmGotOne:  lwz     r20,mpFlags(r3)             ; Get the flags
            andi.   r9,r20,lo16(mpSpecial|mpNest|mpPerm|mpBlock)    ; Are we allowed to remove it?
            rlwinm  r21,r20,8,24,31             ; Extract the busy count
            cmplwi  cr2,r21,0                   ; Is it busy?
            crand   cr0_eq,cr2_eq,cr0_eq        ; Not busy and can be removed?
            beq++   hrmGotX                     ; Found, branch to remove the mapping...
            b       hpmCNext                    ; Nope...

/*
 *          mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
 *
 *          Upon entry, R3 contains a pointer to the physent and R4 contains
 *          a pointer to the pmap.
 *
 *          This function removes the first mapping for a specific pmap from a physical entry
 *          alias list.  It locks the list, extracts the vaddr and pmap from
 *          the first appropriate entry.  It then jumps into the hw_rem_map function.
 *          NOTE: since we jump into rem_map, we need to set up the stack
 *          identically.  Also, we set the next parm to 0 so we do not
 *          try to save a next vaddr.
 *
 *          We return the virtual address of the removed mapping in R3.
 *
 *          Note that this is designed to be called from 32-bit mode with a stack.
 *
 *          We disable translation and all interruptions here.  This keeps us
 *          from having to worry about a deadlock due to having anything locked
 *          and needing it to process a fault.
 *
 *          Note that this must be done with both interruptions off and VM off.
 *
 *
 *          Remove mapping via physical page (mapping_purge):
 *
 *          1) lock physent
 *          2) extract vaddr and pmap
 *          3) unlock physent
 *          4) do "remove mapping via pmap"
 *
 *          (A hedged C sketch of the space-filtered alias walk follows.)
 *
 */
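;
;           A hedged C sketch of the alias-chain walk used below (illustrative
;           only; names are assumptions): scan the physent's alias chain for
;           the first mapping whose space hash matches the pmap, then remove it.
;
;               struct mapping *hw_purge_space_sketch(struct phys_entry *pp,
;                                                     pmap_t pmap) {
;                   unsigned int space = pmap->pmapSpace;   // space hash to match
;                   struct mapping *mp;
;                   mapPhysLock(pp);
;                   for (mp = first_alias(pp); mp != NULL; mp = mp->mpAlias)
;                       if (mp->mpSpace == space)           // one of ours?
;                           break;
;                   if (mp == NULL) { mapPhysUnlock(pp); return NULL; }
;                   addr64_t va = mp->mpVAddr;              // extract vaddr
;                   mapPhysUnlock(pp);
;                   return hw_rem_map(pmap, va, NULL);      // remove via pmap
;               }
;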

            .align  5
            .globl  EXT(hw_purge_space)

LEXT(hw_purge_space)
            stwu    r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1)    ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r15,FM_ARG0+0x00(r1)        ; Save a register
            stw     r16,FM_ARG0+0x04(r1)        ; Save a register
            stw     r17,FM_ARG0+0x08(r1)        ; Save a register
            mfsprg  r2,2                        ; Get feature flags
            stw     r18,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r19,FM_ARG0+0x10(r1)        ; Save a register
            stw     r20,FM_ARG0+0x14(r1)        ; Save a register
            stw     r21,FM_ARG0+0x18(r1)        ; Save a register
            stw     r22,FM_ARG0+0x1C(r1)        ; Save a register
            mtcrf   0x02,r2                     ; move pf64Bit cr6
            stw     r23,FM_ARG0+0x20(r1)        ; Save a register
            stw     r24,FM_ARG0+0x24(r1)        ; Save a register
            stw     r25,FM_ARG0+0x28(r1)        ; Save a register
            stw     r26,FM_ARG0+0x2C(r1)        ; Save a register
            stw     r27,FM_ARG0+0x30(r1)        ; Save a register
            li      r6,0                        ; Set no next address return
            stw     r28,FM_ARG0+0x34(r1)        ; Save a register
            stw     r29,FM_ARG0+0x38(r1)        ; Save a register
            stw     r30,FM_ARG0+0x3C(r1)        ; Save a register
            stw     r31,FM_ARG0+0x40(r1)        ; Save a register
            stw     r6,FM_ARG0+0x44(r1)         ; Save address to save next mapped vaddr
            stw     r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1)  ; Save the return

            bt++    pf64Bitb,hpsSF1             ; skip if 64-bit (only they take the hint)

            lwz     r9,pmapvr+4(r4)             ; Get conversion mask for pmap

            b       hpsSF1x                     ; Done...

hpsSF1:     ld      r9,pmapvr(r4)               ; Get conversion mask for pmap

hpsSF1x:    bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            xor     r4,r4,r9                    ; Convert the pmap to physical addressing

            bl      mapPhysLock                 ; Lock the physent

            lwz     r8,pmapSpace(r4)            ; Get the space hash

            bt++    pf64Bitb,hpsSF              ; skip if 64-bit (only they take the hint)

            lwz     r12,ppLink+4(r3)            ; Grab the pointer to the first mapping

hpsSrc32:   rlwinm. r12,r12,0,0,25              ; Clean and test mapping address
            beq     hpsNone                     ; Did not find one...

            lhz     r10,mpSpace(r12)            ; Get the space

            cmplw   r10,r8                      ; Is this one of ours?
            beq     hpsFnd                      ; Yes...

            lwz     r12,mpAlias+4(r12)          ; Chain on to the next
            b       hpsSrc32                    ; Check it out...

            .align  5

hpsSF:      li      r0,0xFF
            ld      r12,ppLink(r3)              ; Get the pointer to the first mapping
            rldicl  r0,r0,62,0                  ; Rotate clean up mask to get 0xC0000000000000003F

hpsSrc64:   andc.   r12,r12,r0                  ; Clean and test mapping address
            beq     hpsNone                     ; Did not find one...

            lhz     r10,mpSpace(r12)            ; Get the space

            cmplw   r10,r8                      ; Is this one of ours?
            beq     hpsFnd                      ; Yes...

            ld      r12,mpAlias(r12)            ; Chain on to the next
            b       hpsSrc64                    ; Check it out...

            .align  5

hpsFnd:     mr      r28,r4                      ; Set the pmap physical address
            lwz     r4,mpVAddr(r12)             ; Get the top of the vaddr
            lwz     r5,mpVAddr+4(r12)           ; and the bottom

            bl      mapPhysUnlock               ; Time to unlock the physical entry
            b       hrmJoin                     ; Go remove the mapping...

            .align  5

hpsNone:    bl      mapPhysUnlock               ; Time to unlock the physical entry

            bt++    pf64Bitb,hpsSF3             ; skip if 64-bit (only they take the hint)...

            mtmsr   r11                         ; Restore enables/translation/etc.
            isync
            b       hpsRetnCmn                  ; Join the common return code...

hpsSF3:     mtmsrd  r11                         ; Restore enables/translation/etc.
            isync

;
;           NOTE: we have not used any registers other than the volatiles to this point
;

hpsRetnCmn: lwz     r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return

            li      r3,0                        ; Set return code
            mtlr    r12                         ; Restore the return
            lwz     r1,0(r1)                    ; Pop the stack
            blr                                 ; Leave...


/*
 *          mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
 *
 *          Upon entry, R3 contains a pointer to a physent.
 *          space is the space ID from the pmap in question.
 *
 *          We return the virtual address of the found mapping in
 *          R3.  Note that the mapping busy count is bumped.
 *
 *          Note that this is designed to be called from 32-bit mode with a stack.
 *
 *          We disable translation and all interruptions here.  This keeps us
 *          from having to worry about a deadlock due to having anything locked
 *          and needing it to process a fault.
 *
 */

            .align  5
            .globl  EXT(hw_find_space)

LEXT(hw_find_space)
            stwu    r1,-(FM_SIZE)(r1)           ; Make some space on the stack
            mflr    r0                          ; Save the link register
            mr      r8,r4                       ; Remember the space
            stw     r0,(FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

            bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            bl      mapPhysLock                 ; Lock the physent

            bt++    pf64Bitb,hfsSF              ; skip if 64-bit (only they take the hint)

            lwz     r12,ppLink+4(r3)            ; Grab the pointer to the first mapping

hfsSrc32:   rlwinm. r12,r12,0,0,25              ; Clean and test mapping address
            beq     hfsNone                     ; Did not find one...

            lhz     r10,mpSpace(r12)            ; Get the space

            cmplw   r10,r8                      ; Is this one of ours?
            beq     hfsFnd                      ; Yes...

            lwz     r12,mpAlias+4(r12)          ; Chain on to the next
            b       hfsSrc32                    ; Check it out...

            .align  5

hfsSF:      li      r0,0xFF
            ld      r12,ppLink(r3)              ; Get the pointer to the first mapping
            rldicl  r0,r0,62,0                  ; Rotate clean up mask to get 0xC0000000000000003F

hfsSrc64:   andc.   r12,r12,r0                  ; Clean and test mapping address
            beq     hfsNone                     ; Did not find one...

            lhz     r10,mpSpace(r12)            ; Get the space

            cmplw   r10,r8                      ; Is this one of ours?
            beq     hfsFnd                      ; Yes...

            ld      r12,mpAlias(r12)            ; Chain on to the next
            b       hfsSrc64                    ; Check it out...

            .align  5

hfsFnd:     mr      r8,r3                       ; Save the physent
            mr      r3,r12                      ; Point to the mapping
            bl      mapBumpBusy                 ; If we found it, bump up the busy count so the mapping does not disappear

            mr      r3,r8                       ; Get back the physical entry
            li      r7,0xFFF                    ; Get a page size mask
            bl      mapPhysUnlock               ; Time to unlock the physical entry

            andc    r3,r12,r7                   ; Move the mapping back down to a page
            lwz     r3,mbvrswap+4(r3)           ; Get last half of virtual to real swap
            xor     r12,r3,r12                  ; Convert to virtual
            b       hfsRet                      ; Time to return

            .align  5

hfsNone:    bl      mapPhysUnlock               ; Time to unlock the physical entry

hfsRet:     bt++    pf64Bitb,hfsSF3             ; skip if 64-bit (only they take the hint)...

            mtmsr   r11                         ; Restore enables/translation/etc.
            isync
            b       hfsRetnCmn                  ; Join the common return code...

hfsSF3:     mtmsrd  r11                         ; Restore enables/translation/etc.
            isync

;
;           NOTE: we have not used any registers other than the volatiles to this point
;

hfsRetnCmn: mr      r3,r12                      ; Get the mapping or a 0 if we failed
            lwz     r12,(FM_SIZE+FM_LR_SAVE)(r1)    ; Restore the return

            mtlr    r12                         ; Restore the return
            lwz     r1,0(r1)                    ; Pop the stack
            blr                                 ; Leave...


;
;           mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap.
;           Returns 0 if not found or the virtual address of the mapping if
;           it is.  Also, the mapping has the busy count bumped.
;           (A hedged usage sketch in C follows.)
;
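;
;           A hedged C sketch of how a caller might iterate a VA range with
;           hw_find_map (illustrative only; exact prototypes are assumptions).
;           The busy count is bumped on each returned mapping, so the caller
;           must drop it when done:
;
;               addr64_t va = start, next = 0;
;               while (va < end) {
;                   struct mapping *mp = hw_find_map(pmap, va, &next);
;                   if (mp != NULL && mp != (struct mapping *)1) {  // 1 = lock timeout
;                       /* ... examine the mapping ... */
;                       mapping_drop_busy(mp);      // undo the busy bump
;                   }
;                   if (next == 0) break;           // no more mappings
;                   va = next;                      // advance to next mapped va
;               }
;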
            .align  5
            .globl  EXT(hw_find_map)

LEXT(hw_find_map)
            stwu    r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r25,FM_ARG0+0x00(r1)        ; Save a register
            stw     r26,FM_ARG0+0x04(r1)        ; Save a register
            mr      r25,r6                      ; Remember address of next va
            stw     r27,FM_ARG0+0x08(r1)        ; Save a register
            stw     r28,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r29,FM_ARG0+0x10(r1)        ; Save a register
            stw     r30,FM_ARG0+0x14(r1)        ; Save a register
            stw     r31,FM_ARG0+0x18(r1)        ; Save a register
            stw     r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Save the return

            lwz     r6,pmapvr(r3)               ; Get the first part of the VR translation for pmap
            lwz     r7,pmapvr+4(r3)             ; Get the second part


            bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            mr      r27,r11                     ; Remember the old MSR
            mr      r26,r12                     ; Remember the feature bits

            xor     r28,r3,r7                   ; Change the common 32- and 64-bit half

            bf--    pf64Bitb,hfmSF1             ; skip if 32-bit...

            rldimi  r28,r6,32,0                 ; Shift the fixed upper part of the physical over and cram in top

hfmSF1:     mr      r29,r4                      ; Save top half of vaddr
            mr      r30,r5                      ; Save the bottom half

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            bne--   hfmBadLock                  ; Nope...

            mr      r3,r28                      ; get the pmap address
            mr      r4,r29                      ; Get bits 0:31 to look for
            mr      r5,r30                      ; Get bits 32:64

            bl      EXT(mapSearch)              ; Go see if we can find it (note: R7 comes back with mpFlags)

            rlwinm  r0,r7,0,mpRIPb,mpRIPb       ; Find remove in progress bit
            mr.     r31,r3                      ; Save the mapping if we found it
            cmplwi  cr1,r0,0                    ; Are we removing?
            mr      r29,r4                      ; Save next va high half
            crorc   cr0_eq,cr0_eq,cr1_eq        ; Not found or removing
            mr      r30,r5                      ; Save next va low half
            li      r6,0                        ; Assume we did not find it
            li      r26,0xFFF                   ; Get a mask to relocate to start of mapping page

            bt--    cr0_eq,hfmNotFnd            ; We did not find it...

            bl      mapBumpBusy                 ; If we found it, bump up the busy count so the mapping does not disappear

            andc    r4,r31,r26                  ; Get back to the mapping page start

;           Note: we can treat 32- and 64-bit the same here.  Because we are going from
;           physical to virtual and we only do 32-bit virtual, we only need the low order
;           word of the xor.

            lwz     r4,mbvrswap+4(r4)           ; Get last half of virtual to real swap
            li      r6,-1                       ; Indicate we found it and it is not being removed
            xor     r31,r31,r4                  ; Flip to virtual

hfmNotFnd:  la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            rlwinm  r3,r31,0,0,31               ; Move mapping to return register and clear top of register if 64-bit
            and     r3,r3,r6                    ; Clear if not found or removing

hfmReturn:  bt++    pf64Bitb,hfmR64             ; Yes...

            mtmsr   r27                         ; Restore enables/translation/etc.
            isync
            b       hfmReturnC                  ; Join common...

hfmR64:     mtmsrd  r27                         ; Restore enables/translation/etc.
            isync

hfmReturnC: stw     r29,0(r25)                  ; Save the top of the next va
            stw     r30,4(r25)                  ; Save the bottom of the next va
            lwz     r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Restore the return
            lwz     r25,FM_ARG0+0x00(r1)        ; Restore a register
            lwz     r26,FM_ARG0+0x04(r1)        ; Restore a register
            and     r3,r3,r6                    ; Clear return if the mapping is being removed
            lwz     r27,FM_ARG0+0x08(r1)        ; Restore a register
            mtlr    r0                          ; Restore the return
            lwz     r28,FM_ARG0+0x0C(r1)        ; Restore a register
            lwz     r29,FM_ARG0+0x10(r1)        ; Restore a register
            lwz     r30,FM_ARG0+0x14(r1)        ; Restore a register
            lwz     r31,FM_ARG0+0x18(r1)        ; Restore a register
            lwz     r1,0(r1)                    ; Pop the stack
            blr                                 ; Leave...

            .align  5

hfmBadLock: li      r3,1                        ; Set lock time out error code
            b       hfmReturn                   ; Leave...


/*
 *          unsigned int hw_walk_phys(pp, preop, op, postop, parm)
 *              walks all mappings for a physical page and performs
 *              specified operations on each.
 *
 *          pp is an unlocked physent.
 *          preop is the operation to perform on the physent before the walk.
 *              This would be used to set cache attribute or protection.
 *          op is the operation to perform on each mapping during the walk.
 *          postop is the operation to perform on the physent after the walk.
 *              This would be used to set or reset the RC bits.
 *
 *          We return the RC bits from before postop is run.
 *
 *          Note that this is designed to be called from 32-bit mode with a stack.
 *
 *          We disable translation and all interruptions here.  This keeps us
 *          from having to worry about a deadlock due to having anything locked
 *          and needing it to process a fault.
 *
 *          We lock the physent, execute preop, and then walk each mapping in turn.
 *          If there is a PTE, it is invalidated and the RC merged into the physent.
 *          Then we call the op function.
 *          Then we revalidate the PTE.
 *          Once all mappings are finished, we save the physent RC and call the
 *          postop routine.  Then we unlock the physent and return the RC.
 *
 *          (A hedged C sketch of this walk follows.)
 *
 */

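;
;           A hedged C sketch of the walk (illustrative only; helper names
;           are assumptions, not the kernel's declarations):
;
;               unsigned int hw_walk_phys_sketch(struct phys_entry *pp,
;                                                op_t preop, op_t op, op_t postop,
;                                                unsigned int parm) {
;                   unsigned int rc;
;                   mapPhysLock(pp);
;                   if (preop(pp, parm)) {              // preop may bail early
;                       struct mapping *mp;
;                       for (mp = first_alias(pp); mp != NULL; mp = mp->mpAlias) {
;                           pte_t *pte = mapInvPte(mp); // invalidate PTE, merge RC into physent
;                           int go_on = op(mp, parm);   // per-mapping operation
;                           if (pte != NULL)
;                               revalidate(pte);        // put the PTE back
;                           if (!go_on) break;          // op may bail early
;                       }
;                   }
;                   rc = pp->ppLink & (ppR | ppC);      // RC bits before postop
;                   postop(pp, parm);
;                   mapPhysUnlock(pp);
;                   return rc;
;               }
;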
            .align  5
            .globl  EXT(hw_walk_phys)

LEXT(hw_walk_phys)
            stwu    r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r25,FM_ARG0+0x00(r1)        ; Save a register
            stw     r26,FM_ARG0+0x04(r1)        ; Save a register
            stw     r27,FM_ARG0+0x08(r1)        ; Save a register
            stw     r28,FM_ARG0+0x0C(r1)        ; Save a register
            mr      r25,r7                      ; Save the parm
            stw     r29,FM_ARG0+0x10(r1)        ; Save a register
            stw     r30,FM_ARG0+0x14(r1)        ; Save a register
            stw     r31,FM_ARG0+0x18(r1)        ; Save a register
            stw     r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Save the return

            bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            mr      r26,r11                     ; Save the old MSR
            lis     r27,hi16(hwpOpBase)         ; Get high order of op base
            slwi    r4,r4,7                     ; Convert preop to displacement
            ori     r27,r27,lo16(hwpOpBase)     ; Get low order of op base
            slwi    r5,r5,7                     ; Convert op to displacement
            add     r12,r4,r27                  ; Point to the preop routine
            slwi    r28,r6,7                    ; Convert postop to displacement
            mtctr   r12                         ; Set preop routine
            add     r28,r28,r27                 ; Get the address of the postop routine
            add     r27,r5,r27                  ; Get the address of the op routine

            bl      mapPhysLock                 ; Lock the physent

            mr      r29,r3                      ; Save the physent address

            bt++    pf64Bitb,hwp64              ; skip if 64-bit (only they take the hint)

            bctrl                               ; Call preop routine
            bne-    hwpEarly32                  ; preop says to bail now...

            mtctr   r27                         ; Set up the op function address
            lwz     r31,ppLink+4(r3)            ; Grab the pointer to the first mapping

hwpSrc32:   rlwinm. r31,r31,0,0,25              ; Clean and test mapping address
            beq     hwpNone32                   ; Did not find one...

;
;           Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4,
;           PTE low in R5.  The PCA address is in R7.  The PTEG comes back locked.
;           If there is no PTE, PTE low is obtained from the mapping.
;
            bl      mapInvPte32                 ; Invalidate and lock PTE, also merge into physent

            bctrl                               ; Call the op function

            crmove  cr1_eq,cr0_eq               ; Save the return code

            mr.     r3,r3                       ; Was there a previously valid PTE?
            beq-    hwpNxt32                    ; Nope...

            stw     r5,4(r3)                    ; Store second half of PTE
            eieio                               ; Make sure we do not reorder
            stw     r4,0(r3)                    ; Revalidate the PTE

            eieio                               ; Make sure all updates come first
            stw     r6,0(r7)                    ; Unlock the PCA

hwpNxt32:   bne-    cr1,hwpEarly32              ; op says to bail now...
            lwz     r31,mpAlias+4(r31)          ; Chain on to the next
            b       hwpSrc32                    ; Check it out...

            .align  5

hwpNone32:  mtctr   r28                         ; Get the post routine address

            lwz     r30,ppLink+4(r29)           ; Save the old RC
            mr      r3,r29                      ; Get the physent address
            bctrl                               ; Call post routine

            bl      mapPhysUnlock               ; Unlock the physent

            mtmsr   r26                         ; Restore translation/mode/etc.
            isync

            b       hwpReturn                   ; Go restore registers and return...

            .align  5

hwpEarly32: lwz     r30,ppLink+4(r29)           ; Save the old RC
            mr      r3,r29                      ; Get the physent address
            bl      mapPhysUnlock               ; Unlock the physent

            mtmsr   r26                         ; Restore translation/mode/etc.
            isync

            b       hwpReturn                   ; Go restore registers and return...

            .align  5

hwp64:      bctrl                               ; Call preop routine
            bne--   hwpEarly64                  ; preop says to bail now...

            mtctr   r27                         ; Set up the op function address

            li      r0,0xFF
            ld      r31,ppLink(r3)              ; Get the pointer to the first mapping
            rldicl  r0,r0,62,0                  ; Rotate clean up mask to get 0xC0000000000000003F

hwpSrc64:   andc.   r31,r31,r0                  ; Clean and test mapping address
            beq     hwpNone64                   ; Did not find one...
;
;           Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4,
;           PTE low in R5.  PTEG comes back locked if there is one.
;
            bl      mapInvPte64                 ; Invalidate and lock PTEG, also merge into physent

            bctrl                               ; Call the op function

            crmove  cr1_eq,cr0_eq               ; Save the return code

            mr.     r3,r3                       ; Was there a previously valid PTE?
            beq--   hwpNxt64                    ; Nope...

            std     r5,8(r3)                    ; Save bottom of PTE
            eieio                               ; Make sure we do not reorder
            std     r4,0(r3)                    ; Revalidate the PTE

            eieio                               ; Make sure all updates come first
            stw     r6,0(r7)                    ; Unlock the PCA

hwpNxt64:   bne--   cr1,hwpEarly64              ; op says to bail now...
            ld      r31,mpAlias(r31)            ; Chain on to the next
            li      r0,0xFF
            rldicl  r0,r0,62,0                  ; Rotate clean up mask to get 0xC0000000000000003F
            b       hwpSrc64                    ; Check it out...

            .align  5

hwpNone64:  mtctr   r28                         ; Get the post routine address

            lwz     r30,ppLink+4(r29)           ; Save the old RC
            mr      r3,r29                      ; Get the physent address
            bctrl                               ; Call post routine

            bl      mapPhysUnlock               ; Unlock the physent

            mtmsrd  r26                         ; Restore translation/mode/etc.
            isync
            b       hwpReturn                   ; Go restore registers and return...

            .align  5

hwpEarly64: lwz     r30,ppLink+4(r29)           ; Save the old RC
            mr      r3,r29                      ; Get the physent address
            bl      mapPhysUnlock               ; Unlock the physent

            mtmsrd  r26                         ; Restore translation/mode/etc.
            isync

hwpReturn:  lwz     r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Restore the return
            lwz     r25,FM_ARG0+0x00(r1)        ; Restore a register
            lwz     r26,FM_ARG0+0x04(r1)        ; Restore a register
            mr      r3,r30                      ; Pass back the RC
            lwz     r27,FM_ARG0+0x08(r1)        ; Restore a register
            lwz     r28,FM_ARG0+0x0C(r1)        ; Restore a register
            mtlr    r0                          ; Restore the return
            lwz     r29,FM_ARG0+0x10(r1)        ; Restore a register
            lwz     r30,FM_ARG0+0x14(r1)        ; Restore a register
            lwz     r31,FM_ARG0+0x18(r1)        ; Restore a register
            lwz     r1,0(r1)                    ; Pop the stack
            blr                                 ; Leave...


;
;           The preop/op/postop function table.
;           Each function is allotted a 128-byte slot (32 instructions) and
;           must fit within it.  If one needs more, we must fix the address
;           calculations at the start of hwpOpBase.
;
;           The routine must set CR0_EQ in order to continue the scan.
;           If CR0_EQ is not set, an early return from the function is made.
;           (A hedged sketch of the dispatch arithmetic follows.)
;

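;
;           A hedged C sketch of the dispatch arithmetic used by hw_walk_phys
;           above (illustrative only): an operation number selects a 128-byte
;           slot relative to hwpOpBase, so the dispatch address is just
;           base + (n << 7).
;
;               typedef int (*hwp_op_t)(void);      // assumed signature
;
;               static hwp_op_t hwp_op_addr(unsigned long op_base, unsigned int n) {
;                   return (hwp_op_t)(op_base + (n << 7));  // n * 128 bytes
;               }
;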
            .align  7

hwpOpBase:

; Function 0 - No operation

hwpNoop:    cmplw   r0,r0                       ; Make sure CR0_EQ is set
            blr                                 ; Just return...

            .align  5

; This is the continuation of function 4 - Set attributes in mapping

; We changed the attributes of a mapped page.  Make sure there are no cache paradoxes.
; NOTE: Do we have to deal with i-cache here?

hwpSAM:     li      r11,4096                    ; Get page size

hwpSAMinvd: sub.    r11,r11,r9                  ; Back off a line
            dcbf    r11,r5                      ; Flush the line in the data cache
            bgt++   hwpSAMinvd                  ; Go do the rest of it...

            sync                                ; Make sure it is done

            li      r11,4096                    ; Get page size

hwpSAMinvi: sub.    r11,r11,r9                  ; Back off a line
            icbi    r11,r5                      ; Flush the line in the icache
            bgt++   hwpSAMinvi                  ; Go do the rest of it...

            sync                                ; Make sure it is done

            cmpw    r0,r0                       ; Make sure we return CR0_EQ
            blr                                 ; Return...


; Function 1 - Set protection in physent

            .set    .,hwpOpBase+(1*128)         ; Generate error if previous function too long

hwpSPrtPhy: li      r5,ppLink+4                 ; Get offset for flag part of physent

hwpSPrtPhX: lwarx   r4,r5,r29                   ; Get the old flags
            rlwimi  r4,r25,0,ppPPb-32,ppPPe-32  ; Stick in the new protection
            stwcx.  r4,r5,r29                   ; Try to stuff it
            bne--   hwpSPrtPhX                  ; Try again...
;           Note: CR0_EQ is set because of stwcx.
            blr                                 ; Return...


; Function 2 - Set protection in mapping

            .set    .,hwpOpBase+(2*128)         ; Generate error if previous function too long

hwpSPrtMap: lwz     r9,mpFlags(r31)             ; Get the mapping flags
            lwz     r8,mpVAddr+4(r31)           ; Get the protection part of mapping
            rlwinm. r9,r9,0,mpPermb,mpPermb     ; Is the mapping permanent?
            li      r0,lo16(mpPP)               ; Get protection bits
            crnot   cr0_eq,cr0_eq               ; Change CR0_EQ to true if mapping is permanent
            rlwinm  r2,r25,0,mpPPb-32,mpPPb-32+2    ; Position new protection
            beqlr--                             ; Leave if permanent mapping (before we trash R5)...
            andc    r5,r5,r0                    ; Clear the old prot bits
            or      r5,r5,r2                    ; Move in the prot bits
            rlwimi  r8,r5,0,20,31               ; Copy into the mapping copy
            cmpw    r0,r0                       ; Make sure we return CR0_EQ
            stw     r8,mpVAddr+4(r31)           ; Set the flag part of mapping
            blr                                 ; Leave...

; Function 3 - Set attributes in physent

            .set    .,hwpOpBase+(3*128)         ; Generate error if previous function too long

hwpSAtrPhy: li      r5,ppLink+4                 ; Get offset for flag part of physent

hwpSAtrPhX: lwarx   r4,r5,r29                   ; Get the old flags
            rlwimi  r4,r25,0,ppIb-32,ppGb-32    ; Stick in the new attributes
            stwcx.  r4,r5,r29                   ; Try to stuff it
            bne--   hwpSAtrPhX                  ; Try again...
;           Note: CR0_EQ is set because of stwcx.
            blr                                 ; Return...

; Function 4 - Set attributes in mapping

            .set    .,hwpOpBase+(4*128)         ; Generate error if previous function too long

hwpSAtrMap: lwz     r9,mpFlags(r31)             ; Get the mapping flags
            lwz     r8,mpVAddr+4(r31)           ; Get the attribute part of mapping
            li      r2,0x10                     ; Force on coherent
            rlwinm. r9,r9,0,mpPermb,mpPermb     ; Is the mapping permanent?
            li      r0,lo16(mpWIMG)             ; Get wimg mask
            crnot   cr0_eq,cr0_eq               ; Change CR0_EQ to true if mapping is permanent
            rlwimi  r2,r2,mpIb-ppIb,mpIb-32,mpIb-32 ; Copy in the cache inhibited bit
            beqlr--                             ; Leave if permanent mapping (before we trash R5)...
            andc    r5,r5,r0                    ; Clear the old wimg
            rlwimi  r2,r2,32-(mpGb-ppGb),mpGb-32,mpGb-32    ; Copy in the guarded bit
            mfsprg  r9,2                        ; Feature flags
            or      r5,r5,r2                    ; Move in the new wimg
            rlwimi  r8,r5,0,20,31               ; Copy into the mapping copy
            lwz     r2,mpPAddr(r31)             ; Get the physical address
            li      r0,0xFFF                    ; Start a mask
            andi.   r9,r9,pf32Byte+pf128Byte    ; Get cache line size
            rlwinm  r5,r0,0,1,0                 ; Copy to top half
            stw     r8,mpVAddr+4(r31)           ; Set the flag part of mapping
            rlwinm  r2,r2,12,1,0                ; Copy to top and rotate to make physical address with junk left
            and     r5,r5,r2                    ; Clean stuff in top 32 bits
            andc    r2,r2,r0                    ; Clean bottom too
            rlwimi  r5,r2,0,0,31                ; Insert low 32 to make full physical address
            b       hwpSAM                      ; Join common

;           NOTE: we moved the remainder of the code out of here because it
;           did not fit in the 128 bytes allotted.  It got stuck into the free space
;           at the end of the no-op function.


; Function 5 - Clear reference in physent

            .set    .,hwpOpBase+(5*128)         ; Generate error if previous function too long

hwpCRefPhy: li      r5,ppLink+4                 ; Get offset for flag part of physent

hwpCRefPhX: lwarx   r4,r5,r29                   ; Get the old flags
            rlwinm  r4,r4,0,ppRb+1-32,ppRb-1-32 ; Clear R
            stwcx.  r4,r5,r29                   ; Try to stuff it
            bne--   hwpCRefPhX                  ; Try again...
;           Note: CR0_EQ is set because of stwcx.
            blr                                 ; Return...


; Function 6 - Clear reference in mapping

            .set    .,hwpOpBase+(6*128)         ; Generate error if previous function too long

hwpCRefMap: li      r0,lo16(mpR)                ; Get reference bit
            lwz     r8,mpVAddr+4(r31)           ; Get the flag part of mapping
            andc    r5,r5,r0                    ; Clear in PTE copy
            andc    r8,r8,r0                    ; and in the mapping
            cmpw    r0,r0                       ; Make sure we return CR0_EQ
            stw     r8,mpVAddr+4(r31)           ; Set the flag part of mapping
            blr                                 ; Return...


; Function 7 - Clear change in physent

            .set    .,hwpOpBase+(7*128)         ; Generate error if previous function too long

hwpCCngPhy: li      r5,ppLink+4                 ; Get offset for flag part of physent

hwpCCngPhX: lwarx   r4,r5,r29                   ; Get the old flags
            rlwinm  r4,r4,0,ppCb+1-32,ppCb-1-32 ; Clear C
            stwcx.  r4,r5,r29                   ; Try to stuff it
            bne--   hwpCCngPhX                  ; Try again...
;           Note: CR0_EQ is set because of stwcx.
            blr                                 ; Return...


; Function 8 - Clear change in mapping

            .set    .,hwpOpBase+(8*128)         ; Generate error if previous function too long

hwpCCngMap: li      r0,lo16(mpC)                ; Get change bit
            lwz     r8,mpVAddr+4(r31)           ; Get the flag part of mapping
            andc    r5,r5,r0                    ; Clear in PTE copy
            andc    r8,r8,r0                    ; and in the mapping
            cmpw    r0,r0                       ; Make sure we return CR0_EQ
            stw     r8,mpVAddr+4(r31)           ; Set the flag part of mapping
            blr                                 ; Return...


; Function 9 - Set reference in physent

            .set    .,hwpOpBase+(9*128)         ; Generate error if previous function too long

hwpSRefPhy: li      r5,ppLink+4                 ; Get offset for flag part of physent

hwpSRefPhX: lwarx   r4,r5,r29                   ; Get the old flags
            ori     r4,r4,lo16(ppR)             ; Set the reference
            stwcx.  r4,r5,r29                   ; Try to stuff it
            bne--   hwpSRefPhX                  ; Try again...
;           Note: CR0_EQ is set because of stwcx.
            blr                                 ; Return...


; Function 10 - Set reference in mapping

            .set    .,hwpOpBase+(10*128)        ; Generate error if previous function too long

hwpSRefMap: lwz     r8,mpVAddr+4(r31)           ; Get the flag part of mapping
            ori     r5,r5,lo16(mpR)             ; Set reference in PTE low
            ori     r8,r8,lo16(mpR)             ; Set reference in mapping
            cmpw    r0,r0                       ; Make sure we return CR0_EQ
            stw     r8,mpVAddr+4(r31)           ; Set the flag part of mapping
            blr                                 ; Return...

; Function 11 - Set change in physent

            .set    .,hwpOpBase+(11*128)        ; Generate error if previous function too long

hwpSCngPhy: li      r5,ppLink+4                 ; Get offset for flag part of physent

hwpSCngPhX: lwarx   r4,r5,r29                   ; Get the old flags
            ori     r4,r4,lo16(ppC)             ; Set the change bit
            stwcx.  r4,r5,r29                   ; Try to stuff it
            bne--   hwpSCngPhX                  ; Try again...
;           Note: CR0_EQ is set because of stwcx.
            blr                                 ; Return...

; Function 12 - Set change in mapping

            .set    .,hwpOpBase+(12*128)        ; Generate error if previous function too long

hwpSCngMap: lwz     r8,mpVAddr+4(r31)           ; Get the flag part of mapping
            ori     r5,r5,lo16(mpC)             ; Set change in PTE low
            ori     r8,r8,lo16(mpC)             ; Set change in mapping
            cmpw    r0,r0                       ; Make sure we return CR0_EQ
            stw     r8,mpVAddr+4(r31)           ; Set the flag part of mapping
            blr                                 ; Return...

; Function 13 - Test reference in physent

            .set    .,hwpOpBase+(13*128)        ; Generate error if previous function too long

hwpTRefPhy: lwz     r0,ppLink+4(r29)            ; Get the flags from physent
            rlwinm. r0,r0,0,ppRb-32,ppRb-32     ; Isolate reference bit and see if 0
            blr                                 ; Return (CR0_EQ set to continue if reference is off)...


; Function 14 - Test reference in mapping

            .set    .,hwpOpBase+(14*128)        ; Generate error if previous function too long

hwpTRefMap: rlwinm. r0,r5,0,mpRb-32,mpRb-32     ; Isolate reference bit and see if 0
            blr                                 ; Return (CR0_EQ set to continue if reference is off)...

; Function 15 - Test change in physent

            .set    .,hwpOpBase+(15*128)        ; Generate error if previous function too long

hwpTCngPhy: lwz     r0,ppLink+4(r29)            ; Get the flags from physent
            rlwinm. r0,r0,0,ppCb-32,ppCb-32     ; Isolate change bit and see if 0
            blr                                 ; Return (CR0_EQ set to continue if change is off)...


; Function 16 - Test change in mapping

            .set    .,hwpOpBase+(16*128)        ; Generate error if previous function too long

hwpTCngMap: rlwinm. r0,r5,0,mpCb-32,mpCb-32     ; Isolate change bit and see if 0
            blr                                 ; Return (CR0_EQ set to continue if change is off)...

            .set    .,hwpOpBase+(17*128)        ; Generate error if previous function too long


;
;           int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping.
;
;           Returns:
;               mapRtOK     - if all is ok
;               mapRtBadLk  - if mapping lock fails
;               mapRtPerm   - if mapping is permanent
;               mapRtNotFnd - if mapping is not found
;               mapRtBlock  - if mapping is a block
;           (A hedged caller sketch follows.)
;
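;
;           A hedged C sketch of a caller changing protection across a VA
;           range (illustrative only; prototypes and constants are assumed
;           to match the comment above):
;
;               int protect_range(pmap_t pmap, addr64_t va, addr64_t end,
;                                 unsigned int prot) {
;                   addr64_t next = 0;
;                   while (va < end) {
;                       int rc = hw_protect(pmap, va, prot, &next);
;                       if (rc == mapRtBadLk)
;                           return rc;              // lock timed out; give up
;                       // mapRtPerm/mapRtBlock/mapRtNotFnd: skip this mapping
;                       if (next == 0) break;       // no more mappings
;                       va = next;
;                   }
;                   return mapRtOK;
;               }
;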
            .align  5
            .globl  EXT(hw_protect)

LEXT(hw_protect)
            stwu    r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r24,FM_ARG0+0x00(r1)        ; Save a register
            stw     r25,FM_ARG0+0x04(r1)        ; Save a register
            mr      r25,r7                      ; Remember address of next va
            stw     r26,FM_ARG0+0x08(r1)        ; Save a register
            stw     r27,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r28,FM_ARG0+0x10(r1)        ; Save a register
            mr      r24,r6                      ; Save the new protection flags
            stw     r29,FM_ARG0+0x14(r1)        ; Save a register
            stw     r30,FM_ARG0+0x18(r1)        ; Save a register
            stw     r31,FM_ARG0+0x1C(r1)        ; Save a register
            stw     r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Save the return

            lwz     r6,pmapvr(r3)               ; Get the first part of the VR translation for pmap
            lwz     r7,pmapvr+4(r3)             ; Get the second part


            bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            mr      r27,r11                     ; Remember the old MSR
            mr      r26,r12                     ; Remember the feature bits

            xor     r28,r3,r7                   ; Change the common 32- and 64-bit half

            bf--    pf64Bitb,hpSF1              ; skip if 32-bit...

            rldimi  r28,r6,32,0                 ; Shift the fixed upper part of the physical over and cram in top

hpSF1:      mr      r29,r4                      ; Save top half of vaddr
            mr      r30,r5                      ; Save the bottom half

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            bne--   hpBadLock                   ; Nope...

            mr      r3,r28                      ; get the pmap address
            mr      r4,r29                      ; Get bits 0:31 to look for
            mr      r5,r30                      ; Get bits 32:64

            bl      EXT(mapSearch)              ; Go see if we can find it (note: R7 comes back with mpFlags)

            andi.   r7,r7,lo16(mpSpecial|mpNest|mpPerm|mpBlock|mpRIP)   ; Are we allowed to change it or is it being removed?
            mr.     r31,r3                      ; Save the mapping if we found it
            cmplwi  cr1,r7,0                    ; Anything special going on?
            mr      r29,r4                      ; Save next va high half
            mr      r30,r5                      ; Save next va low half

            beq--   hpNotFound                  ; Not found...

            bne--   cr1,hpNotAllowed            ; Something special is happening...

            bt++    pf64Bitb,hpDo64             ; Split for 64 bit

            bl      mapInvPte32                 ; Invalidate and lock PTEG, also merge into physent

            rlwimi  r5,r24,0,mpPPb-32,mpPPb-32+2    ; Stick in the new pp
            mr.     r3,r3                       ; Was there a previously valid PTE?

            stb     r5,mpVAddr+7(r31)           ; Set the new pp field (do not muck with the rest)

            beq--   hpNoOld32                   ; Nope...

            stw     r5,4(r3)                    ; Store second half of PTE
            eieio                               ; Make sure we do not reorder
            stw     r4,0(r3)                    ; Revalidate the PTE

            eieio                               ; Make sure all updates come first
            stw     r6,0(r7)                    ; Unlock PCA

hpNoOld32:  la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            li      r3,mapRtOK                  ; Set normal return
            b       hpR32                       ; Join common...

            .align  5


hpDo64:     bl      mapInvPte64                 ; Invalidate and lock PTEG, also merge into physent

            rldimi  r5,r24,0,mpPPb              ; Stick in the new pp
            mr.     r3,r3                       ; Was there a previously valid PTE?

            stb     r5,mpVAddr+7(r31)           ; Set the new pp field (do not muck with the rest)

            beq--   hpNoOld64                   ; Nope...

            std     r5,8(r3)                    ; Store second half of PTE
            eieio                               ; Make sure we do not reorder
            std     r4,0(r3)                    ; Revalidate the PTE

            eieio                               ; Make sure all updates come first
            stw     r6,0(r7)                    ; Unlock PCA

hpNoOld64:  la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            li      r3,mapRtOK                  ; Set normal return
            b       hpR64                       ; Join common...

            .align  5

hpReturn:   bt++    pf64Bitb,hpR64              ; Split if 64-bit...

hpR32:      mtmsr   r27                         ; Restore enables/translation/etc.
            isync
            b       hpReturnC                   ; Join common...

hpR64:      mtmsrd  r27                         ; Restore enables/translation/etc.
            isync

hpReturnC:  stw     r29,0(r25)                  ; Save the top of the next va
            stw     r30,4(r25)                  ; Save the bottom of the next va
            lwz     r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Restore the return
            lwz     r24,FM_ARG0+0x00(r1)        ; Restore a register
            lwz     r25,FM_ARG0+0x04(r1)        ; Restore a register
            lwz     r26,FM_ARG0+0x08(r1)        ; Restore a register
            mtlr    r0                          ; Restore the return
            lwz     r27,FM_ARG0+0x0C(r1)        ; Restore a register
            lwz     r28,FM_ARG0+0x10(r1)        ; Restore a register
            lwz     r29,FM_ARG0+0x14(r1)        ; Restore a register
            lwz     r30,FM_ARG0+0x18(r1)        ; Restore a register
            lwz     r31,FM_ARG0+0x1C(r1)        ; Restore a register
            lwz     r1,0(r1)                    ; Pop the stack
            blr                                 ; Leave...

            .align  5

hpBadLock:  li      r3,mapRtBadLk               ; Set lock time out error code
            b       hpReturn                    ; Leave...

hpNotFound: la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            li      r3,mapRtNotFnd              ; Set that we did not find the requested page
            b       hpReturn                    ; Leave...

hpNotAllowed:
            rlwinm. r0,r7,0,mpRIPb,mpRIPb       ; Is it actually being removed?
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bne--   hpNotFound                  ; Yeah...
            bl      sxlkUnlock                  ; Unlock the search list

            li      r3,mapRtBlock               ; Assume it was a block
            andi.   r7,r7,lo16(mpBlock)         ; Is this a block?
            bne++   hpReturn                    ; Yes, leave...

            li      r3,mapRtPerm                ; Set that we hit a permanent page
            b       hpReturn                    ; Leave...


;
;           int hw_test_rc(pmap, va, reset) - tests RC on a specific va
;
;           Returns the following code ORed with the RC from the mapping:
;               mapRtOK     - if all is ok
;               mapRtBadLk  - if mapping lock fails
;               mapRtNotFnd - if mapping is not found
;           (A hedged caller sketch follows.)
;
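;
;           A hedged C sketch of how a caller separates the status code from
;           the RC bits in hw_test_rc's return value (illustrative only;
;           mpR/mpC are the mapping reference/change bits):
;
;               int rc = hw_test_rc(pmap, va, 1);       // test and reset RC
;               int status = rc & ~(mpR | mpC);         // mapRtOK, mapRtBadLk, ...
;               if (status == mapRtOK) {
;                   int referenced = (rc & mpR) != 0;   // page was referenced
;                   int changed    = (rc & mpC) != 0;   // page was modified
;               }
;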
            .align  5
            .globl  EXT(hw_test_rc)

LEXT(hw_test_rc)
            stwu    r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stw     r24,FM_ARG0+0x00(r1)        ; Save a register
            stw     r25,FM_ARG0+0x04(r1)        ; Save a register
            stw     r26,FM_ARG0+0x08(r1)        ; Save a register
            stw     r27,FM_ARG0+0x0C(r1)        ; Save a register
            stw     r28,FM_ARG0+0x10(r1)        ; Save a register
            mr      r24,r6                      ; Save the reset request
            stw     r29,FM_ARG0+0x14(r1)        ; Save a register
            stw     r30,FM_ARG0+0x18(r1)        ; Save a register
            stw     r31,FM_ARG0+0x1C(r1)        ; Save a register
            stw     r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Save the return

            lwz     r6,pmapvr(r3)               ; Get the first part of the VR translation for pmap
            lwz     r7,pmapvr+4(r3)             ; Get the second part


            bl      EXT(mapSetUp)               ; Turn off interrupts, translation, and possibly enter 64-bit

            mr      r27,r11                     ; Remember the old MSR
            mr      r26,r12                     ; Remember the feature bits

            xor     r28,r3,r7                   ; Change the common 32- and 64-bit half

            bf--    pf64Bitb,htrSF1             ; skip if 32-bit...

            rldimi  r28,r6,32,0                 ; Shift the fixed upper part of the physical over and cram in top

htrSF1:     mr      r29,r4                      ; Save top half of vaddr
            mr      r30,r5                      ; Save the bottom half

            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkShared                  ; Go get a shared lock on the mapping lists
            mr.     r3,r3                       ; Did we get the lock?
            li      r25,0                       ; Clear RC
            bne--   htrBadLock                  ; Nope...

            mr      r3,r28                      ; get the pmap address
            mr      r4,r29                      ; Get bits 0:31 to look for
            mr      r5,r30                      ; Get bits 32:64

            bl      EXT(mapSearch)              ; Go see if we can find it (R7 comes back with mpFlags)

            andi.   r0,r7,lo16(mpSpecial|mpNest|mpPerm|mpBlock|mpRIP)   ; Are we allowed to change it or is it being removed?
            mr.     r31,r3                      ; Save the mapping if we found it
            cmplwi  cr1,r0,0                    ; Are we removing it?
            crorc   cr0_eq,cr0_eq,cr1_eq        ; Did we not find it or is it being removed?

            bt--    cr0_eq,htrNotFound          ; Not found, something special, or being removed...

            bt++    pf64Bitb,htrDo64            ; Split for 64 bit

            bl      mapInvPte32                 ; Invalidate and lock PTEG, also merge into physent

            cmplwi  cr1,r24,0                   ; Do we want to clear RC?
            lwz     r12,mpVAddr+4(r31)          ; Get the bottom of the mapping vaddr field
            mr.     r3,r3                       ; Was there a previously valid PTE?
            li      r0,lo16(mpR|mpC)            ; Get bits to clear

            and     r25,r5,r0                   ; Save the RC bits
            beq++   cr1,htrNoClr32              ; Nope...

            andc    r12,r12,r0                  ; Clear mapping copy of RC
            andc    r5,r5,r0                    ; Clear PTE copy of RC
            sth     r12,mpVAddr+6(r31)          ; Set the new RC

htrNoClr32: beq--   htrNoOld32                  ; No previously valid PTE...

            sth     r5,6(r3)                    ; Store updated RC
            eieio                               ; Make sure we do not reorder
            stw     r4,0(r3)                    ; Revalidate the PTE

            eieio                               ; Make sure all updates come first
            stw     r6,0(r7)                    ; Unlock PCA

htrNoOld32: la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list
            li      r3,mapRtOK                  ; Set normal return
            b       htrR32                      ; Join common...

            .align  5


htrDo64:    bl      mapInvPte64                 ; Invalidate and lock PTEG, also merge into physent

            cmplwi  cr1,r24,0                   ; Do we want to clear RC?
            lwz     r12,mpVAddr+4(r31)          ; Get the bottom of the mapping vaddr field
            mr.     r3,r3                       ; Was there a previously valid PTE?
            li      r0,lo16(mpR|mpC)            ; Get bits to clear

            and     r25,r5,r0                   ; Save the RC bits
            beq++   cr1,htrNoClr64              ; Nope...

            andc    r12,r12,r0                  ; Clear mapping copy of RC
            andc    r5,r5,r0                    ; Clear PTE copy of RC
            sth     r12,mpVAddr+6(r31)          ; Set the new RC

htrNoClr64: beq--   htrNoOld64                  ; No previously valid PTE...

            sth     r5,14(r3)                   ; Store updated RC
            eieio                               ; Make sure we do not reorder
            std     r4,0(r3)                    ; Revalidate the PTE

            eieio                               ; Make sure all updates come first
            stw     r6,0(r7)                    ; Unlock PCA

htrNoOld64: la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list
            li      r3,mapRtOK                  ; Set normal return
            b       htrR64                      ; Join common...

            .align  5

htrReturn:  bt++    pf64Bitb,htrR64             ; Split if 64-bit...

htrR32:     mtmsr   r27                         ; Restore enables/translation/etc.
            isync
            b       htrReturnC                  ; Join common...

htrR64:     mtmsrd  r27                         ; Restore enables/translation/etc.
            isync

htrReturnC: lwz     r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)   ; Restore the return
            or      r3,r3,r25                   ; Send the RC bits back
            lwz     r24,FM_ARG0+0x00(r1)        ; Restore a register
            lwz     r25,FM_ARG0+0x04(r1)        ; Restore a register
            lwz     r26,FM_ARG0+0x08(r1)        ; Restore a register
            mtlr    r0                          ; Restore the return
            lwz     r27,FM_ARG0+0x0C(r1)        ; Restore a register
            lwz     r28,FM_ARG0+0x10(r1)        ; Restore a register
            lwz     r29,FM_ARG0+0x14(r1)        ; Restore a register
            lwz     r30,FM_ARG0+0x18(r1)        ; Restore a register
            lwz     r31,FM_ARG0+0x1C(r1)        ; Restore a register
            lwz     r1,0(r1)                    ; Pop the stack
            blr                                 ; Leave...

            .align  5

htrBadLock: li      r3,mapRtBadLk               ; Set lock time out error code
            b       htrReturn                   ; Leave...

htrNotFound:
            la      r3,pmapSXlk(r28)            ; Point to the pmap search lock
            bl      sxlkUnlock                  ; Unlock the search list

            li      r3,mapRtNotFnd              ; Set that we did not find the requested page
            b       htrReturn                   ; Leave...


;
;           mapPhysFindLock - find physent list and lock it
;           R31 points to mapping
;
            .align  5

mapPhysFindLock:
            lbz     r4,mpFlags+1(r31)           ; Get the index into the physent bank table
            lis     r3,ha16(EXT(pmap_mem_regions))  ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part)
            rlwinm  r4,r4,2,0,29                ; Change index into byte offset
            addi    r4,r4,lo16(EXT(pmap_mem_regions))   ; Get low part of address of entry
            add     r3,r3,r4                    ; Point to table entry
            lwz     r5,mpPAddr(r31)             ; Get physical page number
            lwz     r7,mrStart(r3)              ; Get the start of range
            lwz     r3,mrPhysTab(r3)            ; Get the start of the entries for this bank
            sub     r6,r5,r7                    ; Get index to physent
            rlwinm  r6,r6,3,0,28                ; Get offset to physent
            add     r3,r3,r6                    ; Point right to the physent
            b       mapPhysLock                 ; Join in the lock...

;
;           mapPhysLock - lock a physent list
;           R3 contains list header
;
            .align  5

mapPhysLockS:
            li      r2,lgKillResv               ; Get a spot to kill reservation
            stwcx.  r2,0,r2                     ; Kill it...

mapPhysLockT:
            lwz     r2,ppLink(r3)               ; Get physent chain header
            rlwinm. r2,r2,0,0,0                 ; Is lock clear?
            bne--   mapPhysLockT                ; Nope, still locked...

mapPhysLock:
            lwarx   r2,0,r3                     ; Get the lock
            rlwinm. r0,r2,0,0,0                 ; Is it locked?
            oris    r0,r2,0x8000                ; Set the lock bit
            bne--   mapPhysLockS                ; It is locked, spin on it...
            stwcx.  r0,0,r3                     ; Try to stuff it back...
            bne--   mapPhysLock                 ; Collision, try again...
            isync                               ; Clear any speculations
            blr                                 ; Leave...
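;
;           A hedged C sketch of the lock protocol above (illustrative only):
;           the lock is the high bit of the physent link word, so the rest of
;           the word (the chain pointer and flags) is preserved while locked.
;
;               #define PP_LOCK 0x80000000u
;
;               static void map_phys_lock_sketch(volatile unsigned int *link) {
;                   for (;;) {
;                       while (*link & PP_LOCK) ;   // spin without reserving
;                       unsigned int old = *link;
;                       if ((old & PP_LOCK) == 0 &&
;                           __sync_bool_compare_and_swap(link, old, old | PP_LOCK))
;                           break;                  // got it
;                   }
;                   __asm__ volatile ("isync" ::: "memory");    // no speculation past lock
;               }
;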

;
;           mapPhysUnlock - unlock a physent list
;           R3 contains list header
;
            .align  5

mapPhysUnlock:
            lwz     r0,ppLink(r3)               ; Get physent chain header
            rlwinm  r0,r0,0,1,31                ; Clear the lock bit
            eieio                               ; Make sure unlock comes last
            stw     r0,ppLink(r3)               ; Unlock the list
            blr
2896
2897;
2898; mapPhysMerge - merge the RC bits into the master copy
2899; R3 points to the physent
2900; R4 contains the RC bits
2901;
2902; Note: we just return if RC is 0
2903;
2904 .align 5
2905
2906mapPhysMerge:
2907 rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits
2908 la r5,ppLink+4(r3) ; Point to the RC field
2909 beqlr-- ; Leave if RC is 0...
2910
2911mapPhysMergeT:
2912 lwarx r6,0,r5 ; Get the RC part
2913 or r6,r6,r4 ; Merge in the RC
2914 stwcx. r6,0,r5 ; Try to stuff it back...
2915 bne-- mapPhysMergeT ; Collision, try again...
2916 blr ; Leave...
2917
2918;
2919; Sets the physent link pointer and preserves all flags
2920; The list is locked
2921; R3 points to physent
2922; R4 has link to set
2923;
2924
2925 .align 5
2926
2927mapPhyCSet32:
2928 la r5,ppLink+4(r3) ; Point to the link word
2929
2930mapPhyCSetR:
2931 lwarx r2,0,r5 ; Get the link and flags
2932 rlwimi r4,r2,0,26,31 ; Insert the flags
2933 stwcx. r4,0,r5 ; Stick them back
2934 bne-- mapPhyCSetR ; Someone else did something, try again...
2935 blr ; Return...
2936
2937 .align 5
2938
2939mapPhyCSet64:
2940 li r0,0xFF ; Get mask to clean up mapping pointer
        rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC00000000000003F
2942
2943mapPhyCSet64x:
2944 ldarx r2,0,r3 ; Get the link and flags
2945 and r5,r2,r0 ; Isolate the flags
2946 or r6,r4,r5 ; Add them to the link
2947 stdcx. r6,0,r3 ; Stick them back
2948 bne-- mapPhyCSet64x ; Someone else did something, try again...
2949 blr ; Return...
2950
2951;
2952; mapBumpBusy - increment the busy count on a mapping
2953; R3 points to mapping
2954;
2955
2956 .align 5
2957
2958mapBumpBusy:
2959 lwarx r4,0,r3 ; Get mpBusy
2960 addis r4,r4,0x0100 ; Bump the busy count
2961 stwcx. r4,0,r3 ; Save it back
2962 bne-- mapBumpBusy ; This did not work, try again...
2963 blr ; Leave...
2964
2965;
; mapDropBusy - decrement the busy count on a mapping
2967; R3 points to mapping
2968;
2969
2970 .globl EXT(mapping_drop_busy)
2971 .align 5
2972
2973LEXT(mapping_drop_busy)
2974mapDropBusy:
2975 lwarx r4,0,r3 ; Get mpBusy
2976 addis r4,r4,0xFF00 ; Drop the busy count
2977 stwcx. r4,0,r3 ; Save it back
2978 bne-- mapDropBusy ; This did not work, try again...
2979 blr ; Leave...
2980
2981;
2982; mapDrainBusy - drain the busy count on a mapping
2983; R3 points to mapping
2984; Note: we already have a busy for ourselves. Only one
2985; busy per processor is allowed, so we just spin here
2986; waiting for the count to drop to 1.
; Also, the mapping cannot be on any lists when we do this
2988; so all we are doing is waiting until it can be released.
2989;
2990
2991 .align 5
2992
2993mapDrainBusy:
2994 lwz r4,mpFlags(r3) ; Get mpBusy
2995 rlwinm r4,r4,8,24,31 ; Clean it up
        cmplwi r4,1 ; Is it just our busy?
2997 beqlr++ ; Yeah, it is clear...
2998 b mapDrainBusy ; Try again...
2999
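/*
 * The busy count lives in the high-order byte of the mpFlags word, which is
 * why the adds above use addis: 0x0100 in the upper halfword adds 1 to that
 * byte and 0xFF00 adds -1 (the borrow out of the byte wraps harmlessly).
 * A C sketch of all three routines:
 *
 *      #include <stdint.h>
 *      #include <stdatomic.h>
 *
 *      static void map_bump_busy(_Atomic uint32_t *mpflags)
 *      {
 *          atomic_fetch_add_explicit(mpflags, 0x01000000u, memory_order_relaxed);
 *      }
 *
 *      static void map_drop_busy(_Atomic uint32_t *mpflags)
 *      {
 *          atomic_fetch_add_explicit(mpflags, 0xFF000000u, memory_order_relaxed);
 *      }
 *
 *      static void map_drain_busy(_Atomic uint32_t *mpflags)
 *      {
 *          while ((atomic_load_explicit(mpflags, memory_order_relaxed) >> 24) != 1)
 *              ;                                   // spin until only our busy is left
 *      }
 */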
3000
3001
3002;
3003; handleDSeg - handle a data segment fault
3004; handleISeg - handle an instruction segment fault
3005;
; All that we do here is to map these to DSI or ISI and ensure
3007; that the hash bit is not set. This forces the fault code
3008; to also handle the missing segment.
3009;
3010; At entry R2 contains per_proc, R13 contains savarea pointer,
3011; and R11 is the exception code.
3012;
3013
3014 .align 5
3015 .globl EXT(handleDSeg)
3016
3017LEXT(handleDSeg)
3018
3019 li r11,T_DATA_ACCESS ; Change fault to DSI
3020 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3021 b EXT(handlePF) ; Join common...
3022
3023 .align 5
3024 .globl EXT(handleISeg)
3025
3026LEXT(handleISeg)
3027
3028 li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI
3029 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3030 b EXT(handlePF) ; Join common...
3031
3032
3033/*
3034 * handlePF - handle a page fault interruption
3035 *
3036 * At entry R2 contains per_proc, R13 contains savarea pointer,
3037 * and R11 is the exception code.
3038 *
3039 * This first part does a quick check to see if we can handle the fault.
 * We cannot handle any kind of protection exceptions here, so we pass
3041 * them up to the next level.
3042 *
3043 * NOTE: In order for a page-fault redrive to work, the translation miss
3044 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
3045 * before we come here.
3046 */
3047
3048 .align 5
3049 .globl EXT(handlePF)
3050
3051LEXT(handlePF)
3052
3053 mfsprg r12,2 ; Get feature flags
3054 cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction
3055 lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode
3056 mtcrf 0x02,r12 ; move pf64Bit to cr6
3057 lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here
3058 lwz r18,SAVflags(r13) ; Get the flags
3059
3060 beq-- gotIfetch ; We have an IFETCH here...
3061
3062 lwz r27,savedsisr(r13) ; Get the DSISR
3063 lwz r29,savedar(r13) ; Get the first half of the DAR
3064 lwz r30,savedar+4(r13) ; And second half
3065
3066 b ckIfProt ; Go check if this is a protection fault...
3067
3068gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value
3069 lwz r29,savesrr0(r13) ; Get the first half of the instruction address
3070 lwz r30,savesrr0+4(r13) ; And second half
3071 stw r27,savedsisr(r13) ; Save the "constructed" DSISR
3072
3073ckIfProt: and. r4,r27,r0 ; Is this a non-handlable exception?
3074 li r20,64 ; Set a limit of 64 nests for sanity check
3075 bne-- hpfExit ; Yes... (probably not though)
3076
3077;
3078; Note: if the RI is on, we are accessing user space from the kernel, therefore we
3079; should be loading the user pmap here.
3080;
3081
3082 andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space?
3083 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
3084 mr r19,r2 ; Remember the per_proc
3085 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
3086 mr r23,r30 ; Save the low part of faulting address
3087 beq-- hpfInKern ; Skip if we are in the kernel
3088 la r8,ppUserPmap(r19) ; Point to the current user pmap
3089
3090hpfInKern: mr r22,r29 ; Save the high part of faulting address
3091
3092 bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit...
3093
3094;
3095; On 32-bit machines we emulate a segment exception by loading unused SRs with a
3096; predefined value that corresponds to no address space. When we see that value
3097; we turn off the PTE miss bit in the DSISR to drive the code later on that will
3098; cause the proper SR to be loaded.
3099;
3100
3101 lwz r28,4(r8) ; Pick up the pmap
3102 rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive?
3103 mr r25,r28 ; Save the original pmap (in case we nest)
3104 bne hpfNest ; Segs are not ours if so...
3105 mfsrin r4,r30 ; Get the SR that was used for translation
3106 cmplwi r4,invalSpace ; Is this a simulated segment fault?
3107 bne++ hpfNest ; No...
3108
3109 rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR
3110 b hpfNest ; Join on up...
3111
3112 .align 5
3113
3114 nop ; Push hpfNest to a 32-byte boundary
3115 nop ; Push hpfNest to a 32-byte boundary
3116 nop ; Push hpfNest to a 32-byte boundary
3117 nop ; Push hpfNest to a 32-byte boundary
3118 nop ; Push hpfNest to a 32-byte boundary
3119 nop ; Push hpfNest to a 32-byte boundary
3120
3121hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit)
3122 mr r25,r28 ; Save the original pmap (in case we nest)
3123
3124;
3125; This is where we loop descending nested pmaps
3126;
3127
3128hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3129 addi r20,r20,-1 ; Count nest try
3130 bl sxlkShared ; Go get a shared lock on the mapping lists
3131 mr. r3,r3 ; Did we get the lock?
3132 bne-- hpfBadLock ; Nope...
3133
3134 mr r3,r28 ; Get the pmap pointer
3135 mr r4,r22 ; Get top of faulting vaddr
3136 mr r5,r23 ; Get bottom of faulting vaddr
3137 bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags)
3138
3139 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one?
3140 mr. r31,r3 ; Save the mapping if we found it
3141 cmplwi cr1,r0,0 ; Check for removal
3142 crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing
3143
3144 bt-- cr0_eq,hpfNotFound ; Not found or removing...
3145
3146 rlwinm. r0,r7,0,mpNestb,mpNestb ; Are we nested?
3147 mr r26,r7 ; Get the flags for this mapping (passed back from search call)
3148
3149 lhz r21,mpSpace(r31) ; Get the space
3150
3151 beq++ hpfFoundIt ; No, we found our guy...
3152
3153
3154#if pmapTransSize != 12
3155#error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize
3156#endif
3157 rlwinm. r0,r26,0,mpSpecialb,mpSpecialb ; Special handling?
3158 cmplwi cr1,r20,0 ; Too many nestings?
3159 bne-- hpfSpclNest ; Do we need to do special handling?
3160
3161hpfCSrch: lhz r21,mpSpace(r31) ; Get the space
3162 lwz r8,mpNestReloc(r31) ; Get the vaddr relocation
3163 lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half
3164 la r3,pmapSXlk(r28) ; Point to the old pmap search lock
3165 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
3166 lis r10,hi16(EXT(pmapTrans)) ; Get the translate table
3167 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
3168 blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop...
3169 or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit
3170 slwi r11,r21,3 ; Multiply space by 8
3171 ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part
3172 addc r23,r23,r9 ; Relocate bottom half of vaddr
3173 lwz r10,0(r10) ; Get the actual translation map
3174 slwi r12,r21,2 ; Multiply space by 4
3175 add r10,r10,r11 ; Add in the higher part of the index
3176 rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit)
3177 adde r22,r22,r8 ; Relocate the top half of the vaddr
3178 add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry
3179 bl sxlkUnlock ; Unlock the search list
3180
3181 lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap
3182 bf-- pf64Bitb,hpfNest ; Done if 32-bit...
3183
3184 ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap
3185 b hpfNest ; Go try the new pmap...
3186
3187;
3188; Error condition. We only allow 64 nestings. This keeps us from having to
; check for recursive nests when we install them.
3190;
3191
3192 .align 5
3193
3194hpfNestTooMuch:
3195 lwz r20,savedsisr(r13) ; Get the DSISR
3196 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3197 bl sxlkUnlock ; Unlock the search list (R3 good from above)
3198 ori r20,r20,1 ; Indicate that there was a nesting problem
3199 stw r20,savedsisr(r13) ; Stash it
3200 lwz r11,saveexception(r13) ; Restore the exception code
        b EXT(PFSExit) ; Leave...
3202
3203;
3204; Error condition - lock failed - this is fatal
3205;
3206
3207 .align 5
3208
3209hpfBadLock:
3210 lis r0,hi16(Choke) ; System abend
3211 ori r0,r0,lo16(Choke) ; System abend
3212 li r3,failMapping ; Show mapping failure
3213 sc
3214;
3215; Did not find any kind of mapping
3216;
3217
3218 .align 5
3219
3220hpfNotFound:
3221 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3222 bl sxlkUnlock ; Unlock it
3223 lwz r11,saveexception(r13) ; Restore the exception code
3224
hpfExit: ; We need this because we cannot do a relative branch
        b EXT(PFSExit) ; Leave...
3227
3228
3229;
3230; Here is where we handle special mappings. So far, the only use is to load a
3231; processor specific segment register for copy in/out handling.
3232;
3233; The only (so far implemented) special map is used for copyin/copyout.
3234; We keep a mapping of a "linkage" mapping in the per_proc.
3235; The linkage mapping is basically a nested pmap that is switched in
3236; as part of context switch. It relocates the appropriate user address
3237; space slice into the right place in the kernel.
3238;
3239
3240 .align 5
3241
3242hpfSpclNest:
3243 la r31,ppCIOmp(r19) ; Just point to the mapping
3244 oris r27,r27,hi16(dsiSpcNest) ; Show that we had a special nesting here
3245 b hpfCSrch ; Go continue search...
3246
3247
3248;
3249; We have now found a mapping for the address we faulted on.
3250;
3251
3252;
; Here we go about calculating what the VSID should be. We concatenate
; the space ID (14 bits wide) 3 times. We then slide the vaddr over
; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID).
; Then we XOR the expanded space ID and the shifted vaddr. This gives us
3257; the VSID.
3258;
3259; This is used both for segment handling and PTE handling
3260;
3261
3262
3263#if maxAdrSpb != 14
3264#error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!!
3265#endif
3266
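/*
 * The same derivation in flat 64-bit C, ignoring the register pairing and
 * the key bits merged in below (bit positions schematic, per the diagrams
 * at the top of this file):
 *
 *      #include <stdint.h>
 *
 *      static uint64_t make_vsid(uint32_t space14, uint64_t ea)
 *      {
 *          uint64_t esid = ea >> 28;                   // EA[0:35], the segment number
 *          uint64_t sid3 = ((uint64_t)space14 << 28)   // three concatenated
 *                        | ((uint64_t)space14 << 14)   // copies of the
 *                        |  (uint64_t)space14;         // 14-bit space ID
 *          return sid3 ^ (esid << 14);                 // slide ESID up 14 bits, XOR
 *      }
 */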
3267 .align 5
3268
3269hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment
3270 rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment?
3271 rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID
3272 rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order
3273 rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over
3274 rlwinm r0,r27,0,dsiSpcNestb,dsiSpcNestb ; Isolate special nest flag
3275 rlwimi r21,r21,14,4,17 ; Make a second copy of space above first
3276 cmplwi cr5,r0,0 ; Did we just do a special nesting?
3277 rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35
        crorc cr0_eq,cr0_eq,cr5_eq ; Force ourselves through the seg load code if special nest
3279 rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register
3280 xor r14,r14,r20 ; Calculate the top half of VSID
3281 xor r15,r15,r21 ; Calculate the bottom half of the VSID
3282 rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing)
3283 rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry
3284 rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top
3285 rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position
3286 or r12,r12,r15 ; Add key into the bottom of VSID
3287;
3288; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12
3289
3290 bne++ hpfPteMiss ; Nope, normal PTE miss...
3291
3292;
3293; Here is the only place that we make an entry in the pmap segment cache.
3294;
3295; Note that we do not make an entry in the segment cache for special
3296; nested mappings. This makes the copy in/out segment get refreshed
3297; when switching threads.
3298;
3299; The first thing that we do is to look up the ESID we are going to load
3300; into a segment in the pmap cache. If it is already there, this is
3301; a segment that appeared since the last time we switched address spaces.
; If all is correct, then it was another processor that made the cache
3303; entry. If not, well, it is an error that we should die on, but I have
3304; not figured a good way to trap it yet.
3305;
3306; If we get a hit, we just bail, otherwise, lock the pmap cache, select
3307; an entry based on the generation number, update the cache entry, and
3308; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit
3309; entries that correspond to the last 4 bits (32:35 for 64-bit and
3310; 0:3 for 32-bit) of the ESID.
3311;
3312; Then we unlock and bail.
3313;
3314; First lock it. Then select a free slot or steal one based on the generation
3315; number. Then store it, update the allocation flags, and unlock.
3316;
3317; The cache entry contains an image of the ESID/VSID pair we would load for
3318; 64-bit architecture. For 32-bit, it is a simple transform to an SR image.
3319;
3320; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not
3321; the current one, which may have changed because we nested.
3322;
3323; Also remember that we do not store the valid bit in the ESID. If we
; do, this will break some other stuff.
3325;
3326
3327 bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault....
3328
3329 mr r3,r25 ; Point to the pmap
3330 mr r4,r22 ; ESID high half
3331 mr r5,r23 ; ESID low half
3332 bl pmapCacheLookup ; Go see if this is in the cache already
3333
3334 mr. r3,r3 ; Did we find it?
3335 mr r4,r11 ; Copy this to a different register
3336
3337 bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry...
3338
3339 lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table
3340 lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table
3341
3342 cntlzw r7,r4 ; Find a free slot
3343
3344 subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one
3345 rlwinm r30,r30,0,0,3 ; Clean up the ESID
3346 srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not
3347 addi r5,r4,1 ; Bump the generation number
3348 and r7,r7,r6 ; Clear bit number if none empty
3349 andc r8,r4,r6 ; Clear generation count if we found an empty
3350 rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word
3351 or r7,r7,r8 ; Select a slot number
3352 li r8,0 ; Clear
3353 andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using
3354 oris r8,r8,0x8000 ; Get the high bit on
3355 la r9,pmapSegCache(r25) ; Point to the segment cache
3356 slwi r6,r7,4 ; Get index into the segment cache
3357 slwi r2,r7,2 ; Get index into the segment cache sub-tag index
3358 srw r8,r8,r7 ; Get the mask
3359 cmplwi r2,32 ; See if we are in the first or second half of sub-tag
3360 li r0,0 ; Clear
3361 rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out
3362 oris r0,r0,0xF000 ; Get the sub-tag mask
3363 add r9,r9,r6 ; Point to the cache slot
3364 srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift work for either half)
3365 srw r5,r30,r2 ; Slide sub-tag to right slot (shift work for either half)
3366
3367 stw r29,sgcESID(r9) ; Save the top of the ESID
3368 andc r10,r10,r0 ; Clear sub-tag slot in case we are in top
3369 andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom
3370 stw r30,sgcESID+4(r9) ; Save the bottom of the ESID
3371 or r10,r10,r5 ; Stick in subtag in case top half
3372 or r11,r11,r5 ; Stick in subtag in case bottom half
3373 stw r14,sgcVSID(r9) ; Save the top of the VSID
3374 andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated
3375 stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key
3376 bge hpfSCSTbottom ; Go save the bottom part of sub-tag
3377
3378 stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag
3379 b hpfNoCacheEnt ; Go finish up...
3380
3381hpfSCSTbottom:
3382 stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag
3383
3384
3385hpfNoCacheEnt:
        eieio ; Make sure the cache entry is updated before we unlock
3387 stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number
3388
3389
3390hpfNoCacheEnt2:
3391 lwz r4,ppMapFlags(r19) ; Get the protection key modifier
3392 bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment...
3393
3394;
3395; Make and enter 32-bit segment register
3396;
3397
3398 lwz r16,validSegs(r19) ; Get the valid SR flags
3399 xor r12,r12,r4 ; Alter the storage key before loading segment register
3400 rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting
3401 rlwinm r6,r12,19,1,3 ; Insert the keys and N bit
3402 lis r0,0x8000 ; Set bit 0
        rlwimi r6,r12,20,12,31 ; Insert bits 4:23 of the VSID
3404 srw r0,r0,r2 ; Get bit corresponding to SR
3405 rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents
3406 or r16,r16,r0 ; Show that SR is valid
3407
3408 mtsrin r6,r30 ; Set the actual SR
3409
3410 stw r16,validSegs(r19) ; Set the valid SR flags
3411
3412 b hpfPteMiss ; SR loaded, go do a PTE...
3413
3414;
3415; Make and enter 64-bit segment look-aside buffer entry.
3416; Note that the cache entry is the right format except for valid bit.
3417; We also need to convert from long long to 64-bit register values.
3418;
3419
3420
3421 .align 5
3422
3423hpfLoadSeg64:
3424 ld r16,validSegs(r19) ; Get the valid SLB entry flags
3425 sldi r8,r29,32 ; Move high order address over
3426 sldi r10,r14,32 ; Move high part of VSID over
3427
3428 not r3,r16 ; Make valids be 0s
3429 li r0,1 ; Prepare to set bit 0
3430
3431 cntlzd r17,r3 ; Find a free SLB
3432 xor r12,r12,r4 ; Alter the storage key before loading segment table entry
3433 or r9,r8,r30 ; Form full 64-bit address
3434 cmplwi r17,63 ; Did we find a free SLB entry?
3435 sldi r0,r0,63 ; Get bit 0 set
3436 or r10,r10,r12 ; Move in low part and keys
3437 addi r17,r17,1 ; Skip SLB 0 always
3438 blt++ hpfFreeSeg ; Yes, go load it...
3439
3440;
3441; No free SLB entries, select one that is in use and invalidate it
3442;
3443 lwz r4,ppSegSteal(r19) ; Get the next slot to steal
3444 addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
3445 addi r4,r4,1 ; Set next slot to steal
3446 slbmfee r7,r17 ; Get the entry that is in the selected spot
3447 subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap
3448 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
3449 srawi r2,r2,31 ; Get -1 if steal index still in range
3450 slbie r7 ; Invalidate the in-use SLB entry
3451 and r4,r4,r2 ; Reset steal index when it should wrap
3452 isync ;
3453
3454 stw r4,ppSegSteal(r19) ; Set the next slot to steal
3455;
3456; We are now ready to stick the SLB entry in the SLB and mark it in use
3457;
3458
3459hpfFreeSeg:
3460 subi r4,r17,1 ; Adjust shift to account for skipping slb 0
3461 mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear
3462 srd r0,r0,r4 ; Set bit mask for allocation
3463 oris r9,r9,0x0800 ; Turn on the valid bit
3464 or r16,r16,r0 ; Turn on the allocation flag
3465 rldimi r9,r17,0,58 ; Copy in the SLB entry selector
3466
3467 beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest...
3468 slbie r7 ; Blow away a potential duplicate
3469
3470hpfNoBlow: slbmte r10,r9 ; Make that SLB entry
3471
3472 std r16,validSegs(r19) ; Mark as valid
3473 b hpfPteMiss ; STE loaded, go do a PTE...
3474
3475;
3476; The segment has been set up and loaded if need be. Now we are ready to build the
3477; PTE and get it into the hash table.
3478;
3479; Note that there is actually a race here. If we start fault processing on
3480; a different pmap, i.e., we have descended into a nested pmap, it is possible
3481; that the nest could have been removed from the original pmap. We would
3482; succeed with this translation anyway. I do not think we need to worry
3483; about this (famous last words) because nobody should be unnesting anything
; if there are still people actively using them. It should be up to the
3485; higher level VM system to put the kibosh on this.
3486;
3487; There is also another race here: if we fault on the same mapping on more than
3488; one processor at the same time, we could end up with multiple PTEs for the same
3489; mapping. This is not a good thing.... We really only need one of the
3490; fault handlers to finish, so what we do is to set a "fault in progress" flag in
3491; the mapping. If we see that set, we just abandon the handler and hope that by
3492; the time we restore context and restart the interrupted code, the fault has
3493; been resolved by the other guy. If not, we will take another fault.
3494;
3495
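/*
 * The flag dance at hpfPteMiss below, rendered as a C compare-and-swap: we
 * claim the fault by setting mpFIP, but give up if it was already set or a
 * PTE has shown up in the meantime (the bit position here is hypothetical):
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *      #include <stdatomic.h>
 *
 *      #define MP_FIP 0x00000400u                  // hypothetical mpFIP bit
 *
 *      static bool claim_fault(_Atomic uint32_t *mpflags, bool pte_valid)
 *      {
 *          uint32_t old = atomic_load_explicit(mpflags, memory_order_relaxed);
 *          do {
 *              if ((old & MP_FIP) || pte_valid)
 *                  return false;                   // someone else is handling it
 *          } while (!atomic_compare_exchange_weak_explicit(mpflags, &old,
 *                       old | MP_FIP, memory_order_acquire, memory_order_relaxed));
 *          return true;                            // the fault is ours to finish
 *      }
 */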
3496;
3497; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not.
3498; It is required to stay there until after we call mapSelSlot!!!!
3499;
3500
3501 .align 5
3502
3503hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field
3504 lwz r12,mpPte(r31) ; Get the quick pointer to PTE
3505 li r3,mpHValid ; Get the PTE valid bit
3506 andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side?
3507 ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag
3508 crnot cr1_eq,cr0_eq ; Remember if FIP was on
3509 and. r12,r12,r3 ; Isolate the valid bit
3510 crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail...
3511 beq-- hpfAbandon ; Yes, other processor is or already has handled this...
3512 andi. r0,r2,mpBlock ; Is this a block mapping?
3513 crmove cr7_eq,cr0_eq ; Remember if we have a block mapping
3514 stwcx. r2,0,r31 ; Store the flags
3515 bne-- hpfPteMiss ; Collision, try again...
3516
3517 bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff...
3518
3519;
3520; At this point we are about to do the 32-bit PTE generation.
3521;
3522; The following is the R14:R15 pair that contains the "shifted" VSID:
3523;
; 0 0 1 2 3 4 4 5 6
; 0 8 6 4 2 0 8 6 3
3526; +--------+--------+--------+--------+--------+--------+--------+--------+
3527; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
3528; +--------+--------+--------+--------+--------+--------+--------+--------+
3529;
3530; The 24 bits of the 32-bit architecture VSID is in the following:
3531;
; 0 0 1 2 3 4 4 5 6
; 0 8 6 4 2 0 8 6 3
3534; +--------+--------+--------+--------+--------+--------+--------+--------+
3535; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
3536; +--------+--------+--------+--------+--------+--------+--------+--------+
3537;
3538
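/*
 * For reference, the classic 32-bit primary hash the code below is forming,
 * as plain C (simplified: the mask from SDR1 really only conditions the
 * upper bits of the hash):
 *
 *      #include <stdint.h>
 *
 *      static uint32_t pteg_addr32(uint32_t htab_base, uint32_t hash_mask,
 *                                  uint32_t vsid, uint32_t ea)
 *      {
 *          uint32_t pgidx = (ea >> 12) & 0xFFFF;       // EA[4:19], the page index
 *          uint32_t hash  = (vsid & 0x7FFFF) ^ pgidx;  // primary hash function
 *          return htab_base + ((hash & hash_mask) << 6);   // 64-byte PTEGs
 *      }
 */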
3539
3540hpfBldPTE32:
3541 lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion)
3542 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
3543
3544 mfsdr1 r27 ; Get the hash table base address
3545
3546 rlwinm r0,r23,0,4,19 ; Isolate just the page index
3547 rlwinm r18,r23,10,26,31 ; Extract the API
3548 xor r19,r15,r0 ; Calculate hash << 12
3549 mr r2,r25 ; Save the flag part of the mapping
3550 rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image
3551 rlwinm r16,r27,16,7,15 ; Extract the hash table size
3552 rlwinm r25,r25,0,0,19 ; Clear out the flags
        slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing not supported)
3554 sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map)
3555 ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask
3556 rlwinm r27,r27,0,0,15 ; Extract the hash table base
3557 rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table
3558 add r24,r24,r25 ; Adjust to true physical address
        rlwimi r18,r15,27,5,24 ; Move bits 32:51 of the "shifted" VSID into the PTE image
3560 rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot
3561 and r19,r19,r16 ; Wrap hash table offset into the hash table
3562 ori r24,r24,lo16(mpR) ; Turn on the reference bit right now
3563 rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA
3564 add r19,r19,r27 ; Point to the PTEG
3565 subfic r20,r20,-4 ; Get negative offset to PCA
3566 oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
3567 add r20,r20,r27 ; Point to the PCA slot
3568
3569;
3570; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower.
3571; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
3572;
3573; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
3574; that some other processor beat us and stuck in a PTE or that
3575; all we had was a simple segment exception and the PTE was there the whole time.
; If we find a pointer, we are done.
3577;
3578
3579 mr r7,r20 ; Copy the PCA pointer
3580 bl mapLockPteg ; Lock the PTEG
3581
3582 lwz r12,mpPte(r31) ; Get the offset to the PTE
3583 mr r17,r6 ; Remember the PCA image
3584 mr r16,r6 ; Prime the post-select PCA image
3585 andi. r0,r12,mpHValid ; Is there a PTE here already?
3586 li r21,8 ; Get the number of slots
3587
3588 bne- cr7,hpfNoPte32 ; Skip this for a block mapping...
3589
3590 bne- hpfBailOut ; Someone already did this for us...
3591
3592;
3593; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a
3594; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
3595; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
3596; R4 returns the slot index.
3597;
3598; REMEMBER: CR7 indicates that we are building a block mapping.
3599;
3600
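/*
 * Once mapSelSlot hands back a slot index in R4, turning it into a PTE
 * address is pure scaling: a 32-bit PTE is 8 bytes and a 64-bit PTE is 16,
 * which is all the rlwinm r5,r4,3 below (and r5,r4,4 in the 64-bit path)
 * computes. A sketch:
 *
 *      static void *pte_slot(void *pteg, unsigned idx, int is64)
 *      {
 *          unsigned size = is64 ? 16 : 8;          // bytes per PTE
 *          return (char *)pteg + (idx & 7) * size; // 8 slots per PTEG
 *      }
 */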
3601hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots
3602 mr r6,r17 ; Get back the original PCA
3603 rlwimi r6,r16,0,8,15 ; Insert the updated steal slot
3604 blt- hpfBailOut ; Holy Cow, all slots are locked...
3605
3606 bl mapSelSlot ; Go select a slot (note that the PCA image is already set up)
3607
3608 cmplwi cr5,r3,1 ; Did we steal a slot?
3609 rlwinm r5,r4,3,26,28 ; Convert index to slot offset
3610 add r19,r19,r5 ; Point directly to the PTE
3611 mr r16,r6 ; Remember the PCA image after selection
3612 blt+ cr5,hpfInser32 ; Nope, no steal...
3613
3614 lwz r6,0(r19) ; Get the old PTE
3615 lwz r7,4(r19) ; Get the real part of the stealee
3616 rlwinm r6,r6,0,1,31 ; Clear the valid bit
        bgt cr5,hpfNipBM ; Do not try to lock a non-existent physent for a block mapping...
3618 srwi r3,r7,12 ; Change phys address to a ppnum
3619 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
3620 cmplwi cr1,r3,0 ; Check if this is in RAM
3621 bne- hpfNoPte32 ; Could not get it, try for another...
3622
3623 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
3624
3625hpfNipBM: stw r6,0(r19) ; Set the invalid PTE
3626
3627 sync ; Make sure the invalid is stored
3628 li r9,tlbieLock ; Get the TLBIE lock
3629 rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part
3630
3631hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock
3632 mfsprg r4,0 ; Get the per_proc
3633 rlwinm r8,r6,25,18,31 ; Extract the space ID
3634 rlwinm r11,r6,25,18,31 ; Extract the space ID
3635 lwz r7,hwSteals(r4) ; Get the steal count
3636 srwi r2,r6,7 ; Align segment number with hash
3637 rlwimi r11,r11,14,4,17 ; Get copy above ourselves
3638 mr. r0,r0 ; Is it locked?
3639 srwi r0,r19,6 ; Align PTEG offset for back hash
3640 xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits)
3641 xor r11,r11,r0 ; Hash backwards to partial vaddr
3642 rlwinm r12,r2,14,0,3 ; Shift segment up
3643 mfsprg r2,2 ; Get feature flags
3644 li r0,1 ; Get our lock word
3645 rlwimi r12,r6,22,4,9 ; Move up the API
3646 bne- hpfTLBIE32 ; It is locked, go wait...
3647 rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr
3648
3649 stwcx. r0,0,r9 ; Try to get it
3650 bne- hpfTLBIE32 ; We was beat...
3651 addi r7,r7,1 ; Bump the steal count
3652
3653 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
3654 li r0,0 ; Lock clear value
3655
3656 tlbie r12 ; Invalidate it everywhere
3657
3658 beq- hpfNoTS32 ; Can not have MP on this machine...
3659
3660 eieio ; Make sure that the tlbie happens first
3661 tlbsync ; Wait for everyone to catch up
3662 sync ; Make sure of it all
3663
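/*
 * A sketch of the invalidate protocol just executed, in C with PowerPC
 * inline asm (illustrative only; it builds only for PowerPC targets): one
 * global lock serializes tlbie because the architecture allows just one in
 * flight, and MP systems need the eieio/tlbsync/sync round trip.
 *
 *      #include <stdint.h>
 *      #include <stdatomic.h>
 *
 *      static _Atomic int tlbie_lock;
 *
 *      static void tlb_invalidate(uint32_t va, int is_mp)
 *      {
 *          while (atomic_exchange_explicit(&tlbie_lock, 1, memory_order_acquire))
 *              ;                                   // spin for the tlbie lock
 *          __asm__ volatile ("tlbie %0" : : "r" (va) : "memory");
 *          if (is_mp)
 *              __asm__ volatile ("eieio; tlbsync; sync" : : : "memory");
 *          atomic_store_explicit(&tlbie_lock, 0, memory_order_release);
 *      }
 */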
3664hpfNoTS32:
3665 stw r0,tlbieLock(0) ; Clear the tlbie lock
3666
3667 stw r7,hwSteals(r4) ; Save the steal count
3668 bgt cr5,hpfInser32 ; We just stole a block mapping...
3669
3670 lwz r4,4(r19) ; Get the RC of the just invalidated PTE
3671
3672 la r11,ppLink+4(r3) ; Point to the master RC copy
3673 lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping
3674 rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC
3675
3676hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC
3677 or r0,r0,r2 ; Merge in the new RC
3678 stwcx. r0,0,r11 ; Try to stick it back
3679 bne- hpfMrgRC32 ; Try again if we collided...
3680
3681
3682hpfFPnch: rlwinm. r7,r7,0,0,25 ; Clean and test mapping address
3683 beq- hpfLostPhys ; We could not find our mapping. Kick the bucket...
3684
3685 lhz r10,mpSpace(r7) ; Get the space
3686 lwz r9,mpVAddr+4(r7) ; And the vaddr
3687 cmplw cr1,r10,r8 ; Is this one of ours?
3688 xor r9,r12,r9 ; Compare virtual address
3689 cmplwi r9,0x1000 ; See if we really match
3690 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
3691 beq+ hpfFPnch2 ; Yes, found ours...
3692
3693 lwz r7,mpAlias+4(r7) ; Chain on to the next
3694 b hpfFPnch ; Check it out...
3695
3696hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG
3697 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG)
3698 bl mapPhysUnlock ; Unlock the physent now
3699
3700hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
3701
3702 stw r24,4(r19) ; Stuff in the real part of the PTE
3703 eieio ; Make sure this gets there first
3704
3705 stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid
3706 mr r17,r16 ; Get the PCA image to save
3707 b hpfFinish ; Go join the common exit code...
3708
3709
3710;
3711; At this point we are about to do the 64-bit PTE generation.
3712;
3713; The following is the R14:R15 pair that contains the "shifted" VSID:
3714;
; 0 0 1 2 3 4 4 5 6
; 0 8 6 4 2 0 8 6 3
3717; +--------+--------+--------+--------+--------+--------+--------+--------+
3718; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
3719; +--------+--------+--------+--------+--------+--------+--------+--------+
3720;
3721;
3722
3723 .align 5
3724
3725hpfBldPTE64:
3726 ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping
3727 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
3728
3729 mfsdr1 r27 ; Get the hash table base address
3730
3731 sldi r11,r22,32 ; Slide top of adjusted EA over
3732 sldi r14,r14,32 ; Slide top of VSID over
3733 rlwinm r5,r27,0,27,31 ; Isolate the size
3734 eqv r16,r16,r16 ; Get all foxes here
3735 rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN
3736 mr r2,r10 ; Save the flag part of the mapping
3737 or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value
3738 rldicr r27,r27,0,45 ; Clean up the hash table base
3739 or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value
3740 rlwinm r0,r11,0,4,19 ; Clear out everything but the page
3741 subfic r5,r5,46 ; Get number of leading zeros
3742 xor r19,r0,r15 ; Calculate hash
3743 ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE
3744 srd r16,r16,r5 ; Shift over to get length of table
3745 srdi r19,r19,5 ; Convert page offset to hash table offset
3746 rldicr r16,r16,0,56 ; Clean up lower bits in hash table size
3747 rldicr r10,r10,0,51 ; Clear out flags
3748 sldi r24,r24,12 ; Change ppnum to physical address
3749 sub r11,r11,r10 ; Get the offset from the base mapping
3750 and r19,r19,r16 ; Wrap into hash table
3751 add r24,r24,r11 ; Get actual physical address of this page
3752 srdi r20,r19,5 ; Convert PTEG offset to PCA offset
3753 rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc.
3754 subfic r20,r20,-4 ; Get negative offset to PCA
3755 ori r24,r24,lo16(mpR) ; Force on the reference bit
3756 add r20,r20,r27 ; Point to the PCA slot
3757 add r19,r19,r27 ; Point to the PTEG
3758
3759;
3760; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower.
3761; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
3762;
3763; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
3764; that some other processor beat us and stuck in a PTE or that
3765; all we had was a simple segment exception and the PTE was there the whole time.
; If we find a pointer, we are done.
3767;
3768
3769 mr r7,r20 ; Copy the PCA pointer
3770 bl mapLockPteg ; Lock the PTEG
3771
3772 lwz r12,mpPte(r31) ; Get the offset to the PTE
3773 mr r17,r6 ; Remember the PCA image
3774 mr r18,r6 ; Prime post-selection PCA image
3775 andi. r0,r12,mpHValid ; See if we have a PTE now
3776 li r21,8 ; Get the number of slots
3777
3778 bne-- cr7,hpfNoPte64 ; Skip this for a block mapping...
3779
3780 bne-- hpfBailOut ; Someone already did this for us...
3781
3782;
3783; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a
3784; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
3785; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
3786; R4 returns the slot index.
3787;
3788; REMEMBER: CR7 indicates that we are building a block mapping.
3789;
3790
3791hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots
3792 mr r6,r17 ; Restore original state of PCA
3793 rlwimi r6,r18,0,8,15 ; Insert the updated steal slot
3794 blt- hpfBailOut ; Holy Cow, all slots are locked...
3795
3796 bl mapSelSlot ; Go select a slot
3797
3798 cmplwi cr5,r3,1 ; Did we steal a slot?
3799 rlwinm r5,r4,4,25,27 ; Convert index to slot offset
3800 mr r18,r6 ; Remember the PCA image after selection
3801 add r19,r19,r5 ; Point directly to the PTE
3802 lwz r10,hwSteals(r2) ; Get the steal count
3803 blt++ cr5,hpfInser64 ; Nope, no steal...
3804
3805 ld r6,0(r19) ; Get the old PTE
3806 ld r7,8(r19) ; Get the real part of the stealee
3807 rldicr r6,r6,0,62 ; Clear the valid bit
        bgt cr5,hpfNipBMx ; Do not try to lock a non-existent physent for a block mapping...
        srdi r3,r7,12 ; Change physical address to a ppnum
3810 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
3811 cmplwi cr1,r3,0 ; Check if this is in RAM
3812 bne-- hpfNoPte64 ; Could not get it, try for another...
3813
3814 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
3815
3816hpfNipBMx: std r6,0(r19) ; Set the invalid PTE
3817 li r9,tlbieLock ; Get the TLBIE lock
3818
3819 srdi r11,r6,5 ; Shift VSID over for back hash
3820 mfsprg r4,0 ; Get the per_proc
3821 xor r11,r11,r19 ; Hash backwards to get low bits of VPN
3822 sync ; Make sure the invalid is stored
3823
3824 sldi r12,r6,16 ; Move AVPN to EA position
3825 sldi r11,r11,5 ; Move this to the page position
3826
3827hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock
3828 mr. r0,r0 ; Is it locked?
3829 li r0,1 ; Get our lock word
3830 bne-- hpfTLBIE65 ; It is locked, go wait...
3831
3832 stwcx. r0,0,r9 ; Try to get it
3833 rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN
3834 rldicl r8,r6,52,50 ; Isolate the address space ID
3835 bne-- hpfTLBIE64 ; We was beat...
3836 addi r10,r10,1 ; Bump the steal count
3837
        rldicl r11,r12,0,16 ; Clear because the book says so
3839 li r0,0 ; Lock clear value
3840
3841 tlbie r11 ; Invalidate it everywhere
3842
3843 mr r7,r8 ; Get a copy of the space ID
3844 eieio ; Make sure that the tlbie happens first
3845 rldimi r7,r7,14,36 ; Copy address space to make hash value
3846 tlbsync ; Wait for everyone to catch up
3847 rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top
3848 isync
3849 srdi r2,r6,26 ; Shift original segment down to bottom
3850
3851 ptesync ; Make sure of it all
3852
3853 stw r0,tlbieLock(0) ; Clear the tlbie lock
3854
3855 xor r7,r7,r2 ; Compute original segment
3856
3857 stw r10,hwSteals(r4) ; Save the steal count
3858 bgt cr5,hpfInser64 ; We just stole a block mapping...
3859
3860 rldimi r12,r7,28,0 ; Insert decoded segment
3861 rldicl r4,r12,0,13 ; Trim to max supported address
3862
3863 ld r12,8(r19) ; Get the RC of the just invalidated PTE
3864
3865 la r11,ppLink+4(r3) ; Point to the master RC copy
3866 ld r7,ppLink(r3) ; Grab the pointer to the first mapping
3867 rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC
3868
3869hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC
3870 li r12,0xFF ; Get mask to clean up alias pointer
3871 or r0,r0,r2 ; Merge in the new RC
        rldicl r12,r12,62,0 ; Rotate clean up mask to get 0xC00000000000003F
3873 stwcx. r0,0,r11 ; Try to stick it back
3874 bne-- hpfMrgRC64 ; Try again if we collided...
3875
3876hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address
3877 beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket...
3878
3879 lhz r10,mpSpace(r7) ; Get the space
3880 ld r9,mpVAddr(r7) ; And the vaddr
3881 cmplw cr1,r10,r8 ; Is this one of ours?
3882 xor r9,r4,r9 ; Compare virtual address
3883 cmpldi r9,0x1000 ; See if we really match
3884 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
3885 beq++ hpfFPnch2x ; Yes, found ours...
3886
3887 ld r7,mpAlias(r7) ; Chain on to the next
3888 b hpfFPnchx ; Check it out...
3889
3890 .align 5
3891
hpfTLBIE65: li r7,lgKillResv ; Point to the reservation kill area
3893 stwcx. r7,0,r7 ; Kill reservation
3894
3895hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock
3896 mr. r0,r0 ; Is it locked?
        beq++ hpfTLBIE64 ; No, it is free, go try to grab it again...
        b hpfTLBIE63 ; Yes, still locked, keep watching...
3899
3900
3901
3902hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG
3903 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though)
3904 bl mapPhysUnlock ; Unlock the physent now
3905
3906
3907hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE
3908 eieio ; Make sure this gets there first
3909 std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid
3910 mr r17,r18 ; Get the PCA image to set
3911 b hpfFinish ; Go join the common exit code...
3912
3913hpfLostPhys:
3914 lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead
3915 ori r0,r0,lo16(Choke) ; System abend
3916 sc
3917
3918;
3919; This is the common code we execute when we are finished setting up the PTE.
3920;
3921
3922 .align 5
3923
3924hpfFinish: sub r4,r19,r27 ; Get offset of PTE
3925 ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset
3926 bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map
3927 stw r4,mpPte(r31) ; Remember our PTE
3928
3929hpfBailOut: eieio ; Make sure all updates come first
3930 stw r17,0(r20) ; Unlock and set the final PCA
3931
3932;
3933; This is where we go if we have started processing the fault, but find that someone
3934; else has taken care of it.
3935;
3936
3937hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags
3938 rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag
3939 sth r2,mpFlags+2(r31) ; Set it
3940
3941 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3942 bl sxlkUnlock ; Unlock the search list
3943
3944 li r11,T_IN_VAIN ; Say that it was handled
3945 b EXT(PFSExit) ; Leave...
3946
3947;
3948; This is where we go when we find that someone else
3949; is in the process of handling the fault.
3950;
3951
3952hpfAbandon: li r3,lgKillResv ; Kill off any reservation
3953 stwcx. r3,0,r3 ; Do it
3954
3955 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3956 bl sxlkUnlock ; Unlock the search list
3957
3958 li r11,T_IN_VAIN ; Say that it was handled
3959 b EXT(PFSExit) ; Leave...
3960
3961
3962
3963/*
3964 * hw_set_user_space(pmap)
3965 * hw_set_user_space_dis(pmap)
3966 *
3967 * Indicate whether memory space needs to be switched.
 * We really need to turn off interrupts here, because we need to be non-preemptable.
3969 *
3970 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
3971 * register usage here. The VMM switch code in vmachmon.s that calls this
 * knows what registers are in use. Check there if these change.
3973 */
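/*
 * Stripped of the MSR juggling, what the routine records is small. A sketch
 * (the per_proc fields are assumed from the stores below; pmapvr is the
 * pmap's virtual-to-real XOR delta):
 *
 *      #include <stdint.h>
 *
 *      struct per_proc_sketch {
 *          uint64_t ppUserPmap;                    // real address of user pmap
 *          void    *ppUserPmapVirt;                // virtual address of the same
 *      };
 *
 *      static int set_user_space(struct per_proc_sketch *pp, void *pmap,
 *                                uint64_t pmapvr)
 *      {
 *          int same = (pp->ppUserPmapVirt == pmap);    // same address space?
 *          pp->ppUserPmap = (uint64_t)(uintptr_t)pmap ^ pmapvr;
 *          pp->ppUserPmapVirt = pmap;
 *          return same;                            // caller may skip dssall if unchanged
 *      }
 */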
3976
3977 .align 5
3978 .globl EXT(hw_set_user_space)
3979
3980LEXT(hw_set_user_space)
3981
3982 lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable
3983 mfmsr r10 ; Get the current MSR
3984 ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP
3985 ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE
3986 andc r10,r10,r8 ; Turn off VEC, FP for good
3987 andc r9,r10,r9 ; Turn off EE also
3988 mtmsr r9 ; Disable them
3989 isync ; Make sure FP and vec are off
3990 mfsprg r6,0 ; Get the per_proc_info address
3991 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
        mfsprg r4,2 ; Get the feature flags
3993 lwz r7,pmapvr(r3) ; Get the v to r translation
3994 lwz r8,pmapvr+4(r3) ; Get the v to r translation
3995 mtcrf 0x80,r4 ; Get the Altivec flag
3996 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
3997 cmplw cr1,r3,r2 ; Same address space as before?
3998 stw r7,ppUserPmap(r6) ; Show our real pmap address
3999 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4000 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4001 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4002 mtmsr r10 ; Restore interruptions
4003 beqlr-- cr1 ; Leave if the same address space or not Altivec
4004
4005 dssall ; Need to kill all data streams if adrsp changed
4006 sync
4007 blr ; Return...
4008
4009 .align 5
4010 .globl EXT(hw_set_user_space_dis)
4011
4012LEXT(hw_set_user_space_dis)
4013
4014 lwz r7,pmapvr(r3) ; Get the v to r translation
        mfsprg r4,2 ; Get the feature flags
4016 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4017 mfsprg r6,0 ; Get the per_proc_info address
4018 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4019 mtcrf 0x80,r4 ; Get the Altivec flag
4020 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4021 cmplw cr1,r3,r2 ; Same address space as before?
4022 stw r7,ppUserPmap(r6) ; Show our real pmap address
4023 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4024 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4025 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4026 beqlr-- cr1 ; Leave if the same
4027
4028 dssall ; Need to kill all data streams if adrsp changed
4029 sync
4030 blr ; Return...
4031
4032/* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry
4033 *
4034 * Lock must already be held on mapping block list
4035 * returns 0 if all slots filled.
4036 * returns n if a slot is found and it is not the last
4037 * returns -n if a slot is found and it is the last
4038 * when n and -n are returned, the corresponding bit is cleared
4039 * the mapping is zeroed out before return
4040 *
4041 */
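/*
 * The same search in C, treating the two 32-bit masks as one 64-bit word
 * (slot 0 holds the block header, so bit 0 is never free and -n stays
 * unambiguous):
 *
 *      #include <stdint.h>
 *
 *      static int mapalc1_sketch(uint64_t *mbfree)
 *      {
 *          if (*mbfree == 0)
 *              return 0;                           // all slots are in use
 *          int n = __builtin_clzll(*mbfree);       // index of first free slot
 *          *mbfree &= ~(0x8000000000000000ull >> n);   // claim it
 *          return *mbfree ? n : -n;                // -n means "block is now full"
 *      }
 */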
4042
4043 .align 5
4044 .globl EXT(mapalc1)
4045
4046LEXT(mapalc1)
4047 lwz r4,mbfree(r3) ; Get the 1st mask
4048 lis r0,0x8000 ; Get the mask to clear the first free bit
4049 lwz r5,mbfree+4(r3) ; Get the 2nd mask
4050 mr r12,r3 ; Save the block ptr
4051 cntlzw r3,r4 ; Get first 1-bit in 1st word
4052 srw. r9,r0,r3 ; Get bit corresponding to first free one
4053 cntlzw r10,r5 ; Get first free field in second word
4054 andc r4,r4,r9 ; Turn 1-bit off in 1st word
4055 bne mapalc1f ; Found one in 1st word
4056
4057 srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word
4058 li r3,0 ; assume failure return
4059 andc r5,r5,r9 ; Turn it off
4060 beqlr-- ; There are no 1 bits left...
4061 addi r3,r10,32 ; set the correct number
4062
4063mapalc1f:
4064 or. r0,r4,r5 ; any more bits set?
4065 stw r4,mbfree(r12) ; update bitmasks
4066 stw r5,mbfree+4(r12)
4067
4068 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4069 addi r7,r6,32
4070 dcbz r6,r12 ; clear the 64-byte mapping
4071 dcbz r7,r12
4072
4073 bnelr++ ; return if another bit remains set
4074
4075 neg r3,r3 ; indicate we just returned the last bit
4076 blr
4077
4078
4079/* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zero's a free 2-bit mapping entry
4080 *
4081 * Lock must already be held on mapping block list
4082 * returns 0 if all slots filled.
4083 * returns n if a slot is found and it is not the last
4084 * returns -n if a slot is found and it is the last
4085 * when n and -n are returned, the corresponding bits are cleared
4086 * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)).
4087 * the mapping is zero'd out before return
4088 */
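/*
 * The n & (n << 1) trick in isolation: a bit survives the AND only where it
 * and its lower-order neighbor are both set, so the leading set bit of the
 * result marks the first run of two adjacent free slots.
 *
 *      #include <stdint.h>
 *
 *      static int first_double_run(uint64_t n)
 *      {
 *          uint64_t runs = n & (n << 1);
 *          return runs ? __builtin_clzll(runs) : -1;   // -1: no 2-bit run
 *      }
 */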
4089
4090 .align 5
4091 .globl EXT(mapalc2)
4092LEXT(mapalc2)
4093 lwz r4,mbfree(r3) ; Get the first mask
4094 lis r0,0x8000 ; Get the mask to clear the first free bit
4095 lwz r5,mbfree+4(r3) ; Get the second mask
4096 mr r12,r3 ; Save the block ptr
4097 slwi r6,r4,1 ; shift first word over
4098 and r6,r4,r6 ; lite start of double bit runs in 1st word
4099 slwi r7,r5,1 ; shift 2nd word over
4100 cntlzw r3,r6 ; Get first free 2-bit run in 1st word
4101 and r7,r5,r7 ; lite start of double bit runs in 2nd word
4102 srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word
4103 cntlzw r10,r7 ; Get first free field in second word
4104 srwi r11,r9,1 ; shift over for 2nd bit in 1st word
4105 andc r4,r4,r9 ; Turn off 1st bit in 1st word
4106 andc r4,r4,r11 ; turn off 2nd bit in 1st word
4107 bne mapalc2a ; Found two consecutive free bits in 1st word
4108
4109 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
4110 li r3,0 ; assume failure
4111 srwi r11,r9,1 ; get mask for 2nd bit
4112 andc r5,r5,r9 ; Turn off 1st bit in 2nd word
4113 andc r5,r5,r11 ; turn off 2nd bit in 2nd word
4114 beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either
4115 addi r3,r10,32 ; set the correct number
4116
4117mapalc2a:
4118 or. r0,r4,r5 ; any more bits set?
4119 stw r4,mbfree(r12) ; update bitmasks
4120 stw r5,mbfree+4(r12)
4121 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4122 addi r7,r6,32
4123 addi r8,r6,64
4124 addi r9,r6,96
4125 dcbz r6,r12 ; zero out the 128-byte mapping
4126 dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines
4127 dcbz r8,r12 ; because the mapping may not be 128-byte aligned
4128 dcbz r9,r12
4129
4130 bnelr++ ; return if another bit remains set
4131
4132 neg r3,r3 ; indicate we just returned the last bit
4133 blr
4134
4135mapalc2c:
4136 rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31
4137 and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free?
4138 beqlr ; no, we failed
4139 rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word
4140 rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word
4141 li r3,31 ; get index of this field
4142 b mapalc2a
4143
4144
4145;
; This routine initializes the hash table and PCA.
4147; It is done here because we may need to be 64-bit to do it.
4148;
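/*
 * What the two loops below amount to, in C: zero the hash table itself, then
 * stamp every PCA entry (4 bytes per PTEG, growing downward from the table
 * base) with the "all eight slots free, steal at the end" pattern that the
 * lis r4,0xFF01 below sets up:
 *
 *      #include <stdint.h>
 *      #include <string.h>
 *
 *      static void hash_init_sketch(uint8_t *htab_base, uint32_t htab_size)
 *      {
 *          uint32_t *pca = (uint32_t *)(void *)htab_base;
 *          uint32_t ptegs = htab_size / 64;        // 64 bytes per PTEG
 *
 *          memset(htab_base, 0, htab_size);        // the dcbz loops
 *          for (uint32_t i = 1; i <= ptegs; i++)
 *              pca[-(int32_t)i] = 0xFF010000u;     // free mask + steal index
 *      }
 */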
4149
4150 .align 5
4151 .globl EXT(hw_hash_init)
4152
4153LEXT(hw_hash_init)
4154
4155 mfsprg r10,2 ; Get feature flags
4156 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
4157 mtcrf 0x02,r10 ; move pf64Bit to cr6
4158 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
4159 lis r4,0xFF01 ; Set all slots free and start steal at end
4160 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
4161 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
4162
4163 lwz r12,0(r12) ; Get hash table size
4164 li r3,0 ; Get start
4165 bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint)
4166
4167 lwz r11,4(r11) ; Get hash table base
4168
4169hhiNext32: cmplw r3,r12 ; Have we reached the end?
4170 bge- hhiCPCA32 ; Yes...
4171 dcbz r3,r11 ; Clear the line
4172 addi r3,r3,32 ; Next one...
4173 b hhiNext32 ; Go on...
4174
4175hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4
4176 li r3,-4 ; Displacement to first PCA entry
4177 neg r12,r12 ; Get negative end of PCA
4178
4179hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry
4180 subi r3,r3,4 ; Next slot
4181 cmpw r3,r12 ; Have we finished?
4182 bge+ hhiNPCA32 ; Not yet...
4183 blr ; Leave...
4184
4185hhiSF: mfmsr r9 ; Save the MSR
4186 li r8,1 ; Get a 1
4187 mr r0,r9 ; Get a copy of the MSR
4188 ld r11,0(r11) ; Get hash table base
4189 rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0)
4190 mtmsrd r0 ; Turn on SF
4191 isync
4192
4193
4194hhiNext64: cmpld r3,r12 ; Have we reached the end?
4195 bge-- hhiCPCA64 ; Yes...
4196 dcbz128 r3,r11 ; Clear the line
4197 addi r3,r3,128 ; Next one...
4198 b hhiNext64 ; Go on...
4199
4200hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4
4201 li r3,-4 ; Displacement to first PCA entry
4202 neg r12,r12 ; Get negative end of PCA
4203
4204hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry
4205 subi r3,r3,4 ; Next slot
4206 cmpd r3,r12 ; Have we finished?
4207 bge++ hhiNPCA64 ; Not yet...
4208
4209 mtmsrd r9 ; Turn off SF if it was off
4210 isync
4211 blr ; Leave...
4212
4213
4214;
4215; This routine sets up the hardware to start translation.
4216; Note that we do NOT start translation.
4217;
4218
4219 .align 5
4220 .globl EXT(hw_setup_trans)
4221
4222LEXT(hw_setup_trans)
4223
4224 mfsprg r11,0 ; Get the per_proc block
4225 mfsprg r12,2 ; Get feature flags
4226 li r0,0 ; Get a 0
4227 li r2,1 ; And a 1
4228 mtcrf 0x02,r12 ; Move pf64Bit to cr6
4229 stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid
4230 stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux
4231 sth r2,ppInvSeg(r11) ; Force a reload of the SRs
4232 sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel
4233
4234 bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint)
4235
4236 li r9,0 ; Clear out a register
4237 sync
4238 isync
4239 mtdbatu 0,r9 ; Invalidate maps
4240 mtdbatl 0,r9 ; Invalidate maps
4241 mtdbatu 1,r9 ; Invalidate maps
4242 mtdbatl 1,r9 ; Invalidate maps
4243 mtdbatu 2,r9 ; Invalidate maps
4244 mtdbatl 2,r9 ; Invalidate maps
4245 mtdbatu 3,r9 ; Invalidate maps
4246 mtdbatl 3,r9 ; Invalidate maps
4247
4248 mtibatu 0,r9 ; Invalidate maps
4249 mtibatl 0,r9 ; Invalidate maps
4250 mtibatu 1,r9 ; Invalidate maps
4251 mtibatl 1,r9 ; Invalidate maps
4252 mtibatu 2,r9 ; Invalidate maps
4253 mtibatl 2,r9 ; Invalidate maps
4254 mtibatu 3,r9 ; Invalidate maps
4255 mtibatl 3,r9 ; Invalidate maps
4256
4257 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
4258 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
4259 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
4260 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
4261 lwz r11,4(r11) ; Get hash table base
4262 lwz r12,0(r12) ; Get hash table size
4263 subi r12,r12,1 ; Back off by 1
4264 rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image
4265
4266 mtsdr1 r11 ; Ok, we now have the hash table set up
4267 sync
4268
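/*
 * The 32-bit SDR1 image just built: table base in the upper bits, HTABMASK
 * in the low 9. For a power-of-two table size of at least 64KB this is
 * simply:
 *
 *      #include <stdint.h>
 *
 *      static uint32_t sdr1_image(uint32_t base, uint32_t size)
 *      {
 *          return base | ((size >> 16) - 1);       // HTABMASK = size/64KB - 1
 *      }
 */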
4269 li r12,invalSpace ; Get the invalid segment value
4270 li r10,0 ; Start low
4271
4272hstsetsr: mtsrin r12,r10 ; Set the SR
4273 addis r10,r10,0x1000 ; Bump the segment
4274 mr. r10,r10 ; Are we finished?
4275 bne+ hstsetsr ; Nope...
4276 sync
4277 blr ; Return...
4278
4279;
4280; 64-bit version
4281;
4282
4283hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
4284 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
4285 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
4286 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
4287 ld r11,0(r11) ; Get hash table base
4288 lwz r12,0(r12) ; Get hash table size
4289 cntlzw r10,r12 ; Get the number of bits
4290 subfic r10,r10,13 ; Get the extra bits we need
4291 or r11,r11,r10 ; Add the size field to SDR1
4292
4293 mtsdr1 r11 ; Ok, we now have the hash table set up
4294 sync
4295
4296 li r0,0 ; Set an SLB slot index of 0
4297 slbia ; Trash all SLB entries (except for entry 0 that is)
4298 slbmfee r7,r0 ; Get the entry that is in SLB index 0
4299 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
4300 slbie r7 ; Invalidate it
4301
4302 blr ; Return...
4303
4304
4305;
4306; This routine turns on translation for the first time on a processor
4307;
4308
4309 .align 5
4310 .globl EXT(hw_start_trans)
4311
4312LEXT(hw_start_trans)
4313
4314
4315 mfmsr r10 ; Get the msr
4316 ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation
4317
4318 mtmsr r10 ; Everything falls apart here
4319 isync
4320
4321 blr ; Back to it.
4322
4323
4324
4325;
4326; This routine validates a segment register.
4327; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va)
4328;
4329; r3 = virtual pmap
4330; r4 = segment[0:31]
4331; r5 = segment[32:63]
4332; r6 = va[0:31]
4333; r7 = va[32:63]
4334;
4335; Note that we transform the addr64_t (long long) parameters into single 64-bit values.
4336; Note that there is no reason to apply the key modifier here because this is only
4337; used for kernel accesses.
4338;
4339
4340 .align 5
4341 .globl EXT(hw_map_seg)
4342
4343LEXT(hw_map_seg)
4344
4345 lwz r0,pmapSpace(r3) ; Get the space, we will need it soon
4346 lwz r9,pmapFlags(r3) ; Get the flags for the keys now
4347 mfsprg r10,2 ; Get feature flags
4348 mfsprg r12,0 ; Get the per_proc
4349
4350;
; Note: the following code would probably be easier to follow if I split it,
4352; but I just wanted to see if I could write this to work on both 32- and 64-bit
4353; machines combined.
4354;
4355
4356;
4357; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines)
4358; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines)
4359
        rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0:31] - no-op for 32-bit
4361 rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID
4362 mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6
4363 srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest
        rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:35]
4365 rlwimi r0,r0,14,4,17 ; Dup address space ID above itself
4366 rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines)
        rlwinm r2,r0,28,0,31 ; Rotate low nybble to top of low half
4368 rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32
4369 rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines)
4370
4371 rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space
4372 ; concatenated together. There is garbage
4373 ; at the top for 64-bit but we will clean
4374 ; that out later.
4375 rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit
4376
4377
4378;
4379; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or
4380; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines
4381;
4382
4383;
4384; What we have now is:
4385;
4386; 0 0 1 2 3 4 4 5 6
4387; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
4388; +--------+--------+--------+--------+--------+--------+--------+--------+
4389; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value
4390; +--------+--------+--------+--------+--------+--------+--------+--------+
4391; 0 0 1 2 3 - for 32-bit machines
4392; 0 8 6 4 1
4393;
4394; 0 0 1 2 3 4 4 5 6
4395; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
4396; +--------+--------+--------+--------+--------+--------+--------+--------+
4397; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA
4398; +--------+--------+--------+--------+--------+--------+--------+--------+
4399; 0 0 1 2 3 - for 32-bit machines
4400; 0 8 6 4 1
4401;
4402; 0 0 1 2 3 4 4 5 6
4403; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
4404; +--------+--------+--------+--------+--------+--------+--------+--------+
4405; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment
4406; +--------+--------+--------+--------+--------+--------+--------+--------+
4407; 0 0 1 2 3 - for 32-bit machines
4408; 0 8 6 4 1
4409
4410
4411 xor r8,r8,r2 ; Calculate VSID
4412
4413 bf-- pf64Bitb,hms32bit ; Skip out if 32-bit...
4414
4415 li r0,1 ; Prepare to set bit 0 (also to clear EE)
4416 mfmsr r6 ; Get current MSR
4417 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
4418 mtmsrd r0,1 ; Set only the EE bit to 0
4419 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
4420 mfmsr r11 ; Get the MSR right now, after disabling EE
4421 andc r2,r11,r2 ; Turn off translation now
4422 rldimi r2,r0,63,0 ; Get the 64-bit (SF) bit turned on
4423 or r11,r11,r6 ; Turn on the EE bit if it was on
4424 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
4425 isync ; Hang out a bit
4426
4427 ld r6,validSegs(r12) ; Get the valid SLB entry flags
4428 sldi r9,r9,9 ; Position the key and noex bit
4429
4430 rldimi r5,r8,12,0 ; Form the VSID/key
4431
4432 not r3,r6 ; Make valids be 0s
4433
4434 cntlzd r7,r3 ; Find a free SLB
4435 cmplwi r7,63 ; Did we find a free SLB entry?
4436
4437 slbie r4 ; Since this ESID may still be in an SLBE, kill it
4438
4439 oris r4,r4,0x0800 ; Turn on the valid bit in ESID
4440 addi r7,r7,1 ; Make sure we skip slb 0
4441 blt++ hmsFreeSeg ; Yes, go load it...
4442
4443;
4444; No free SLB entries, select one that is in use and invalidate it
4445;
4446 lwz r2,ppSegSteal(r12) ; Get the next slot to steal
4447 addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
4448 addi r2,r2,1 ; Set next slot to steal
4449 slbmfee r3,r7 ; Get the entry that is in the selected spot
4450 subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap
4451 rldicr r3,r3,0,35 ; Clear the valid bit and the rest
4452 srawi r8,r8,31 ; Get -1 if steal index still in range
4453 slbie r3 ; Invalidate the in-use SLB entry
4454 and r2,r2,r8 ; Reset steal index when it should wrap
4455 isync ;
4456
4457 stw r2,ppSegSteal(r12) ; Set the next slot to steal
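;
; The subi/srawi/and triplet above implements a branch-free wrap; as an
; illustrative C sketch (not part of the build):
;
;	next = cur + 1;
;	if (next >= 64 - (pmapSegCacheUse + 1)) next = 0;
;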
4458;
4459; We are now ready to stick the SLB entry in the SLB and mark it in use
4460;
4461
4462hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0
4463 rldimi r4,r7,0,58 ; Copy in the SLB entry selector
4464 srd r0,r0,r2 ; Set bit mask for allocation
4465 rldicl r5,r5,0,15 ; Clean out the unsupported bits
4466 or r6,r6,r0 ; Turn on the allocation flag
4467
4468 slbmte r5,r4 ; Make that SLB entry
4469
4470 std r6,validSegs(r12) ; Mark as valid
4471 mtmsrd r11 ; Restore the MSR
4472 isync
4473 blr ; Back to it...
4474
4475 .align 5
4476
4477hms32bit: rlwinm r8,r8,0,8,31 ; Clean up the VSID
4478 rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting
4479 lis r0,0x8000 ; Set bit 0
4480 rlwimi r8,r9,28,1,3 ; Insert the keys and N bit
4481 srw r0,r0,r2 ; Get bit corresponding to SR
4482 addi r7,r12,validSegs ; Point to the valid segment flags directly
4483
4484 mtsrin r8,r4 ; Set the actual SR
4485 isync ; Need to make sure this is done
4486
4487hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags
4488 or r6,r6,r0 ; Show that SR is valid
4489 stwcx. r6,0,r7 ; Set the valid SR flags
4490 bne-- hmsrupt ; Had an interrupt, need to get flags again...
4491
4492 blr ; Back to it...
4493
4494
4495;
4496; This routine invalidates a segment register.
4497;
4498
4499 .align 5
4500 .globl EXT(hw_blow_seg)
4501
4502LEXT(hw_blow_seg)
4503
4504 mfsprg r10,2 ; Get feature flags
4505 mfsprg r12,0 ; Get the per_proc
4506 mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6
4507
4508 addi r7,r12,validSegs ; Point to the valid segment flags directly
4509 rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean
4510
4511 bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit...
4512
4513 li r0,1 ; Prepare to set bit 0 (also to clear EE)
4514 mfmsr r6 ; Get current MSR
4515 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
4516 mtmsrd r0,1 ; Set only the EE bit to 0
4517 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
4518 mfmsr r11 ; Get the MSR right now, after disabling EE
4519 andc r2,r11,r2 ; Turn off translation now
4520 rldimi r2,r0,63,0 ; Get the 64-bit (SF) bit turned on
4521 or r11,r11,r6 ; Turn on the EE bit if it was on
4522 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
4523 isync ; Hang out a bit
4524
4525 rldimi r9,r3,32,0 ; Insert the top part of the ESID
4526
4527 slbie r9 ; Invalidate the associated SLB entry
4528
4529 mtmsrd r11 ; Restore the MSR
4530 isync
4531 blr ; Back to it.
4532
4533 .align 5
4534
4535hbs32bit: lwarx r4,0,r7 ; Get and reserve the valid segment flags
4536 rlwinm r6,r9,4,28,31 ; Convert segment to number
4537 lis r2,0x8000 ; Set up a mask
4538 srw r2,r2,r6 ; Make a mask
4539 and. r0,r4,r2 ; See if this is even valid
4540 li r5,invalSpace ; Set the invalid address space VSID
4541 beqlr ; Leave if already invalid...
4542
4543 mtsrin r5,r9 ; Slam the segment register
4544 isync ; Need to make sure this is done
4545
4546hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment
4547 stwcx. r4,0,r7 ; Set the valid SR flags
4548 beqlr++ ; Stored ok, no interrupt, time to leave...
4549
4550 lwarx r4,0,r7 ; Get and reserve the valid segment flags again
4551 b hbsrupt ; Try again...
4552
4553;
4554; This routine invalidates the entire pmap segment cache
4555;
4556; Translation is on, interrupts may or may not be enabled.
4557;
4558
4559 .align 5
4560 .globl EXT(invalidateSegs)
4561
4562LEXT(invalidateSegs)
4563
4564 la r10,pmapCCtl(r3) ; Point to the segment cache control
4565 eqv r2,r2,r2 ; Get all foxes
4566
4567isInv: lwarx r4,0,r10 ; Get the segment cache control value
4568 rlwimi r4,r2,0,0,15 ; Slam in all invalid bits
4569 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
4570 bne-- isInv0 ; Yes, try again...
4571
4572 stwcx. r4,0,r10 ; Try to invalidate it
4573 bne-- isInv ; Someone else just stuffed it...
4574 blr ; Leave...
4575
4576
4577isInv0: li r4,lgKillResv ; Get reservation kill zone
4578 stwcx. r4,0,r4 ; Kill reservation
4579
4580isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control
4581 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
4582 beq++ isInv ; Nope, it is free now, go try to take it again...
4583 b isInv1 ; Still locked, do it again...
4584
4585;
4586; This routine switches segment registers between kernel and user.
4587; We have some assumptions and rules:
4588; We are in the exception vectors
4589; pf64Bitb is set up
4590; R3 contains the MSR we are going to
4591; We cannot use R4, R13, R20, R21, R29
4592; R13 is the savearea
4593; R29 has the per_proc
4594;
4595; We return R3 as 0 if we did not switch between kernel and user
4596; We also maintain and apply the user state key modifier used by VMM support:
4597; if we go to the kernel it is set to 0, otherwise it follows the bit
4598; in spcFlags.
4599;
4600
4601 .align 5
4602 .globl EXT(switchSegs)
4603
4604LEXT(switchSegs)
4605
4606 lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator)
4607 lwz r9,spcFlags(r29) ; Pick up the special user state flags
4608 rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit
4609 rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit
4610 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
4611 or r2,r2,r3 ; This will be 1 if we will be using user segments
4612 li r3,0 ; Get a selection mask
4613 cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg
4614 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
4615 sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user
4616 la r19,ppUserPmap(r29) ; Point to the current user pmap
4617
4618; The following line is an exercise of a generally unreadable but recompile-friendly programming practice
4619 rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key
4620
4621 andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise
4622 and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise
4623 and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise
4624 or r8,r8,r19 ; Get the pointer to the pmap we are using
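;
; Illustrative C sketch (not part of the build) of the branchless select just
; done; mask is 0 for kernel, all ones for user:
;
;	pmap = (kernel_pmap_phys & ~mask) | (user_pmap & mask);
;	keymod = user_keymod & mask;		// key modifier is 0 for the kernel
;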
4625
4626 beqlr ; We are staying in the same mode, do not touch segs...
4627
4628 lwz r28,0(r8) ; Get top half of pmap address
4629 lwz r10,4(r8) ; Get bottom half
4630
4631 stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg
4632 rlwinm r28,r28,0,1,0 ; Copy top to top
4633 stw r30,ppMapFlags(r29) ; Set the key modifier
4634 rlwimi r28,r10,0,0,31 ; Insert bottom
4635
4636 la r10,pmapCCtl(r28) ; Point to the segment cache control
4637 la r9,pmapSegCache(r28) ; Point to the segment cache
4638
4639ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control
4640 rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
4641 ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit
4642 bne-- ssgLock0 ; Yup, this is in use...
4643
4644 stwcx. r16,0,r10 ; Try to set the lock
4645 bne-- ssgLock ; Did we get contention?
4646
4647 not r11,r15 ; Invert the invalids to valids
4648 li r17,0 ; Set a mask for the SRs we are loading
4649 isync ; Make sure we are all caught up
4650
4651 bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it...
4652
4653 li r0,0 ; Clear
4654 slbia ; Trash all SLB entries (except for entry 0 that is)
4655 li r17,1 ; Get SLB index to load (skip slb 0)
4656 oris r0,r0,0x8000 ; Get set for a mask
4657 b ssg64Enter ; Start on a cache line...
4658
4659 .align 5
4660
4661ssgLock0: li r15,lgKillResv ; Killing field
4662 stwcx. r15,0,r15 ; Kill reservation
4663
4664ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls
4665 rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
4666 beq++ ssgLock ; Nope, it is free now, go try to grab it again...
4667 b ssgLock1 ; Yup, still locked, keep waiting...
4668;
4669; This is the 32-bit address space switch code.
4670; We take a reservation on the segment cache and walk through.
4671; For each valid entry, we load the corresponding segment register and remember
4672; which ones we loaded with a mask. Then, we figure out which segments should be
4673; invalid and then see which actually are. Then we load those with the
4674; defined invalid VSID.
4675; Afterwards, we unlock the segment cache.
4676;
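;
; Illustrative C sketch (not part of the build) of the two loops below; clz32,
; set_sr, cctl, and the cache fields are schematic stand-ins:
;
;	loaded = 0; valid = ~cctl;			// invalid bits become valid bits
;	while ((s = clz32(valid)) < pmapSegCacheUse) {
;		valid &= ~(0x80000000 >> s);
;		seg = cache[s].esid >> 28;		// segment number
;		set_sr(seg, cache[s].vsid ^ keymod);	// mtsrin
;		loaded |= 0x80000000 >> seg;
;	}
;	stale = old_valid & ~loaded;			// valid before but not reloaded
;	while ((b = clz32(stale)) < 16) {
;		stale &= ~(0x80000000 >> b);
;		set_sr(b, invalSpace);
;	}
;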
4677
4678 .align 5
4679
4680ssg32Enter: cntlzw r12,r11 ; Find the next slot in use
4681 cmplwi r12,pmapSegCacheUse ; See if we are done
4682 slwi r14,r12,4 ; Index to the cache slot
4683 lis r0,0x8000 ; Get set for a mask
4684 add r14,r14,r9 ; Point to the entry
4685
4686 bge- ssg32Done ; All done...
4687
4688 lwz r5,sgcESID+4(r14) ; Get the ESID part
4689 srw r2,r0,r12 ; Form a mask for the one we are loading
4690 lwz r7,sgcVSID+4(r14) ; And get the VSID bottom
4691
4692 andc r11,r11,r2 ; Clear the bit
4693 lwz r6,sgcVSID(r14) ; And get the VSID top
4694
4695 rlwinm r2,r5,4,28,31 ; Convert segment to number
4696
4697 xor r7,r7,r30 ; Modify the key before we actually set it
4698 srw r0,r0,r2 ; Get a mask for the SR we are loading
4699 rlwinm r8,r7,19,1,3 ; Insert the keys and N bit
4700 or r17,r17,r0 ; Remember the segment
4701 rlwimi r8,r7,20,12,31 ; Insert bits 4:23 of the VSID
4702 rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents
4703
4704 mtsrin r8,r5 ; Load the segment
4705 b ssg32Enter ; Go enter the next...
4706
4707 .align 5
4708
4709ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags
4710 stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
4711
4712 lis r0,0x8000 ; Get set for a mask
4713 li r2,invalSpace ; Set the invalid address space VSID
4714
4715 nop ; Align loop
4716 nop ; Align loop
4717 andc r16,r16,r17 ; Get list of SRs that were valid before but not now
4718 nop ; Align loop
4719
4720ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate
4721 cmplwi r18,16 ; Have we finished?
4722 srw r22,r0,r18 ; Get the mask bit
4723 rlwinm r23,r18,28,0,3 ; Get the segment register we need
4724 andc r16,r16,r22 ; Get rid of the guy we just did
4725 bge ssg32Really ; Yes, we are really done now...
4726
4727 mtsrin r2,r23 ; Invalidate the SR
4728 b ssg32Inval ; Do the next...
4729
4730 .align 5
4731
4732ssg32Really:
4733 stw r17,validSegs(r29) ; Set the valid SR flags
4734 li r3,1 ; Set kernel/user transition
4735 blr
4736
4737;
4738; This is the 64-bit address space switch code.
4739; First we blow away all of the SLB entries, then walk through the
4740; segment cache, loading the SLB as we go. Afterwards, we release the
4741; cache lock.
4742;
4743; Note that because we have to treat SLBE 0 specially, we do not ever use it...
4744; It's a performance thing...
4745;
4746
4747 .align 5
4748
4749ssg64Enter: cntlzw r12,r11 ; Find the next slot in use
4750 cmplwi r12,pmapSegCacheUse ; See if we are done
4751 slwi r14,r12,4 ; Index to the cache slot
4752 srw r16,r0,r12 ; Form a mask for the one we are loading
4753 add r14,r14,r9 ; Point to the entry
4754 andc r11,r11,r16 ; Clear the bit
4755 bge-- ssg64Done ; All done...
4756
4757 ld r5,sgcESID(r14) ; Get the ESID part
4758 ld r6,sgcVSID(r14) ; And get the VSID part
4759 oris r5,r5,0x0800 ; Turn on the valid bit
4760 or r5,r5,r17 ; Insert the SLB slot
4761 xor r6,r6,r30 ; Modify the key before we actually set it
4762 addi r17,r17,1 ; Bump to the next slot
4763 slbmte r6,r5 ; Make that SLB entry
4764 b ssg64Enter ; Go enter the next...
4765
4766 .align 5
4767
4768 ssg64Done: stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
4769
4770 eqv r16,r16,r16 ; Load up with all foxes
4771 subfic r17,r17,64 ; Get the number of 1 bits we need
4772
4773 sld r16,r16,r17 ; Get a mask for the used SLB entries
4774 li r3,1 ; Set kernel/user transition
4775 std r16,validSegs(r29) ; Set the valid SR flags
4776 blr
4777
4778;
4779; mapSetUp - this function sets initial state for all mapping functions.
4780; We turn off all translations (physical), disable interruptions, and
4781; enter 64-bit mode if applicable.
4782;
4783; We also return the original MSR in R11, the feature flags in R12,
4784; and CR6 set up so we can do easy branches for 64-bit.
4785;
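;
; A typical caller (sketch) does: bl EXT(mapSetUp), the physical work, and
; then restores with mtmsr/mtmsrd r11 (32-/64-bit) followed by isync.
;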
4786
4787 .align 5
4788 .globl EXT(mapSetUp)
4789
4790LEXT(mapSetUp)
4791
4792 lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask
4793 mfsprg r12,2 ; Get feature flags
4794 ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well
4795 mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
4796 mfmsr r11 ; Save the MSR
4797 mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
4798 andc r11,r11,r0 ; Clear VEC and FP for good
4799 ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR
4800 li r2,1 ; Prepare for 64 bit
4801 andc r0,r11,r0 ; Clear the rest
4802 bt pfNoMSRirb,msuNoMSR ; No MSR...
4803 bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint)
4804
4805 mtmsr r0 ; Translation and all off
4806 isync ; Toss prefetch
4807 blr ; Return...
4808
4809 .align 5
4810
4811msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
4812 mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR
4813 isync ; synchronize
4814 blr ; Return...
4815
4816 .align 5
4817
4818msuNoMSR: mr r2,r3 ; Save R3 across call
4819 mr r3,r0 ; Get the new MSR value
4820 li r0,loadMSR ; Get the MSR setter SC
4821 sc ; Set it
4822 mr r3,r2 ; Restore R3
4823 blr ; Go back all set up...
4824
4825
4826;
4827; Find the physent based on a physical page and try to lock it (but not too hard)
4828; Note that this table always has an entry with a 0 table pointer at the end
4829;
4830; R3 contains ppnum on entry
4831; R3 is 0 if no entry was found
4832; R3 is physent if found
4833; cr0_eq is true if lock was obtained or there was no entry to lock
4834; cr0_eq is false if there was an entry and it was locked
4835;
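;
; Illustrative C sketch (not part of the build) of the table walk below; the
; mr* field names follow the asm symbols, entries are 8 bytes wide:
;
;	for (r = pmap_mem_regions; r->mrPhysTab; r++)
;		if (pp >= r->mrStart && pp <= r->mrEnd)
;			return &r->mrPhysTab[pp - r->mrStart];
;	return 0;				// not found
;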
4836
4837 .align 5
4838
4839mapFindPhyTry:
4840 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
4841 mr r2,r3 ; Save our target
4842 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
4843
4844mapFindPhz: lwz r3,mrPhysTab(r9) ; Get the actual table address
4845 lwz r5,mrStart(r9) ; Get start of table entry
4846 lwz r0,mrEnd(r9) ; Get end of table entry
4847 addi r9,r9,mrSize ; Point to the next slot
4848 cmplwi cr2,r3,0 ; Are we at the end of the table?
4849 cmplw r2,r5 ; See if we are in this table
4850 cmplw cr1,r2,r0 ; Check end also
4851 sub r4,r2,r5 ; Calculate index to physical entry
4852 beq-- cr2,mapFindNo ; Leave if we did not find an entry...
4853 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
4854 slwi r4,r4,3 ; Get offset to physical entry
4855
4856 blt-- mapFindPhz ; Did not find it...
4857
4858 add r3,r3,r4 ; Point right to the slot
4859
4860mapFindOv: lwz r2,0(r3) ; Get the lock contents right now
4861 rlwinm. r0,r2,0,0,0 ; Is it locked?
4862 bnelr-- ; Yes it is...
4863
4864 lwarx r2,0,r3 ; Get the lock
4865 rlwinm. r0,r2,0,0,0 ; Is it locked?
4866 oris r0,r2,0x8000 ; Set the lock bit
4867 bne-- mapFindKl ; It is locked, go get rid of reservation and leave...
4868 stwcx. r0,0,r3 ; Try to stuff it back...
4869 bne-- mapFindOv ; Collision, try again...
4870 isync ; Clear any speculations
4871 blr ; Leave...
4872
4873mapFindKl: li r2,lgKillResv ; Killing field
4874 stwcx. r2,0,r2 ; Trash reservation...
4875 crclr cr0_eq ; Make sure we do not think we got the lock
4876 blr ; Leave...
4877
4878mapFindNo: crset cr0_eq ; Make sure that we set this
4879 li r3,0 ; Show that we did not find it
4880 blr ; Leave...
4881;
4882; pmapCacheLookup - This function will look up an entry in the pmap segment cache.
4883;
4884; How the pmap cache lookup works:
4885;
4886; We use a combination of three things: a mask of valid entries, a sub-tag, and the
4887; ESID (aka the "tag"). The mask indicates which of the cache slots actually contain
4888; an entry. The sub-tag is a 16 entry 4 bit array that contains the low order 4 bits
4889; of the ESID, bits 32:35 of the effective address for 64-bit and 0:3 for 32-bit. The cache
4890; entry contains the full 36 bit ESID.
4891;
4892; The purpose of the sub-tag is to limit the number of searches necessary when looking
4893; for an existing cache entry. Because there are 16 slots in the cache, we could end up
4894; searching all 16 if a match is not found.
4895;
4896; Essentially, we will search only the slots that have a valid entry and whose sub-tag
4897; matches. More than likely, we will eliminate almost all of the searches.
4898;
4899; Inputs:
4900; R3 = pmap
4901; R4 = ESID high half
4902; R5 = ESID low half
4903;
4904; Outputs:
4905; R3 = pmap cache slot if found, 0 if not
4906; R10 = pmapCCtl address
4907; R11 = pmapCCtl image
4908; pmapCCtl locked on exit
4909;
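;
; Illustrative C sketch (not part of the build) of the nibble-wise sub-tag
; compare done below; pat is the 4-bit sub-tag replicated into all 8 nibbles:
;
;	uint32_t hits(uint32_t tags, uint32_t pat) {
;		uint32_t m = ~(tags ^ pat);	// 0xF in each matching nibble (eqv)
;		m &= m << 1;			// AND adjacent bit pairs
;		m &= m << 2;			// nibble MSB = AND of all 4 bits
;		return m & 0x88888888;		// one hit bit per nibble
;	}
;
; The two 32-bit results are then compacted into one 16-bit mask, one bit per
; cache slot, and ANDed with the valid mask before the search loop runs.
;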
4910
4911 .align 5
4912
4913pmapCacheLookup:
4914 la r10,pmapCCtl(r3) ; Point to the segment cache control
4915
4916pmapCacheLookuq:
4917 lwarx r11,0,r10 ; Get the segment cache control value
4918 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
4919 ori r0,r11,lo16(pmapCCtlLck) ; Turn on the lock bit
4920 bne-- pmapCacheLookur ; Nope...
4921 stwcx. r0,0,r10 ; Try to take the lock
4922 bne-- pmapCacheLookuq ; Someone else just stuffed it, try again...
4923
4924 isync ; Make sure we get reservation first
4925 lwz r9,pmapSCSubTag(r3) ; Get the high part of the sub-tag
4926 rlwimi r5,r5,28,4,7 ; Copy sub-tag just to right of itself (XX------)
4927 lwz r10,pmapSCSubTag+4(r3) ; And the bottom half
4928 rlwimi r5,r5,24,8,15 ; Copy doubled sub-tag to right of itself (XXXX----)
4929 lis r8,0x8888 ; Get some eights
4930 rlwimi r5,r5,16,16,31 ; Copy quadrupled sub-tags to the right
4931 ori r8,r8,0x8888 ; Fill the rest with eights
4932
4933 eqv r10,r10,r5 ; Get 0xF where we hit in bottom half
4934 eqv r9,r9,r5 ; Get 0xF where we hit in top half
4935
4936 rlwinm r2,r10,1,0,30 ; Shift over 1
4937 rlwinm r0,r9,1,0,30 ; Shift over 1
4938 and r2,r2,r10 ; AND the even/odd pair into the even
4939 and r0,r0,r9 ; AND the even/odd pair into the even
4940 rlwinm r10,r2,2,0,28 ; Shift over 2
4941 rlwinm r9,r0,2,0,28 ; Shift over 2
4942 and r10,r2,r10 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
4943 and r9,r0,r9 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
4944
4945 and r10,r10,r8 ; Clear out extras
4946 and r9,r9,r8 ; Clear out extras
4947
4948 rlwinm r0,r10,3,1,28 ; Slide adjacent next to each other
4949 rlwinm r2,r9,3,1,28 ; Slide adjacent next to each other
4950 or r10,r0,r10 ; Merge them
4951 or r9,r2,r9 ; Merge them
4952 rlwinm r0,r10,6,2,26 ; Slide adjacent pairs next to each other
4953 rlwinm r2,r9,6,2,26 ; Slide adjacent pairs next to each other
4954 or r10,r0,r10 ; Merge them
4955 or r9,r2,r9 ; Merge them
4956 rlwimi r10,r10,12,4,7 ; Stick in the low-order adjacent quad
4957 rlwimi r9,r9,12,4,7 ; Stick in the low-order adjacent quad
4958 not r6,r11 ; Turn invalid into valid
4959 rlwimi r9,r10,24,8,15 ; Merge in the adjacent octs giving a hit mask
4960
4961 la r10,pmapSegCache(r3) ; Point at the cache slots
4962 and. r6,r9,r6 ; Get mask of valid and hit
4963 li r0,0 ; Clear
4964 li r3,0 ; Assume not found
4965 oris r0,r0,0x8000 ; Start a mask
4966 beqlr++ ; Leave, should usually be no hits...
4967
4968pclNextEnt: cntlzw r5,r6 ; Find an in use one
4969 cmplwi cr1,r5,pmapSegCacheUse ; Did we find one?
4970 rlwinm r7,r5,4,0,27 ; Index to the cache entry
4971 srw r2,r0,r5 ; Get validity mask bit
4972 add r7,r7,r10 ; Point to the cache slot
4973 andc r6,r6,r2 ; Clear the validity bit we just tried
4974 bgelr-- cr1 ; Leave if there are no more to check...
4975
4976 lwz r5,sgcESID(r7) ; Get the top half
4977
4978 cmplw r5,r4 ; Only need to check top because sub-tag is the entire other half
4979
4980 bne++ pclNextEnt ; Nope, try again...
4981
4982 mr r3,r7 ; Point to the slot
4983 blr ; Leave....
4984
4985 .align 5
4986
4987pmapCacheLookur:
4988 li r11,lgKillResv ; The killing spot
4989 stwcx. r11,0,r11 ; Kill the reservation
4990
4991pmapCacheLookus:
4992 lwz r11,pmapCCtl(r3) ; Get the segment cache control
4993 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
4994 beq++ pmapCacheLookup ; Nope...
4995 b pmapCacheLookus ; Yup, keep waiting...
4996
4997
4998
4999
5000;
5001; This routine, given a mapping, will find and lock the PTEG
5002; If mpPte does not point to a PTE (checked before and after lock), it will unlock the
5003; PTEG and return. In this case R4 will be undefined and R5 will hold
5004; only the low 12 bits of mpVAddr. R3 will contain 0.
5005;
5006; If the mapping is still valid, we will invalidate the PTE and merge
5007; the RC bits into the physent and also save them into the mapping.
5008;
5009; We then return with R3 pointing to the PTE slot, R4 is the
5010; top of the PTE and R5 is the bottom. R6 contains the PCA.
5011; R7 points to the PCA entry.
5012;
5013; Note that we should NEVER be called on a block or special mapping.
5014; We could do many bad things.
5015;
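;
; Illustrative C sketch (not part of the build) of the RC merge performed at
; mINmerge once the PTE has been invalidated; the names are schematic:
;
;	rc = pte_low & (PTE_R | PTE_C);		// referenced/changed bits
;	mapping->flags |= rc;			// save them in the mapping
;	atomic_or(&physent->flags, rc);		// and merge into the physent
;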
5016
5017 .align 5
5018
5019mapInvPte32:
5020 lwz r0,mpPte(r31) ; Grab the PTE offset
5021 mfsdr1 r7 ; Get the pointer to the hash table
5022 lwz r5,mpVAddr+4(r31) ; Grab the virtual address
5023 rlwinm r10,r7,0,0,15 ; Clean up the hash table base
5024 andi. r3,r0,mpHValid ; Is there a possible PTE?
5025 srwi r7,r0,4 ; Convert to PCA units
5026 rlwinm r7,r7,0,0,29 ; Clean up PCA offset
5027 mflr r2 ; Save the return
5028 subfic r7,r7,-4 ; Convert to -4 based negative index
5029 add r7,r10,r7 ; Point to the PCA directly
5030 beqlr-- ; There was no PTE to start with...
5031
5032 bl mapLockPteg ; Lock the PTEG
5033
5034 lwz r0,mpPte(r31) ; Grab the PTE offset
5035 mtlr r2 ; Restore the LR
5036 andi. r3,r0,mpHValid ; Is there a possible PTE?
5037 beq- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
5038
5039 rlwinm r3,r0,0,0,30 ; Clear the valid bit
5040 add r3,r3,r10 ; Point to actual PTE
5041 lwz r4,0(r3) ; Get the top of the PTE
5042
5043 li r8,tlbieLock ; Get the TLBIE lock
5044 rlwinm r0,r4,0,1,31 ; Clear the valid bit
5045 stw r0,0(r3) ; Invalidate the PTE
5046
5047 sync ; Make sure everyone sees the invalidate
5048
5049mITLBIE32: lwarx r0,0,r8 ; Get the TLBIE lock
5050 mfsprg r2,2 ; Get feature flags
5051 mr. r0,r0 ; Is it locked?
5052 li r0,1 ; Get our lock word
5053 bne- mITLBIE32 ; It is locked, go wait...
5054
5055 stwcx. r0,0,r8 ; Try to get it
5056 bne- mITLBIE32 ; We was beat...
5057
5058 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
5059 li r0,0 ; Lock clear value
5060
5061 tlbie r5 ; Invalidate it everywhere
5062
5063 beq- mINoTS32 ; Cannot have MP on this machine...
5064
5065 eieio ; Make sure that the tlbie happens first
5066 tlbsync ; Wait for everyone to catch up
5067 sync ; Make sure of it all
5068
5069mINoTS32:
5070 stw r0,tlbieLock(0) ; Clear the tlbie lock
5071
5072 lwz r5,4(r3) ; Get the real part
5073 srwi r10,r5,12 ; Change physical address to a ppnum
5074
5075mINmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table
5076 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
5077 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
5078 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
5079 rlwinm r11,r11,2,0,29 ; Change index into byte offset
5080 add r11,r11,r8 ; Point to the bank table
5081 lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer
5082 lwz r11,mrStart(r11) ; Get the start of bank
5083 rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC
5084 addi r2,r2,4 ; Offset to last half of field
5085 stw r0,mpVAddr+4(r31) ; Set the new RC into the field
5086 sub r11,r10,r11 ; Get the index into the table
5087 rlwinm r11,r11,3,0,28 ; Get offset to the physent
5088
5089
5090mImrgRC: lwarx r10,r11,r2 ; Get the master RC
5091 rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC
5092 or r0,r0,r10 ; Merge in the new RC
5093 stwcx. r0,r11,r2 ; Try to stick it back
5094 bne-- mImrgRC ; Try again if we collided...
5095
5096 blr ; Leave with the PCA still locked up...
5097
5098mIPUnlock: eieio ; Make sure all updates come first
5099
5100 stw r6,0(r7) ; Unlock
5101 blr
5102
5103;
5104; 64-bit version
5105;
5106 .align 5
5107
5108mapInvPte64:
5109 lwz r0,mpPte(r31) ; Grab the PTE offset
5110 ld r5,mpVAddr(r31) ; Grab the virtual address
5111 mfsdr1 r7 ; Get the pointer to the hash table
5112 rldicr r10,r7,0,45 ; Clean up the hash table base
5113 andi. r3,r0,mpHValid ; Is there a possible PTE?
5114 srdi r7,r0,5 ; Convert to PCA units
5115 rldicr r7,r7,0,61 ; Clean up PCA
5116 subfic r7,r7,-4 ; Convert to -4 based negative index
5117 mflr r2 ; Save the return
5118 add r7,r10,r7 ; Point to the PCA directly
5119 beqlr-- ; There was no PTE to start with...
5120
5121 bl mapLockPteg ; Lock the PTEG
5122
5123 lwz r0,mpPte(r31) ; Grab the PTE offset again
5124 mtlr r2 ; Restore the LR
5125 andi. r3,r0,mpHValid ; Is there a possible PTE?
5126 beq-- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
5127
5128 rlwinm r3,r0,0,0,30 ; Clear the valid bit
5129 add r3,r3,r10 ; Point to the actual PTE
5130 ld r4,0(r3) ; Get the top of the PTE
5131
5132 li r8,tlbieLock ; Get the TLBIE lock
5133 rldicr r0,r4,0,62 ; Clear the valid bit
5134 std r0,0(r3) ; Invalidate the PTE
5135
5136 rldicr r2,r4,16,35 ; Shift the AVPN over to match VPN
5137 sync ; Make sure everyone sees the invalidate
5138 rldimi r2,r5,0,36 ; Cram in the page portion of the EA
5139
5140mITLBIE64: lwarx r0,0,r8 ; Get the TLBIE lock
5141 mr. r0,r0 ; Is it locked?
5142 li r0,1 ; Get our lock word
5143 bne-- mITLBIE64a ; It is locked, toss reservation and wait...
5144
5145 stwcx. r0,0,r8 ; Try to get it
5146 bne-- mITLBIE64 ; We was beat...
5147
5148 rldicl r2,r2,0,16 ; Clear bits 0:15 because we are under orders
5149
5150 li r0,0 ; Lock clear value
5151
5152 tlbie r2 ; Invalidate it everywhere
5153
5154 eieio ; Make sure that the tlbie happens first
5155 tlbsync ; Wait for everyone to catch up
5156 isync
5157 ptesync ; Wait for quiet again
5158
5159mINoTS64:
5160 stw r0,tlbieLock(0) ; Clear the tlbie lock
5161
5162 sync ; Make sure of it all
5163
5164 ld r5,8(r3) ; Get the real part
5165 srdi r10,r5,12 ; Change physical address to a ppnum
5166 b mINmerge ; Join the common 32-64-bit code...
5167
5168mITLBIE64a: li r5,lgKillResv ; Killing field
5169 stwcx. r5,0,r5 ; Kill reservation
5170
5171mITLBIE64b: lwz r0,0(r8) ; Get the TLBIE lock
5172 mr. r0,r0 ; Is it locked?
5173 beq++ mITLBIE64 ; Nope, try again...
5174 b mITLBIE64b ; Yup, wait for it...
5175
5176;
5177; mapLockPteg - Locks a PTEG
5178; R7 points to PCA entry
5179; R6 contains PCA on return
5180;
5181;
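;
; Illustrative C sketch (not part of the build) of the protocol below; on
; contention we kill our reservation and spin on plain loads first:
;
;	for (;;) {
;		v = load_reserved(pca);			// lwarx
;		if (v & PCAlock) {			// already locked?
;			kill_reservation();		// mLSkill
;			while (*pca & PCAlock) ;	// mapLockPteh: wait cheaply
;			continue;
;		}
;		if (store_cond(pca, v | PCAlock)) break;	// stwcx.
;	}
;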
5182
5183 .align 5
5184
5185mapLockPteg:
5186 lwarx r6,0,r7 ; Pick up the PCA
5187 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
5188 ori r0,r6,PCAlock ; Set the lock bit
5189 bne-- mLSkill ; It is locked...
5190
5191 stwcx. r0,0,r7 ; Try to lock the PTEG
5192 bne-- mapLockPteg ; We collided...
5193
5194 isync ; Nostradamus lied
5195 blr ; Leave...
5196
5197mLSkill: li r6,lgKillResv ; Get killing field
5198 stwcx. r6,0,r6 ; Kill it
5199
5200mapLockPteh:
5201 lwz r6,0(r7) ; Pick up the PCA
5202 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
5203 beq++ mapLockPteg ; Nope, try again...
5204 b mapLockPteh ; Yes, wait for it...
5205
5206
5207;
5208; The mapSelSlot function selects a PTEG slot to use. As input, it expects R6
5209; to contain the PCA. When it returns, R3 contains 0 if an unoccupied slot was
5210; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
5211; R4 returns the slot index.
5212;
5213; CR7 also indicates that we have a block mapping
5214;
5215; The PTEG allocation controls are a bit map of the state of the PTEG.
5216; PCAfree indicates that the PTE slot is empty.
5217; PCAauto means that it comes from an autogen area. These
5218; guys do not keep track of reference and change and are actually "wired".
5219; They are easy to maintain. PCAsteal
5220; is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these
5221; fields fit in a single word and are loaded and stored under control of the
5222; PTEG control area lock (PCAlock).
5223;
5224; Note that PCAauto does not contribute to the steal calculations at all. Originally
5225; it did, autogens were second in priority. This can result in a pathological
5226; case where an instruction cannot make forward progress, or one PTE slot
5227; thrashes.
5228;
5229; Note that the PCA must be locked when we get here.
5230;
5231; Physically, the fields are arranged:
5232; 0: PCAfree
5233; 1: PCAsteal
5234; 2: PCAauto
5235; 3: PCAmisc
5236;
5237;
5238; At entry, R6 contains new unlocked PCA image (real PCA is locked and untouched)
5239;
5240; At exit:
5241;
5242; R3 = 0 - no steal
5243; R3 = 1 - steal regular
5244; R3 = 2 - steal autogen
5245; R4 contains slot number
5246; R6 contains updated PCA image
5247;
5248
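;
; Illustrative C sketch (not part of the build) of the PCA allocation word
; laid out as described above (big-endian, one bit per PTE slot per byte):
;
;	struct pcaAlloc {
;		uint8_t PCAfree;		// slot is empty
;		uint8_t PCAsteal;		// sliding steal-position mask
;		uint8_t PCAauto;		// slot holds an autogen PTE
;		uint8_t PCAmisc;
;	};
;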
5249 .align 5
5250
5251mapSelSlot: lis r10,0 ; Clear autogen mask
5252 li r9,0 ; Start a mask
5253 beq cr7,mSSnotblk ; Skip if this is not a block mapping
5254 ori r10,r10,lo16(0xFFFF) ; Make sure we mark a block mapping (autogen)
5255
5256mSSnotblk: rlwinm r11,r6,16,24,31 ; Isolate just the steal mask
5257 oris r9,r9,0x8000 ; Get a mask
5258 cntlzw r4,r6 ; Find a slot or steal one
5259 ori r9,r9,lo16(0x8000) ; Ensure that we have 0x80008000
5260 rlwinm r4,r4,0,29,31 ; Isolate bit position
5261 rlwimi r11,r11,8,16,23 ; Get set to march a 1 back into top of 8 bit rotate
5262 srw r2,r9,r4 ; Get mask to isolate selected inuse and autogen flags
5263 srwi r11,r11,1 ; Slide steal mask right
5264 and r8,r6,r2 ; Isolate the old in use and autogen bits
5265 andc r6,r6,r2 ; Allocate the slot and also clear autogen flag
5266 addi r0,r8,0x7F00 ; Push autogen flag to bit 16
5267 and r2,r2,r10 ; Keep the autogen part if autogen
5268 addis r8,r8,0xFF00 ; Push in use to bit 0 and invert
5269 or r6,r6,r2 ; Add in the new autogen bit
5270 rlwinm r0,r0,17,31,31 ; Get a 1 if the old was autogenned (always 0 if not in use)
5271 rlwinm r8,r8,1,31,31 ; Isolate old in use
5272 rlwimi r6,r11,16,8,15 ; Stick the new steal slot in
5273
5274 add r3,r0,r8 ; Get 0 if no steal, 1 if steal normal, 2 if steal autogen
5275 blr ; Leave...
5276
5277;
5278; Shared/Exclusive locks
5279;
5280; A shared/exclusive lock allows multiple shares of a lock to be taken
5281; but only one exclusive. A shared lock can be "promoted" to exclusive
5282; when it is the only share. If there are multiple sharers, the lock
5283; must be "converted". A promotion drops the share and gains exclusive as
5284; an atomic operation. If anyone else has a share, the operation fails.
5285; A conversion first drops the share and then takes an exclusive lock.
5286;
5287; We will want to add a timeout to this eventually.
5288;
5289; R3 is set to 0 for success, non-zero for failure
5290;
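;
; Illustrative C sketch (not part of the build) of the lock word protocol;
; 0 means free, 0x80000000 held exclusive, n means n shares; cas, atomic_dec
; and exclusive are schematic stand-ins:
;
;	boolean_t promote(uint32_t *lk) {	// only share -> exclusive
;		uint32_t expect = 1;
;		return cas(lk, &expect, 0x80000000);	// fails if others share
;	}
;	void convert(uint32_t *lk) {		// drop share, take exclusive
;		if (!promote(lk)) { atomic_dec(lk); exclusive(lk); }
;	}
;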
5291
5292;
5293; Convert a share into an exclusive
5294;
5295
5296 .align 5
5297
5298sxlkConvert:
5299
5300 lis r0,0x8000 ; Get the locked lock image
5301#if 0
5302 mflr r0 ; (TEST/DEBUG)
5303 oris r0,r0,0x8000 ; (TEST/DEBUG)
5304#endif
5305
5306sxlkCTry: lwarx r2,0,r3 ; Get the lock word
5307 cmplwi r2,1 ; Does it just have our share?
5308 subi r2,r2,1 ; Drop our share in case we do not get it
5309 bne-- sxlkCnotfree ; No, we need to unlock...
5310 stwcx. r0,0,r3 ; Try to take it exclusively
5311 bne-- sxlkCTry ; Collision, try again...
5312
5313 isync
5314 li r3,0 ; Set RC
5315 blr ; Leave...
5316
5317sxlkCnotfree:
5318 stwcx. r2,0,r3 ; Try to drop our share...
5319 bne-- sxlkCTry ; Try again if we collided...
5320 b sxlkExclusive ; Go take it exclusively...
5321
5322;
5323; Promote shared to exclusive
5324;
5325
5326 .align 5
5327
5328sxlkPromote:
5329 lis r0,0x8000 ; Get the locked lock image
5330#if 0
5331 mflr r0 ; (TEST/DEBUG)
5332 oris r0,r0,0x8000 ; (TEST/DEBUG)
5333#endif
5334
5335sxlkPTry: lwarx r2,0,r3 ; Get the lock word
5336 cmplwi r2,1 ; Does it just have our share?
5337 bne-- sxlkPkill ; No, just fail (R3 is non-zero)...
5338 stwcx. r0,0,r3 ; Try to take it exclusively
5339 bne-- sxlkPTry ; Collision, try again...
5340
5341 isync
5342 li r3,0 ; Set RC
5343 blr ; Leave...
5344
5345sxlkPkill: li r2,lgKillResv ; Point to killing field
5346 stwcx. r2,0,r2 ; Kill reservation
5347 blr ; Leave
5348
5349
5350
5351;
5352; Take lock exclusively
5353;
5354
5355 .align 5
5356
5357sxlkExclusive:
5358 lis r0,0x8000 ; Get the locked lock image
5359#if 0
5360 mflr r0 ; (TEST/DEBUG)
5361 oris r0,r0,0x8000 ; (TEST/DEBUG)
5362#endif
5363
5364sxlkXTry: lwarx r2,0,r3 ; Get the lock word
5365 mr. r2,r2 ; Is it locked?
5366 bne-- sxlkXWait ; Yes...
5367 stwcx. r0,0,r3 ; Try to take it
5368 bne-- sxlkXTry ; Collision, try again...
5369
5370 isync ; Toss anything younger than us
5371 li r3,0 ; Set RC
5372 blr ; Leave...
5373
5374 .align 5
5375
5376sxlkXWait: li r2,lgKillResv ; Point to killing field
5377 stwcx. r2,0,r2 ; Kill reservation
5378
5379sxlkXWaiu: lwz r2,0(r3) ; Get the lock again
5380 mr. r2,r2 ; Is it free yet?
5381 beq++ sxlkXTry ; Yup...
5382 b sxlkXWaiu ; Hang around a bit more...
5383
5384;
5385; Take a share of the lock
5386;
5387
5388 .align 5
5389
5390sxlkShared: lwarx r2,0,r3 ; Get the lock word
5391 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
5392 addi r2,r2,1 ; Up the share count
5393 bne-- sxlkSWait ; Yes...
5394 stwcx. r2,0,r3 ; Try to take it
5395 bne-- sxlkShared ; Collision, try again...
5396
5397 isync ; Toss anything younger than us
5398 li r3,0 ; Set RC
5399 blr ; Leave...
5400
5401 .align 5
5402
5403sxlkSWait: li r2,lgKillResv ; Point to killing field
5404 stwcx. r2,0,r2 ; Kill reservation
5405
5406sxlkSWaiu: lwz r2,0(r3) ; Get the lock again
5407 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
5408 beq++ sxlkShared ; Nope...
5409 b sxlkSWaiu ; Hang around a bit more...
5410
5411;
5412; Unlock either exclusive or shared.
5413;
5414
5415 .align 5
5416
5417sxlkUnlock: eieio ; Make sure we order our stores out
5418
5419sxlkUnTry: lwarx r2,0,r3 ; Get the lock
5420 rlwinm. r0,r2,0,0,0 ; Do we hold it exclusively?
5421 subi r2,r2,1 ; Remove our share if we have one
5422 li r0,0 ; Clear this
5423 bne-- sxlkUExclu ; We hold exclusive...
5424
5425 stwcx. r2,0,r3 ; Try to lose our share
5426 bne-- sxlkUnTry ; Collision...
5427 blr ; Leave...
5428
5429sxlkUExclu: stwcx. r0,0,r3 ; Unlock and release reservation
5430 beqlr++ ; Leave if ok...
5431 b sxlkUnTry ; Could not store, try over...
5432
5433
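;
; fillPage - fill a physical page with a 32-bit pattern.
; R3 = physical page number (shifted left by 12 below to form the address),
; R4 = fill value. Translation and interruptions are turned off while the
; page is filled a cache line at a time with dcbz/dcbz128 plus stores.
;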
5434 .align 5
5435 .globl EXT(fillPage)
5436
5437LEXT(fillPage)
5438
5439 mfsprg r0,2 ; Get feature flags
5440 mtcrf 0x02,r0 ; move pf64Bit to cr
5441
5442 rlwinm r4,r4,0,1,0 ; Copy fill to top of 64-bit register
5443 lis r2,0x0200 ; Get vec
5444 mr r6,r4 ; Copy
5445 ori r2,r2,0x2000 ; Get FP
5446 mr r7,r4 ; Copy
5447 mfmsr r5 ; Get MSR
5448 mr r8,r4 ; Copy
5449 andc r5,r5,r2 ; Clear out permanent turn-offs
5450 mr r9,r4 ; Copy
5451 ori r2,r2,0x8030 ; Clear IR, DR and EE
5452 mr r10,r4 ; Copy
5453 andc r0,r5,r2 ; Kill them
5454 mr r11,r4 ; Copy
5455 mr r12,r4 ; Copy
5456 bt++ pf64Bitb,fpSF1 ; skip if 64-bit (only they take the hint)
5457
5458 slwi r3,r3,12 ; Make into a physical address
5459 mtmsr r0 ; Interrupts and translation off
5460 isync
5461
5462 li r2,4096/32 ; Get number of cache lines
5463
5464fp32again: dcbz 0,r3 ; Clear
5465 addic. r2,r2,-1 ; Count down
5466 stw r4,0(r3) ; Fill
5467 stw r6,4(r3) ; Fill
5468 stw r7,8(r3) ; Fill
5469 stw r8,12(r3) ; Fill
5470 stw r9,16(r3) ; Fill
5471 stw r10,20(r3) ; Fill
5472 stw r11,24(r3) ; Fill
5473 stw r12,28(r3) ; Fill
5474 addi r3,r3,32 ; Point next
5475 bgt+ fp32again ; Keep going
5476
5477 mtmsr r5 ; Restore all
5478 isync
5479 blr ; Return...
5480
5481 .align 5
5482
5483fpSF1: li r2,1
5484 sldi r2,r2,63 ; Get 64-bit bit
5485 or r0,r0,r2 ; Turn on 64-bit
5486 sldi r3,r3,12 ; Make into a physical address
5487
5488 mtmsrd r0 ; Interrupts and translation off
5489 isync
5490
5491 li r2,4096/128 ; Get number of cache lines
5492
5493fp64again: dcbz128 0,r3 ; Clear
5494 addic. r2,r2,-1 ; Count down
5495 std r4,0(r3) ; Fill
5496 std r6,8(r3) ; Fill
5497 std r7,16(r3) ; Fill
5498 std r8,24(r3) ; Fill
5499 std r9,32(r3) ; Fill
5500 std r10,40(r3) ; Fill
5501 std r11,48(r3) ; Fill
5502 std r12,56(r3) ; Fill
5503 std r4,64+0(r3) ; Fill
5504 std r6,64+8(r3) ; Fill
5505 std r7,64+16(r3) ; Fill
5506 std r8,64+24(r3) ; Fill
5507 std r9,64+32(r3) ; Fill
5508 std r10,64+40(r3) ; Fill
5509 std r11,64+48(r3) ; Fill
5510 std r12,64+56(r3) ; Fill
5511 addi r3,r3,128 ; Point next
5512 bgt+ fp64again ; Keep going
5513
5514 mtmsrd r5 ; Restore all
5515 isync
5516 blr ; Return...
5517
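;
; mapLog - debug trace. Appends a 16-byte entry (R4, R4, R5, R6) at the
; address in mapdebug (or at R3 if mapdebug is 0) with data relocation off,
; then advances the stored pointer.
;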
5518 .align 5
5519 .globl EXT(mapLog)
5520
5521LEXT(mapLog)
5522
5523 mfmsr r12
5524 lis r11,hi16(EXT(mapdebug))
5525 ori r11,r11,lo16(EXT(mapdebug))
5526 lwz r10,0(r11)
5527 mr. r10,r10
5528 bne++ mLxx
5529 mr r10,r3
5530mLxx: rlwinm r0,r12,0,MSR_DR_BIT+1,MSR_DR_BIT-1
5531 mtmsr r0
5532 isync
5533 stw r4,0(r10)
5534 stw r4,4(r10)
5535 stw r5,8(r10)
5536 stw r6,12(r10)
5537 mtmsr r12
5538 isync
5539 addi r10,r10,16
5540 stw r10,0(r11)
5541 blr
5542
5543#if 1
5544 .align 5
5545 .globl EXT(checkBogus)
5546
5547LEXT(checkBogus)
5548
5549 BREAKPOINT_TRAP
5550 blr ; No-op normally
5551
5552#endif
5553
5554
5555
5556