/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <assym.s>
#include <debug.h>
#include <cpus.h>
#include <db_machine_commands.h>
#include <mach_rt.h>

#include <mach_debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <mach/ppc/vm_param.h>

#define INSTRUMENT 0

        .text

;
;         0        0        1        2        3        4        4        5      6
;         0        8        6        4        2        0        8        6      3
;        +--------+--------+--------+--------+--------+--------+--------+--------+
;        |00000000|00000SSS|SSSSSSSS|SSSSSSSS|SSSSPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - EA
;        +--------+--------+--------+--------+--------+--------+--------+--------+
;
;         0        0        1
;         0        8        6
;        +--------+--------+--------+
;        |//////BB|BBBBBBBB|BBBB////| - SID - base
;        +--------+--------+--------+
;
;         0        0        1
;         0        8        6
;        +--------+--------+--------+
;        |////////|11111111|111111//| - SID - copy 1
;        +--------+--------+--------+
;
;         0        0        1
;         0        8        6
;        +--------+--------+--------+
;        |////////|//222222|22222222| - SID - copy 2
;        +--------+--------+--------+
;
;         0        0        1
;         0        8        6
;        +--------+--------+--------+
;        |//////33|33333333|33//////| - SID - copy 3 - not needed
;        +--------+--------+--------+              for 65 bit VPN
;
;         0        0        1        2        3        4        4  5   5
;         0        8        6        4        2        0        8  1   5
;        +--------+--------+--------+--------+--------+--------+--------+
;        |00000000|00000002|22222222|11111111|111111BB|BBBBBBBB|BBBB////| - SID Hash - this is all
;        +--------+--------+--------+--------+--------+--------+--------+   SID copies ORed
;
;         0        0        1        2        3        4        4  5   5
;         0        8        6        4        2        0        8  1   5
;        +--------+--------+--------+--------+--------+--------+--------+
;        |00000000|0000000S|SSSSSSSS|SSSSSSSS|SSSSSS00|00000000|0000////| - Shifted high order EA
;        +--------+--------+--------+--------+--------+--------+--------+   left shifted "segment"
;                                                                            part of EA to make
;                                                                            room for SID base
;
;         0        0        1        2        3        4        4  5   5
;         0        8        6        4        2        0        8  1   5
;        +--------+--------+--------+--------+--------+--------+--------+
;        |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////| - VSID - SID Hash XORed
;        +--------+--------+--------+--------+--------+--------+--------+   with shifted EA
;
;         0        0        1        2        3        4        4        5        6        7      7
;         0        8        6        4        2        0        8        6        4        2      9
;        +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;        |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVPPPP|PPPPPPPP|PPPPxxxx|xxxxxxxx| - VPN
;        +--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
;


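;
;       As an illustrative aside (a sketch in C, not part of this file's build;
;       the helper names and the exact shift counts are schematic assumptions
;       that restate the diagrams above, not the precise field placement the
;       code below performs with rlwimi/xor):
;
;           #include <stdint.h>
;
;           // The space ID is replicated at three shifted positions and ORed
;           // together (the "SID copies"), then XORed with the left-shifted
;           // segment portion of the EA to form the VSID.
;           static inline uint64_t sketchVSID(uint32_t spaceID, uint64_t ea)
;           {
;               uint64_t sidHash = (uint64_t)spaceID
;                                | ((uint64_t)spaceID << 14)
;                                | ((uint64_t)spaceID << 28);  // SID copies ORed
;               return sidHash ^ (ea >> 28);                   // XOR with shifted EA
;           }
;
;           // The VPN is the VSID with the page-number part of the EA appended.
;           static inline uint64_t sketchVPN(uint32_t spaceID, uint64_t ea)
;           {
;               return (sketchVSID(spaceID, ea) << 16) | ((ea >> 12) & 0xFFFF);
;           }
;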
/* addr64_t hw_add_map(struct pmap *pmap, struct mapping *mp) - Adds a mapping
 *
 * Maps a page or block into a pmap
 *
 * Returns 0 if the add worked, or the vaddr of the first overlap if not
 *
 * Make mapping - not block or I/O - note: this is low-level, upper layers should remove duplicates
 *
 * 1) bump mapping busy count
 * 2) lock pmap share
 * 3) find mapping full path - finds all possible list previous elements
 * 4) upgrade pmap to exclusive
 * 5) add mapping to search list
 * 6) find physent
 * 7) lock physent
 * 8) add to physent
 * 9) unlock physent
 * 10) unlock pmap
 * 11) drop mapping busy count
 *
 *
 * Make mapping - block or I/O - note: this is low-level, upper layers should remove duplicates
 *
 * 1) bump mapping busy count
 * 2) lock pmap share
 * 3) find mapping full path - finds all possible list previous elements
 * 4) upgrade pmap to exclusive
 * 5) add mapping to search list
 * 6) unlock pmap
 * 7) drop mapping busy count
 *
 */

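;
;       A rough C rendering of the protocol above (a sketch only, not part of
;       the build; the lock and search helpers mirror the assembly routines
;       this file calls, but the C types, signatures, and the overlapVaddr/
;       blockOrNest/addToPhysent helpers are assumptions for illustration):
;
;           addr64_t hw_add_map_sketch(pmap_t pmap, struct mapping *mp)
;           {
;               if (sxlkShared(&pmap->pmapSXlk) != 0)
;                   return mapRtBadLk;                  // could not get shared lock
;               for (;;) {
;                   if (mapSearchFull(pmap, mp->mpVAddr) != NULL)
;                       return overlapVaddr(mp);        // collision: hand back vaddr
;                   if (sxlkPromote(&pmap->pmapSXlk) == 0)
;                       break;                          // lock is now exclusive
;                   if (sxlkConvert(&pmap->pmapSXlk) != 0)
;                       return mapRtBadLk;              // timed out
;                   // convert dropped the lock, so the search must be redone
;               }
;               mapInsert(pmap, mp);                    // add mapping to search list
;               if (!blockOrNest(mp))
;                   addToPhysent(mp);                   // physent work for real pages
;               sxlkUnlock(&pmap->pmapSXlk);
;               mapDropBusy(mp);
;               return 0;                               // success
;           }
;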
        .align 5
        .globl EXT(hw_add_map)

LEXT(hw_add_map)

        stwu r1,-(FM_ALIGN((31-17+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
        mflr r0 ; Save the link register
        stw r17,FM_ARG0+0x00(r1) ; Save a register
        stw r18,FM_ARG0+0x04(r1) ; Save a register
        stw r19,FM_ARG0+0x08(r1) ; Save a register
        mfsprg r19,2 ; Get feature flags
        stw r20,FM_ARG0+0x0C(r1) ; Save a register
        stw r21,FM_ARG0+0x10(r1) ; Save a register
        mtcrf 0x02,r19 ; Move pf64Bit to cr6
        stw r22,FM_ARG0+0x14(r1) ; Save a register
        stw r23,FM_ARG0+0x18(r1) ; Save a register
        stw r24,FM_ARG0+0x1C(r1) ; Save a register
        stw r25,FM_ARG0+0x20(r1) ; Save a register
        stw r26,FM_ARG0+0x24(r1) ; Save a register
        stw r27,FM_ARG0+0x28(r1) ; Save a register
        stw r28,FM_ARG0+0x2C(r1) ; Save a register
        stw r29,FM_ARG0+0x30(r1) ; Save a register
        stw r30,FM_ARG0+0x34(r1) ; Save a register
        stw r31,FM_ARG0+0x38(r1) ; Save a register
        stw r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

        rlwinm r11,r4,0,0,19 ; Round down to get mapping block address
        mr r28,r3 ; Save the pmap
        mr r31,r4 ; Save the mapping
        bt++ pf64Bitb,hamSF1 ; Skip if 64-bit (only they take the hint)
        lwz r20,pmapvr+4(r3) ; Get conversion mask for pmap
        lwz r21,mbvrswap+4(r11) ; Get conversion mask for mapping

        b hamSF1x ; Done...

hamSF1: ld r20,pmapvr(r3) ; Get conversion mask for pmap
        ld r21,mbvrswap(r11) ; Get conversion mask for mapping

hamSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit

        mr r17,r11 ; Save the MSR
        xor r28,r28,r20 ; Convert the pmap to physical addressing
        xor r31,r31,r21 ; Convert the mapping to physical addressing

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkShared ; Go get a shared lock on the mapping lists
        mr. r3,r3 ; Did we get the lock?
        lwz r24,mpFlags(r31) ; Pick up the flags
        bne-- hamBadLock ; Nope...

        li r21,0 ; Remember that we have the shared lock

;
;       Note that we do a full search (i.e., no shortcut level skips, etc.)
;       here so that we will know the previous elements so we can dequeue them
;       later.
;

hamRescan: lwz r4,mpVAddr(r31) ; Get the new vaddr top half
        lwz r5,mpVAddr+4(r31) ; Get the new vaddr bottom half
        mr r3,r28 ; Pass in pmap to search
        lhz r23,mpBSize(r31) ; Get the block size for later
        mr r29,r4 ; Save top half of vaddr for later
        mr r30,r5 ; Save bottom half of vaddr for later

#if INSTRUMENT
        mfspr r0,pmc1 ; INSTRUMENT - saveinstr[16] - Take stamp before mapSearchFull
        stw r0,0x6100+(16*16)+0x0(0) ; INSTRUMENT - Save it
        mfspr r0,pmc2 ; INSTRUMENT - Get stamp
        stw r0,0x6100+(16*16)+0x4(0) ; INSTRUMENT - Save it
        mfspr r0,pmc3 ; INSTRUMENT - Get stamp
        stw r0,0x6100+(16*16)+0x8(0) ; INSTRUMENT - Save it
        mfspr r0,pmc4 ; INSTRUMENT - Get stamp
        stw r0,0x6100+(16*16)+0xC(0) ; INSTRUMENT - Save it
#endif

        bl EXT(mapSearchFull) ; Go see if we can find it

#if INSTRUMENT
        mfspr r0,pmc1 ; INSTRUMENT - saveinstr[17] - Take stamp after mapSearchFull
        stw r0,0x6100+(17*16)+0x0(0) ; INSTRUMENT - Save it
        mfspr r0,pmc2 ; INSTRUMENT - Get stamp
        stw r0,0x6100+(17*16)+0x4(0) ; INSTRUMENT - Save it
        mfspr r0,pmc3 ; INSTRUMENT - Get stamp
        stw r0,0x6100+(17*16)+0x8(0) ; INSTRUMENT - Save it
        mfspr r0,pmc4 ; INSTRUMENT - Get stamp
        stw r0,0x6100+(17*16)+0xC(0) ; INSTRUMENT - Save it
#endif

        andi. r0,r24,mpNest ; See if we are a nest
        rlwinm r23,r23,12,0,19 ; Convert standard block size to bytes
        lis r0,0x8000 ; Get 0xFFFFFFFF80000000
        li r22,0 ; Assume high part of size is 0
        beq++ hamNoNest ; This is not a nest...

        rlwinm r22,r23,16,16,31 ; Convert partially converted size to segments
        rlwinm r23,r23,16,0,3 ; Finish shift

hamNoNest: add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
        mr. r3,r3 ; Did we find a mapping here?
        or r0,r0,r30 ; Make sure a carry will propagate all the way in 64-bit
        crmove cr5_eq,cr0_eq ; Remember whether we found the mapping
        addc r9,r0,r23 ; Add size to get last page in new range
        or. r0,r4,r5 ; Are we beyond the end?
        adde r8,r29,r22 ; Add the rest of the length on
        bne-- cr5,hamOverlay ; Yeah, this is no good, can not double map...
        rlwinm r9,r9,0,0,31 ; Clean top half of sum
        beq++ hamFits ; We are at the end...

        cmplw cr1,r9,r5 ; Is the bottom part of our end less?
        cmplw r8,r4 ; Is our end before the next (top part)?
        crand cr0_eq,cr0_eq,cr1_lt ; Is the second half less and the first half equal?
        cror cr0_eq,cr0_eq,cr0_lt ; Or is the top half less?

        bf-- cr0_eq,hamOverlay ; No, we do not fit; there is an overlay...

;
;       Here we try to promote to an exclusive lock.  This will fail if someone else
;       has it shared.
;
hamFits: mr. r21,r21 ; Do we already have the exclusive lock?
        la r3,pmapSXlk(r28) ; Point to the pmap search lock

        bne-- hamGotX ; We already have the exclusive...

        bl sxlkPromote ; Try to promote shared to exclusive
        mr. r3,r3 ; Could we?
        beq++ hamGotX ; Yeah...

;
;       Since we could not promote our lock, we need to convert it instead.
;       That means that we drop the shared lock and wait to get it
;       exclusive.  Since we release the lock, we need to do the look up
;       again.
;

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkConvert ; Convert shared to exclusive
        mr. r3,r3 ; Could we?
        bne-- hamBadLock ; Nope, we must have timed out...

        li r21,1 ; Remember that we have the exclusive lock
        b hamRescan ; Go look again...

        .align 5

hamGotX:
#if INSTRUMENT
        mfspr r3,pmc1 ; INSTRUMENT - saveinstr[18] - Take stamp before mapInsert
        stw r3,0x6100+(18*16)+0x0(0) ; INSTRUMENT - Save it
        mfspr r3,pmc2 ; INSTRUMENT - Get stamp
        stw r3,0x6100+(18*16)+0x4(0) ; INSTRUMENT - Save it
        mfspr r3,pmc3 ; INSTRUMENT - Get stamp
        stw r3,0x6100+(18*16)+0x8(0) ; INSTRUMENT - Save it
        mfspr r3,pmc4 ; INSTRUMENT - Get stamp
        stw r3,0x6100+(18*16)+0xC(0) ; INSTRUMENT - Save it
#endif
        mr r3,r28 ; Get the pmap to insert into
        mr r4,r31 ; Point to the mapping
        bl EXT(mapInsert) ; Insert the mapping into the list

#if INSTRUMENT
        mfspr r4,pmc1 ; INSTRUMENT - saveinstr[19] - Take stamp after mapInsert
        stw r4,0x6100+(19*16)+0x0(0) ; INSTRUMENT - Save it
        mfspr r4,pmc2 ; INSTRUMENT - Get stamp
        stw r4,0x6100+(19*16)+0x4(0) ; INSTRUMENT - Save it
        mfspr r4,pmc3 ; INSTRUMENT - Get stamp
        stw r4,0x6100+(19*16)+0x8(0) ; INSTRUMENT - Save it
        mfspr r4,pmc4 ; INSTRUMENT - Get stamp
        stw r4,0x6100+(19*16)+0xC(0) ; INSTRUMENT - Save it
#endif

        lhz r8,mpSpace(r31) ; Get the address space
        mfsdr1 r7 ; Get the hash table base/bounds
        lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
        andi. r0,r24,mpNest|mpBlock ; Is this a nest or block?

        rlwimi r8,r8,14,4,17 ; Double address space
        rlwinm r9,r30,20,16,31 ; Isolate the page number
        rlwinm r10,r30,18,14,17 ; Shift EA[32:35] down to correct spot in VSID (actually shift up 14)
        rlwimi r8,r8,28,0,3 ; Get the last nybble of the hash
        rlwimi r10,r29,18,0,13 ; Shift EA[18:31] down to VSID (31-bit math works because of max hash table size)
        rlwinm r7,r7,0,16,31 ; Isolate length mask (or count)
        addi r4,r4,1 ; Bump up the mapped page count
        xor r10,r10,r8 ; Calculate the low 32 bits of the VSID
        stw r4,pmapResidentCnt(r28) ; Set the mapped page count
        xor r9,r9,r10 ; Get the hash to the PTEG

        bne-- hamDoneNP ; This is a block or nest, therefore, no physent...

        bl mapPhysFindLock ; Go find and lock the physent

        bt++ pf64Bitb,ham64 ; This is 64-bit...

        lwz r11,ppLink+4(r3) ; Get the alias chain pointer
        rlwinm r7,r7,16,0,15 ; Get the PTEG wrap size
        slwi r9,r9,6 ; Make PTEG offset
        ori r7,r7,0xFFC0 ; Stick in the bottom part
        rlwinm r12,r11,0,0,25 ; Clean it up
        and r9,r9,r7 ; Wrap offset into table
        mr r4,r31 ; Set the link to install
        stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
        stw r12,mpAlias+4(r31) ; Move the old chain head into the mapping
        bl mapPhyCSet32 ; Install the link
        b hamDone ; Go finish up...

        .align 5

ham64:  li r0,0xFF ; Get mask to clean up alias pointer
        subfic r7,r7,46 ; Get number of leading zeros
        eqv r4,r4,r4 ; Get all ones
        ld r11,ppLink(r3) ; Get the alias chain pointer
        rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC00000000000003F
        srd r4,r4,r7 ; Get the wrap mask
        sldi r9,r9,7 ; Change hash to PTEG offset
        andc r11,r11,r0 ; Clean out the lock and flags
        and r9,r9,r4 ; Wrap to PTEG
        mr r4,r31 ; Set the link to install
        stw r9,mpPte(r31) ; Point the mapping at the PTEG (exact offset is invalid)
        std r11,mpAlias(r31) ; Set the alias pointer in the mapping

        bl mapPhyCSet64 ; Install the link

hamDone: bl mapPhysUnlock ; Unlock the physent chain

hamDoneNP: la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkUnlock ; Unlock the search list

        mr r3,r31 ; Get the mapping pointer
        bl mapDropBusy ; Drop the busy count

        li r3,0 ; Set successful return
        li r4,0 ; Set successful return

hamReturn: bt++ pf64Bitb,hamR64 ; Yes...

        mtmsr r17 ; Restore enables/translation/etc.
        isync
        b hamReturnC ; Join common...

hamR64: mtmsrd r17 ; Restore enables/translation/etc.
        isync

hamReturnC:
#if INSTRUMENT
        mfspr r0,pmc1 ; INSTRUMENT - saveinstr[20] - Take stamp at return
        stw r0,0x6100+(20*16)+0x0(0) ; INSTRUMENT - Save it
        mfspr r0,pmc2 ; INSTRUMENT - Get stamp
        stw r0,0x6100+(20*16)+0x4(0) ; INSTRUMENT - Save it
        mfspr r0,pmc3 ; INSTRUMENT - Get stamp
        stw r0,0x6100+(20*16)+0x8(0) ; INSTRUMENT - Save it
        mfspr r0,pmc4 ; INSTRUMENT - Get stamp
        stw r0,0x6100+(20*16)+0xC(0) ; INSTRUMENT - Save it
#endif
        lwz r0,(FM_ALIGN((31-17+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return
        lwz r17,FM_ARG0+0x00(r1) ; Restore a register
        lwz r18,FM_ARG0+0x04(r1) ; Restore a register
        lwz r19,FM_ARG0+0x08(r1) ; Restore a register
        lwz r20,FM_ARG0+0x0C(r1) ; Restore a register
        mtlr r0 ; Restore the return
        lwz r21,FM_ARG0+0x10(r1) ; Restore a register
        lwz r22,FM_ARG0+0x14(r1) ; Restore a register
        lwz r23,FM_ARG0+0x18(r1) ; Restore a register
        lwz r24,FM_ARG0+0x1C(r1) ; Restore a register
        lwz r25,FM_ARG0+0x20(r1) ; Restore a register
        lwz r26,FM_ARG0+0x24(r1) ; Restore a register
        lwz r27,FM_ARG0+0x28(r1) ; Restore a register
        lwz r28,FM_ARG0+0x2C(r1) ; Restore a register
        lwz r29,FM_ARG0+0x30(r1) ; Restore a register
        lwz r30,FM_ARG0+0x34(r1) ; Restore a register
        lwz r31,FM_ARG0+0x38(r1) ; Restore a register
        lwz r1,0(r1) ; Pop the stack

        blr ; Leave...


        .align 5

hamOverlay: lwz r22,mpFlags(r3) ; Get the overlay flags
        li r0,mpC|mpR ; Get a mask to turn off RC bits
        lwz r23,mpFlags(r31) ; Get the requested flags
        lwz r20,mpVAddr(r3) ; Get the overlay address
        lwz r8,mpVAddr(r31) ; Get the requested address
        lwz r21,mpVAddr+4(r3) ; Get the overlay address
        lwz r9,mpVAddr+4(r31) ; Get the requested address
        lhz r10,mpBSize(r3) ; Get the overlay length
        lhz r11,mpBSize(r31) ; Get the requested length
        lwz r24,mpPAddr(r3) ; Get the overlay physical address
        lwz r25,mpPAddr(r31) ; Get the requested physical address
        andc r21,r21,r0 ; Clear RC bits
        andc r9,r9,r0 ; Clear RC bits

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkUnlock ; Unlock the search list

        rlwinm. r0,r22,0,mpRIPb,mpRIPb ; Are we in the process of removing this one?
        mr r3,r20 ; Save the top of the colliding address
        rlwinm r4,r21,0,0,19 ; Save the bottom of the colliding address

        bne++ hamRemv ; It is being removed; go say so, so we can help...

        cmplw r20,r8 ; High part of vaddr the same?
        cmplw cr1,r21,r9 ; Low part?
        crand cr5_eq,cr0_eq,cr1_eq ; Remember if same

        cmplw r10,r11 ; Size the same?
        cmplw cr1,r24,r25 ; Physical address?
        crand cr5_eq,cr5_eq,cr0_eq ; Remember
        crand cr5_eq,cr5_eq,cr1_eq ; Remember if same

        xor r23,r23,r22 ; Check for differences in flags
        ori r23,r23,mpFIP ; "Fault in Progress" is ok to be different
        xori r23,r23,mpFIP ; Force mpFIP off
        rlwinm. r0,r23,0,mpSpecialb,mpListsb-1 ; See if any important flags are different
        crand cr5_eq,cr5_eq,cr0_eq ; Merge in final check
        bf-- cr5_eq,hamReturn ; This is not the same, so we just return a collision...

        ori r4,r4,mapRtMapDup ; Set duplicate
        b hamReturn ; And leave...

hamRemv: ori r4,r4,mapRtRemove ; We are in the process of removing the collision
        b hamReturn ; Come back, y'all...

        .align 5

hamBadLock: li r3,0 ; Set lock time out error code
        li r4,mapRtBadLk ; Set lock time out error code
        b hamReturn ; Leave...



/*
 * mapping *hw_rem_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
 *
 * Upon entry, R3 contains a pointer to a pmap.  Since vaddr is
 * a 64-bit quantity, it is a long long, so it is in R4 and R5.
 *
 * We return the virtual address of the removed mapping in R3.
 *
 * Note that this is designed to be called from 32-bit mode with a stack.
 *
 * We disable translation and all interruptions here.  This keeps us
 * from having to worry about a deadlock due to having anything locked
 * and needing it to process a fault.
 *
 * Note that this must be done with both interruptions off and VM off.
 *
 * Remove mapping via pmap, regular page, no pte
 *
 * 1) lock pmap share
 * 2) find mapping full path - finds all possible list previous elements
 * 3) upgrade pmap to exclusive
 * 4) bump mapping busy count
 * 5) remove mapping from search list
 * 6) unlock pmap
 * 7) lock physent
 * 8) remove from physent
 * 9) unlock physent
 * 10) drop mapping busy count
 * 11) drain mapping busy count
 *
 *
 * Remove mapping via pmap, regular page, with pte
 *
 * 1) lock pmap share
 * 2) find mapping full path - finds all possible list previous elements
 * 3) upgrade lock to exclusive
 * 4) bump mapping busy count
 * 5) lock PTEG
 * 6) invalidate pte and tlbie
 * 7) atomic merge rc into physent
 * 8) unlock PTEG
 * 9) remove mapping from search list
 * 10) unlock pmap
 * 11) lock physent
 * 12) remove from physent
 * 13) unlock physent
 * 14) drop mapping busy count
 * 15) drain mapping busy count
 *
 *
 * Remove mapping via pmap, I/O or block (see the caller sketch below)
 *
 * 1) lock pmap share
 * 2) find mapping full path - finds all possible list previous elements
 * 3) upgrade lock to exclusive
 * 4) bump mapping busy count
 * 5) mark remove-in-progress
 * 6) check and bump remove chunk cursor if needed
 * 7) unlock pmap
 * 8) if something to invalidate, go to step 11
 * 9) drop busy
 * 10) return with mapRtRemove to force higher level to call again
 * 11) lock PTEG
 * 12) invalidate ptes, no tlbie
 * 13) unlock PTEG
 * 14) repeat 11 - 13 for all pages in chunk
 * 15) if not final chunk, go to step 9
 * 16) invalidate tlb entries for the whole block map but no more than the full tlb
 * 17) lock pmap share
 * 18) find mapping full path - finds all possible list previous elements
 * 19) upgrade lock to exclusive
 * 20) remove mapping from search list
 * 21) drop mapping busy count
 * 22) drain mapping busy count
 *
 */

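;
;       Because a block or I/O mapping is torn down only mapRemChunk pages per
;       call (steps 5 through 15 above), hw_rem_map keeps returning mapRtRemove
;       until the final chunk is done, and the caller is expected to loop.  A
;       sketch of that caller in C (assumed signatures, not part of the build):
;
;           addr64_t next, ret;
;           do {
;               ret = hw_rem_map(pmap, vaddr, &next);   // removes one chunk per call
;           } while (ret == mapRtRemove);               // mapping not fully gone yet
;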
        .align 5
        .globl EXT(hw_rem_map)

LEXT(hw_rem_map)

;
;       NOTE NOTE NOTE - IF WE CHANGE THIS STACK FRAME STUFF WE NEED TO CHANGE
;       THE HW_PURGE_* ROUTINES ALSO
;

#define hrmStackSize ((31-15+1)*4)+4
        stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
        mflr r0 ; Save the link register
        stw r15,FM_ARG0+0x00(r1) ; Save a register
        stw r16,FM_ARG0+0x04(r1) ; Save a register
        stw r17,FM_ARG0+0x08(r1) ; Save a register
        stw r18,FM_ARG0+0x0C(r1) ; Save a register
        stw r19,FM_ARG0+0x10(r1) ; Save a register
        mfsprg r19,2 ; Get feature flags
        stw r20,FM_ARG0+0x14(r1) ; Save a register
        stw r21,FM_ARG0+0x18(r1) ; Save a register
        mtcrf 0x02,r19 ; Move pf64Bit to cr6
        stw r22,FM_ARG0+0x1C(r1) ; Save a register
        stw r23,FM_ARG0+0x20(r1) ; Save a register
        stw r24,FM_ARG0+0x24(r1) ; Save a register
        stw r25,FM_ARG0+0x28(r1) ; Save a register
        stw r26,FM_ARG0+0x2C(r1) ; Save a register
        stw r27,FM_ARG0+0x30(r1) ; Save a register
        stw r28,FM_ARG0+0x34(r1) ; Save a register
        stw r29,FM_ARG0+0x38(r1) ; Save a register
        stw r30,FM_ARG0+0x3C(r1) ; Save a register
        stw r31,FM_ARG0+0x40(r1) ; Save a register
        stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
        stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

        bt++ pf64Bitb,hrmSF1 ; Skip if 64-bit (only they take the hint)
        lwz r9,pmapvr+4(r3) ; Get conversion mask
        b hrmSF1x ; Done...

hrmSF1: ld r9,pmapvr(r3) ; Get conversion mask

hrmSF1x:
        bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit

        xor r28,r3,r9 ; Convert the pmap to physical addressing

;
;       Here is where we join in from the hw_purge_* routines
;

hrmJoin: mfsprg r19,2 ; Get feature flags again (for alternate entries)

        mr r17,r11 ; Save the MSR
        mr r29,r4 ; Top half of vaddr
        mr r30,r5 ; Bottom half of vaddr

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkShared ; Go get a shared lock on the mapping lists
        mr. r3,r3 ; Did we get the lock?
        bne-- hrmBadLock ; Nope...

;
;       Note that we do a full search (i.e., no shortcut level skips, etc.)
;       here so that we will know the previous elements so we can dequeue them
;       later.  Note: we get back mpFlags in R7.
;

        mr r3,r28 ; Pass in pmap to search
        mr r4,r29 ; High order of address
        mr r5,r30 ; Low order of address
        bl EXT(mapSearchFull) ; Go see if we can find it

        andi. r0,r7,lo16(mpPerm|mpSpecial|mpNest) ; Is this nested, special, or a perm mapping?
        mr r20,r7 ; Remember mpFlags
        rlwinm r0,r7,0,mpRemovableb,mpRemovableb ; Are we allowed to remove it?
        crmove cr5_eq,cr0_eq ; Remember if we should remove this
        mr. r31,r3 ; Did we? (And remember mapping address for later)
        cmplwi cr1,r0,0 ; Are we allowed to remove?
        mr r15,r4 ; Save top of next vaddr
        crorc cr5_eq,cr5_eq,cr1_eq ; cr5_eq is true if this is not removable
        mr r16,r5 ; Save bottom of next vaddr
        beq hrmNotFound ; Nope, not found...

        bf-- cr5_eq,hrmPerm ; This one can't be removed...
;
;       Here we try to promote to an exclusive lock.  This will fail if someone else
;       has it shared.
;

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkPromote ; Try to promote shared to exclusive
        mr. r3,r3 ; Could we?
        beq++ hrmGotX ; Yeah...

;
;       Since we could not promote our lock, we need to convert it instead.
;       That means that we drop the shared lock and wait to get it
;       exclusive.  Since we release the lock, we need to do the look up
;       again.
;

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkConvert ; Convert shared to exclusive
        mr. r3,r3 ; Could we?
        bne-- hrmBadLock ; Nope, we must have timed out...

        mr r3,r28 ; Pass in pmap to search
        mr r4,r29 ; High order of address
        mr r5,r30 ; Low order of address
        bl EXT(mapSearchFull) ; Rescan the list

        andi. r0,r7,lo16(mpPerm|mpSpecial|mpNest) ; Is this nested, special, or a perm mapping?
        rlwinm r0,r7,0,mpRemovableb,mpRemovableb ; Are we allowed to remove it?
        crmove cr5_eq,cr0_eq ; Remember if we should remove this
        mr. r31,r3 ; Did we lose it when we converted?
        cmplwi cr1,r0,0 ; Are we allowed to remove?
        mr r20,r7 ; Remember mpFlags
        crorc cr5_eq,cr5_eq,cr1_eq ; cr5_eq is true if this is not removable
        mr r15,r4 ; Save top of next vaddr
        mr r16,r5 ; Save bottom of next vaddr
        beq-- hrmNotFound ; Yeah, we did, someone tossed it for us...

        bf-- cr5_eq,hrmPerm ; This one can't be removed...

;
;       We have an exclusive lock on the mapping chain, and we
;       also bump the busy count in the mapping so it can
;       not vanish on us.
;

hrmGotX: mr r3,r31 ; Get the mapping
        bl mapBumpBusy ; Bump up the busy count

;
;       Invalidate any PTEs associated with this
;       mapping (more than one if a block) and accumulate the reference
;       and change bits.
;
;       Here is also where we need to split 32- and 64-bit processing.
;

        lwz r21,mpPte(r31) ; Grab the offset to the PTE
        rlwinm r23,r29,0,1,0 ; Copy high order vaddr to high if 64-bit machine
        mfsdr1 r29 ; Get the hash table base and size
        rlwinm r0,r20,0,mpBlockb,mpBlockb ; Is this a block mapping?
        andi. r2,r20,lo16(mpSpecial|mpNest) ; Is this a nest or special mapping?
        cmplwi cr5,r0,0 ; Remember if this is a block mapping
        rlwinm r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
        ori r2,r2,0xFFFF ; Get mask to clean out hash table base (works for both 32- and 64-bit)
        cmpwi cr1,r0,0 ; Have we made a PTE for this yet?
        rlwinm r21,r21,0,0,30 ; Clear out valid bit
        crorc cr0_eq,cr1_eq,cr0_eq ; No need to look at PTE if none or a special mapping
        rlwimi r23,r30,0,0,31 ; Insert low under high part of address
        andc r29,r29,r2 ; Clean up hash table base
        li r22,0 ; Clear this on out (also sets RC to 0 if we bail)
        mr r30,r23 ; Move the now merged vaddr to the correct register
        add r26,r29,r21 ; Point to the PTEG slot

        bt++ pf64Bitb,hrmSplit64 ; Go do 64-bit version...

        rlwinm r9,r21,28,4,29 ; Convert PTEG to PCA entry
        bne- cr5,hrmBlock32 ; Go treat block specially...
        subfic r9,r9,-4 ; Get the PCA entry offset
        bt- cr0_eq,hrmPysDQ32 ; Skip next if no possible PTE...
        add r7,r9,r29 ; Point to the PCA slot

        bl mapLockPteg ; Go lock up the PTEG (Note: we need to save R6 to set PCA)

        lwz r21,mpPte(r31) ; Get the quick pointer again
        lwz r5,0(r26) ; Get the top of PTE

        rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
        rlwinm r21,r21,0,0,30 ; Clear out valid bit
        rlwinm r5,r5,0,1,31 ; Turn off valid bit in PTE
        stw r21,mpPte(r31) ; Make sure we invalidate mpPte, still pointing to PTEG (keep walk_page from making a mistake)
        beq- hrmUlckPCA32 ; Pte is gone, no need to invalidate...

        stw r5,0(r26) ; Invalidate the PTE

        li r9,tlbieLock ; Get the TLBIE lock

        sync ; Make sure the invalid PTE is actually in memory

hrmPtlb32: lwarx r5,0,r9 ; Get the TLBIE lock
        mr. r5,r5 ; Is it locked?
        li r5,1 ; Get locked indicator
        bne- hrmPtlb32 ; It is locked, go spin...
        stwcx. r5,0,r9 ; Try to get it
        bne- hrmPtlb32 ; We was beat...

        rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?

        tlbie r30 ; Invalidate all corresponding TLB entries

        beq- hrmNTlbs ; Jump if we can not do a TLBSYNC....

        eieio ; Make sure that the tlbie happens first
        tlbsync ; Wait for everyone to catch up
        sync ; Make sure of it all

hrmNTlbs: li r0,0 ; Clear this
        rlwinm r2,r21,29,29,31 ; Get slot number (8 byte entries)
        stw r0,tlbieLock(0) ; Clear the tlbie lock
        lis r0,0x8000 ; Get bit for slot 0
        eieio ; Make sure those RC bits have been stashed in PTE

        srw r0,r0,r2 ; Get the allocation hash mask
        lwz r22,4(r26) ; Get the latest reference and change bits
        or r6,r6,r0 ; Show that this slot is free

hrmUlckPCA32:
        eieio ; Make sure all updates come first
        stw r6,0(r7) ; Unlock the PTEG

;
;       Now, it is time to remove the mapping and unlock the chain.
;       But first, we need to make sure no one else is using this
;       mapping, so we drain the busy count now.
;

hrmPysDQ32: mr r3,r31 ; Point to the mapping
        bl mapDrainBusy ; Go wait until mapping is unused

        mr r3,r28 ; Get the pmap to remove from
        mr r4,r31 ; Point to the mapping
        bl EXT(mapRemove) ; Remove the mapping from the list


        lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
        andi. r0,r20,lo16(mpSpecial|mpNest) ; Is this a nest or special mapping?
        cmplwi cr1,r0,0 ; Special thingie?
        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        subi r4,r4,1 ; Drop down the mapped page count
        stw r4,pmapResidentCnt(r28) ; Set the mapped page count
        bl sxlkUnlock ; Unlock the search list

        bne-- cr1,hrmRetn32 ; This one has no real memory associated with it so we are done...

        bl mapPhysFindLock ; Go find and lock the physent

        lwz r9,ppLink+4(r3) ; Get first mapping

        mr r4,r22 ; Get the RC bits we just got
        bl mapPhysMerge ; Go merge the RC bits

        rlwinm r9,r9,0,0,25 ; Clear the flags from the mapping pointer

        cmplw r9,r31 ; Are we the first on the list?
        bne- hrmNot1st ; Nope...

        li r9,0 ; Get a 0
        lwz r4,mpAlias+4(r31) ; Get our new forward pointer
        stw r9,mpAlias+4(r31) ; Make sure we are off the chain
        bl mapPhyCSet32 ; Go set the physent link and preserve flags

        b hrmPhyDQd ; Join up and unlock it all...

        .align 5

hrmPerm: li r8,-4096 ; Get the value we need to round down to a page
        and r8,r8,r31 ; Get back to a page
        lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkUnlock ; Unlock the search list

        xor r3,r31,r8 ; Flip mapping address to virtual
        ori r3,r3,mapRtPerm ; Set permanent mapping error
        b hrmErRtn

hrmBadLock: li r3,mapRtBadLk ; Set bad lock
        b hrmErRtn

hrmEndInSight:
        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkUnlock ; Unlock the search list

hrmDoneChunk:
        mr r3,r31 ; Point to the mapping
        bl mapDropBusy ; Drop the busy here since we need to come back
        li r3,mapRtRemove ; Say we are still removing this
        b hrmErRtn

        .align 5

hrmNotFound:
        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkUnlock ; Unlock the search list
        li r3,0 ; Make sure we know we did not find it

hrmErRtn: bt++ pf64Bitb,hrmSF1z ; Skip if 64-bit (only they take the hint)

        mtmsr r17 ; Restore enables/translation/etc.
        isync
        b hrmRetnCmn ; Join the common return code...

hrmSF1z: mtmsrd r17 ; Restore enables/translation/etc.
        isync
        b hrmRetnCmn ; Join the common return code...

        .align 5

hrmNot1st: mr. r8,r9 ; Remember and test current node
        beq- hrmPhyDQd ; Could not find our node, someone must have unmapped us...
        lwz r9,mpAlias+4(r9) ; Chain to the next
        cmplw r9,r31 ; Is this us?
        bne- hrmNot1st ; Not us...

        lwz r9,mpAlias+4(r9) ; Get our forward pointer
        stw r9,mpAlias+4(r8) ; Unchain us

        nop ; For alignment

hrmPhyDQd: bl mapPhysUnlock ; Unlock the physent chain

hrmRetn32: rlwinm r8,r31,0,0,19 ; Find start of page
        mr r3,r31 ; Copy the pointer to the mapping
        lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
        bl mapDrainBusy ; Go wait until mapping is unused

        xor r3,r31,r8 ; Flip mapping address to virtual

        mtmsr r17 ; Restore enables/translation/etc.
        isync

hrmRetnCmn: lwz r6,FM_ARG0+0x44(r1) ; Get address to save next mapped vaddr
        lwz r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
        lwz r17,FM_ARG0+0x08(r1) ; Restore a register
        lwz r18,FM_ARG0+0x0C(r1) ; Restore a register
        mr. r6,r6 ; Should we pass back the "next" vaddr?
        lwz r19,FM_ARG0+0x10(r1) ; Restore a register
        lwz r20,FM_ARG0+0x14(r1) ; Restore a register
        mtlr r0 ; Restore the return

        rlwinm r16,r16,0,0,19 ; Clean to a page boundary
        beq hrmNoNextAdr ; Do not pass back the next vaddr...
        stw r15,0(r6) ; Pass back the top of the next vaddr
        stw r16,4(r6) ; Pass back the bottom of the next vaddr

hrmNoNextAdr:
        lwz r15,FM_ARG0+0x00(r1) ; Restore a register
        lwz r16,FM_ARG0+0x04(r1) ; Restore a register
        lwz r21,FM_ARG0+0x18(r1) ; Restore a register
        rlwinm r3,r3,0,0,31 ; Clear top of register if 64-bit
        lwz r22,FM_ARG0+0x1C(r1) ; Restore a register
        lwz r23,FM_ARG0+0x20(r1) ; Restore a register
        lwz r24,FM_ARG0+0x24(r1) ; Restore a register
        lwz r25,FM_ARG0+0x28(r1) ; Restore a register
        lwz r26,FM_ARG0+0x2C(r1) ; Restore a register
        lwz r27,FM_ARG0+0x30(r1) ; Restore a register
        lwz r28,FM_ARG0+0x34(r1) ; Restore a register
        lwz r29,FM_ARG0+0x38(r1) ; Restore a register
        lwz r30,FM_ARG0+0x3C(r1) ; Restore a register
        lwz r31,FM_ARG0+0x40(r1) ; Restore a register
        lwz r1,0(r1) ; Pop the stack
        blr ; Leave...

;
;       Here is where we come when all is lost.  Somehow, we failed a mapping function
;       that must work... All hope is gone.  Alas, we die.......
;

hrmPanic: lis r0,hi16(Choke) ; System abend
        ori r0,r0,lo16(Choke) ; System abend
        li r3,failMapping ; Show that we failed some kind of mapping thing
        sc


;
;       Invalidate block mappings by invalidating a chunk of autogen PTEs in PTEGs hashed
;       in the range.  Then, if we did not finish, return a code indicating that we need to
;       be called again.  Eventually, we will finish and then, we will do a TLBIE for each
;       PTEG up to the point where we have cleared it all (64 for 32-bit architecture).
;
;       A potential speed up is that we stop the invalidate loop once we have walked through
;       the hash table once.  This really is not worth the trouble because we would need to
;       have mapped 1/2 of physical RAM in an individual block.  Way unlikely.
;
;       We should rethink this and see if we think it will be faster to check the PTE and
;       only invalidate the specific PTE rather than all block map PTEs in the PTEG.
;

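;
;       The cursor arithmetic below computes min(pages left, chunk size) without
;       a branch.  In C terms (a sketch; arithmetic right shift of a negative
;       value is assumed, which is what srawi does):
;
;           int32_t left = r25 - r9;                     // pages left past the cursor
;           int32_t m    = (left - mapRemChunk) >> 31;   // -1 if left < chunk, else 0
;           int32_t n    = ((left - (mapRemChunk - 1)) & m) + (mapRemChunk - 1);
;           // n == left when left < mapRemChunk, else mapRemChunk - 1
;           // (a 0-based count, which is how the loop below consumes it)
;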
55e303ae 931 .align 5
1c79356b 932
55e303ae
A
933hrmBlock32:
934 lhz r23,mpSpace(r31) ; Get the address space hash
935 lhz r25,mpBSize(r31) ; Get the number of pages in block
936 lwz r9,mpBlkRemCur(r31) ; Get our current remove position
937 ori r0,r20,mpRIP ; Turn on the remove in progress flag
938 mfsdr1 r29 ; Get the hash table base and size
939 rlwinm r24,r23,maxAdrSpb,32-maxAdrSpb-maxAdrSpb,31-maxAdrSpb ; Get high order of hash
940 lwz r27,mpVAddr+4(r31) ; Get the base vaddr
941 sub r4,r25,r9 ; Get number of pages left
942 cmplw cr1,r9,r25 ; Have we already hit the end?
943 addi r10,r9,mapRemChunk ; Point to the start of the next chunk
944 addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
945 rlwinm r26,r29,16,7,15 ; Get the hash table size
946 srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
947 stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
948 subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
949 cmpwi cr7,r2,0 ; Remember if we have finished
950 slwi r0,r9,12 ; Make cursor into page offset
951 or r24,r24,r23 ; Get full hash
952 and r4,r4,r2 ; If more than a chunk, bring this back to 0
953 rlwinm r29,r29,0,0,15 ; Isolate the hash table base
954 add r27,r27,r0 ; Adjust vaddr to start of current chunk
955 addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get max(num left, chunksize)
956
957 bgt- cr1,hrmEndInSight ; Someone is already doing the last hunk...
958
959 la r3,pmapSXlk(r28) ; Point to the pmap search lock
960 stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
961 bl sxlkUnlock ; Unlock the search list while we are invalidating
962
963 rlwinm r8,r27,4+maxAdrSpb,31-maxAdrSpb-3,31-maxAdrSpb ; Isolate the segment
964 rlwinm r30,r27,26,6,25 ; Shift vaddr to PTEG offset (and remember VADDR in R27)
965 xor r24,r24,r8 ; Get the proper VSID
966 rlwinm r21,r27,26,10,25 ; Shift page index to PTEG offset (and remember VADDR in R27)
967 ori r26,r26,lo16(0xFFC0) ; Stick in the rest of the length
968 rlwinm r22,r4,6,10,25 ; Shift size to PTEG offset
969 rlwinm r24,r24,6,0,25 ; Shift hash to PTEG units
970 add r22,r22,r30 ; Get end address (in PTEG units)
971
972hrmBInv32: rlwinm r23,r30,0,10,25 ; Isolate just the page index
973 xor r23,r23,r24 ; Hash it
974 and r23,r23,r26 ; Wrap it into the table
975 rlwinm r3,r23,28,4,29 ; Change to PCA offset
976 subfic r3,r3,-4 ; Get the PCA entry offset
977 add r7,r3,r29 ; Point to the PCA slot
978 cmplw cr5,r30,r22 ; Check if we reached the end of the range
979 addi r30,r30,64 ; bump to the next vaddr
980
981 bl mapLockPteg ; Lock the PTEG
982
983 rlwinm. r4,r6,16,0,7 ; Position, save, and test block mappings in PCA
984 add r5,r23,r29 ; Point to the PTEG
985 li r0,0 ; Set an invalid PTE value
986 beq+ hrmBNone32 ; No block map PTEs in this PTEG...
987 mtcrf 0x80,r4 ; Set CRs to select PTE slots
988 mtcrf 0x40,r4 ; Set CRs to select PTE slots
1c79356b 989
55e303ae
A
        bf 0,hrmSlot0 ; No autogen here
        stw r0,0x00(r5) ; Invalidate PTE

hrmSlot0: bf 1,hrmSlot1 ; No autogen here
        stw r0,0x08(r5) ; Invalidate PTE

hrmSlot1: bf 2,hrmSlot2 ; No autogen here
        stw r0,0x10(r5) ; Invalidate PTE

hrmSlot2: bf 3,hrmSlot3 ; No autogen here
        stw r0,0x18(r5) ; Invalidate PTE

hrmSlot3: bf 4,hrmSlot4 ; No autogen here
        stw r0,0x20(r5) ; Invalidate PTE

hrmSlot4: bf 5,hrmSlot5 ; No autogen here
        stw r0,0x28(r5) ; Invalidate PTE

hrmSlot5: bf 6,hrmSlot6 ; No autogen here
        stw r0,0x30(r5) ; Invalidate PTE

hrmSlot6: bf 7,hrmSlot7 ; No autogen here
        stw r0,0x38(r5) ; Invalidate PTE

hrmSlot7: rlwinm r0,r4,16,16,23 ; Move in use to autogen
        or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
        andc r6,r6,r0 ; Turn off all the old autogen bits

hrmBNone32: eieio ; Make sure all updates come first

        stw r6,0(r7) ; Unlock and set the PCA

        bne+ cr5,hrmBInv32 ; Go invalidate the next...

        bge+ cr7,hrmDoneChunk ; We have not yet done the last chunk; go tell our caller to call again...

        mr r3,r31 ; Copy the pointer to the mapping
        bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one

        sync ; Make sure memory is consistent

        subi r5,r25,63 ; Subtract TLB size from page count (note we are 0 based here)
        li r6,63 ; Assume full invalidate for now
        srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
        andc r6,r6,r5 ; Clear max if we have less to do
        and r5,r25,r5 ; Clear count if we have more than max
        lwz r27,mpVAddr+4(r31) ; Get the base vaddr again
        li r7,tlbieLock ; Get the TLBIE lock
        or r5,r5,r6 ; Get number of TLBIEs needed

hrmBTLBlck: lwarx r2,0,r7 ; Get the TLBIE lock
        mr. r2,r2 ; Is it locked?
        li r2,1 ; Get our lock value
        bne- hrmBTLBlck ; It is locked, go wait...
        stwcx. r2,0,r7 ; Try to get it
        bne- hrmBTLBlck ; We was beat...

hrmBTLBi: addic. r5,r5,-1 ; See if we did them all
        tlbie r27 ; Invalidate it everywhere
        addi r27,r27,0x1000 ; Up to the next page
        bge+ hrmBTLBi ; Make sure we have done it all...

        rlwinm. r0,r19,0,pfSMPcapb,pfSMPcapb ; Can this processor do SMP?
        li r2,0 ; Lock clear value

        sync ; Make sure all is quiet
        beq- hrmBNTlbs ; Jump if we can not do a TLBSYNC....

        eieio ; Make sure that the tlbie happens first
        tlbsync ; Wait for everyone to catch up
        sync ; Wait for quiet again

hrmBNTlbs: stw r2,tlbieLock(0) ; Clear the tlbie lock

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkShared ; Go get a shared lock on the mapping lists
        mr. r3,r3 ; Did we get the lock?
        bne- hrmPanic ; Nope...

        lwz r4,mpVAddr(r31) ; High order of address
        lwz r5,mpVAddr+4(r31) ; Low order of address
        mr r3,r28 ; Pass in pmap to search
        mr r29,r4 ; Save this in case we need it (only if promote fails)
        mr r30,r5 ; Save this in case we need it (only if promote fails)
        bl EXT(mapSearchFull) ; Go see if we can find it

        mr. r3,r3 ; Did we? (And remember mapping address for later)
        mr r15,r4 ; Save top of next vaddr
        mr r16,r5 ; Save bottom of next vaddr
        beq- hrmPanic ; Nope, not found...

        cmplw r3,r31 ; Same mapping?
        bne- hrmPanic ; Not good...

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkPromote ; Try to promote shared to exclusive
        mr. r3,r3 ; Could we?
        mr r3,r31 ; Restore the mapping pointer
        beq+ hrmBDone1 ; Yeah...

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkConvert ; Convert shared to exclusive
        mr. r3,r3 ; Could we?
        bne-- hrmPanic ; Nope, we must have timed out...

        mr r3,r28 ; Pass in pmap to search
        mr r4,r29 ; High order of address
        mr r5,r30 ; Low order of address
        bl EXT(mapSearchFull) ; Rescan the list

        mr. r3,r3 ; Did we lose it when we converted?
        mr r15,r4 ; Save top of next vaddr
        mr r16,r5 ; Save bottom of next vaddr
        beq-- hrmPanic ; Yeah, we did, someone tossed it for us...

hrmBDone1: bl mapDrainBusy ; Go wait until mapping is unused

        mr r3,r28 ; Get the pmap to remove from
        mr r4,r31 ; Point to the mapping
        bl EXT(mapRemove) ; Remove the mapping from the list

        lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        subi r4,r4,1 ; Drop down the mapped page count
        stw r4,pmapResidentCnt(r28) ; Set the mapped page count
        bl sxlkUnlock ; Unlock the search list

        b hrmRetn32 ; We are all done, get out...

;
;       Here we handle the 64-bit version of hw_rem_map.
;

        .align 5

hrmSplit64: rlwinm r9,r21,27,5,29 ; Convert PTEG to PCA entry
        bne-- cr5,hrmBlock64 ; Go treat block specially...
        subfic r9,r9,-4 ; Get the PCA entry offset
        bt-- cr0_eq,hrmPysDQ64 ; Skip next if no possible PTE...
        add r7,r9,r29 ; Point to the PCA slot

        bl mapLockPteg ; Go lock up the PTEG

        lwz r21,mpPte(r31) ; Get the quick pointer again
        ld r5,0(r26) ; Get the top of PTE

        rlwinm. r0,r21,0,mpHValidb,mpHValidb ; See if we actually have a PTE
        rlwinm r21,r21,0,0,30 ; Clear out valid bit
        sldi r23,r5,16 ; Shift AVPN up to EA format
        rldicr r5,r5,0,62 ; Clear the valid bit
        rldimi r23,r30,0,36 ; Insert the page portion of the VPN
        stw r21,mpPte(r31) ; Make sure we invalidate mpPte but keep pointing to PTEG (keep walk_page from making a mistake)
        beq-- hrmUlckPCA64 ; Pte is gone, no need to invalidate...

        std r5,0(r26) ; Invalidate the PTE

        li r9,tlbieLock ; Get the TLBIE lock

        sync ; Make sure the invalid PTE is actually in memory

hrmPtlb64: lwarx r5,0,r9 ; Get the TLBIE lock
        rldicl r23,r23,0,16 ; Clear bits 0:15 cause they say to
        mr. r5,r5 ; Is it locked?
        li r5,1 ; Get locked indicator
        bne-- hrmPtlb64w ; It is locked, go spin...
        stwcx. r5,0,r9 ; Try to get it
        bne-- hrmPtlb64 ; We was beat...

        tlbie r23 ; Invalidate all corresponding TLB entries

        eieio ; Make sure that the tlbie happens first
        tlbsync ; Wait for everyone to catch up
        isync

        ptesync ; Make sure of it all
        li r0,0 ; Clear this
        rlwinm r2,r21,28,29,31 ; Get slot number (16 byte entries)
        stw r0,tlbieLock(0) ; Clear the tlbie lock
        oris r0,r0,0x8000 ; Assume slot 0
        eieio ; Make sure those RC bits have been stashed in PTE
        srw r0,r0,r2 ; Get slot mask to deallocate

        lwz r22,12(r26) ; Get the latest reference and change bits
        or r6,r6,r0 ; Make the guy we killed free

hrmUlckPCA64:
        eieio ; Make sure all updates come first

        stw r6,0(r7) ; Unlock and change the PCA

hrmPysDQ64: mr r3,r31 ; Point to the mapping
        bl mapDrainBusy ; Go wait until mapping is unused

        mr r3,r28 ; Get the pmap to remove from
        mr r4,r31 ; Point to the mapping
        bl EXT(mapRemove) ; Remove the mapping from the list

        andi. r0,r20,lo16(mpSpecial|mpNest) ; Is this a nest or special mapping?
        lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
        cmplwi cr1,r0,0 ; Special thingie?
        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        subi r4,r4,1 ; Drop down the mapped page count
        stw r4,pmapResidentCnt(r28) ; Set the mapped page count
        bl sxlkUnlock ; Unlock the search list

        bne-- cr1,hrmRetn64 ; This one has no real memory associated with it so we are done...

        bl mapPhysFindLock ; Go find and lock the physent

        li r0,0xFF ; Get mask to clean up mapping pointer
        ld r9,ppLink(r3) ; Get first mapping
        rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC00000000000003F
        mr r4,r22 ; Get the RC bits we just got

        bl mapPhysMerge ; Go merge the RC bits

        andc r9,r9,r0 ; Clean up the mapping pointer

        cmpld r9,r31 ; Are we the first on the list?
        bne- hrmNot1st64 ; Nope...

        li r9,0 ; Get a 0
        ld r4,mpAlias(r31) ; Get our forward pointer

        std r9,mpAlias(r31) ; Make sure we are off the chain
        bl mapPhyCSet64 ; Go set the physent link and preserve flags

        b hrmPhyDQd64 ; Join up and unlock it all...

hrmPtlb64w: li r5,lgKillResv ; Point to some spare memory
        stwcx. r5,0,r5 ; Clear the pending reservation


hrmPtlb64x: lwz r5,0(r9) ; Do a regular load to avoid taking reservation
        mr. r5,r5 ; Is it locked?
        beq++ hrmPtlb64 ; Nope...
        b hrmPtlb64x ; Sniff some more...

        .align 5

hrmNot1st64:
        mr. r8,r9 ; Remember and test current node
        beq- hrmNotFound ; Could not find our node...
        ld r9,mpAlias(r9) ; Chain to the next
        cmpld r9,r31 ; Is this us?
        bne- hrmNot1st64 ; Not us...

        ld r9,mpAlias(r9) ; Get our forward pointer
        std r9,mpAlias(r8) ; Unchain us

        nop ; For alignment

hrmPhyDQd64:
        bl mapPhysUnlock ; Unlock the physent chain

hrmRetn64: rldicr r8,r31,0,51 ; Find start of page
        mr r3,r31 ; Copy the pointer to the mapping
        lwz r8,mbvrswap+4(r8) ; Get last half of virtual to real swap
        bl mapDrainBusy ; Go wait until mapping is unused

        xor r3,r31,r8 ; Flip mapping address to virtual

        mtmsrd r17 ; Restore enables/translation/etc.
        isync

        b hrmRetnCmn ; Join the common return path...


;
;       Check hrmBlock32 for comments.
;

        .align 5

hrmBlock64:
        lhz r24,mpSpace(r31) ; Get the address space hash
        lhz r25,mpBSize(r31) ; Get the number of pages in block
        lwz r9,mpBlkRemCur(r31) ; Get our current remove position
        ori r0,r20,mpRIP ; Turn on the remove in progress flag
        mfsdr1 r29 ; Get the hash table base and size
        ld r27,mpVAddr(r31) ; Get the base vaddr
        rlwinm r5,r29,0,27,31 ; Isolate the size
        sub r4,r25,r9 ; Get number of pages left
        cmplw cr1,r9,r25 ; Have we already hit the end?
        addi r10,r9,mapRemChunk ; Point to the start of the next chunk
        addi r2,r4,-mapRemChunk ; See if mapRemChunk or more
        stb r0,mpFlags+3(r31) ; Save the flags with the mpRIP bit on
        srawi r2,r2,31 ; We have -1 if less than mapRemChunk or 0 if equal or more
        subi r4,r4,mapRemChunk-1 ; Back off for a running start (will be negative for more than mapRemChunk)
        cmpwi cr7,r2,0 ; Remember if we are doing the last chunk
        and r4,r4,r2 ; If more than a chunk, bring this back to 0
        srdi r27,r27,12 ; Change address into page index
        addi r4,r4,mapRemChunk-1 ; Add mapRemChunk-1 to get min(num left, chunk size)
        add r27,r27,r9 ; Adjust vaddr to start of current chunk

        bgt-- cr1,hrmEndInSight ; Someone is already doing the last hunk...

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        stw r10,mpBlkRemCur(r31) ; Set next chunk to do (note: this may indicate after end)
        bl sxlkUnlock ; Unlock the search list while we are invalidating

        rlwimi r24,r24,14,4,17 ; Insert a copy of space hash
        eqv r26,r26,r26 ; Get all foxes here
        rldimi r24,r24,28,8 ; Make a couple copies up higher
        rldicr r29,r29,0,47 ; Isolate just the hash table base
        subfic r5,r5,46 ; Get number of leading zeros
        srd r26,r26,r5 ; Shift the size bits over
        mr r30,r27 ; Get start of chunk to invalidate
        rldicr r26,r26,0,56 ; Make length in PTEG units
        add r22,r4,r30 ; Get end page number

hrmBInv64: srdi r0,r30,2 ; Shift page index over to form ESID
        rldicr r0,r0,0,49 ; Clean all but segment portion
        rlwinm r2,r30,0,16,31 ; Get the current page index
        xor r0,r0,r24 ; Form VSID
        xor r8,r2,r0 ; Hash the vaddr
        sldi r8,r8,7 ; Make into PTEG offset
        and r23,r8,r26 ; Wrap into the hash table
        rlwinm r3,r23,27,5,29 ; Change to PCA offset (table is always 2GB or less so 32-bit instructions work here)
        subfic r3,r3,-4 ; Get the PCA entry offset
        add r7,r3,r29 ; Point to the PCA slot

        cmplw cr5,r30,r22 ; Have we reached the end of the range?

        bl mapLockPteg ; Lock the PTEG

        rlwinm. r4,r6,16,0,7 ; Extract the block mappings in this here PTEG and see if there are any
        add r5,r23,r29 ; Point to the PTEG
        li r0,0 ; Set an invalid PTE value
        beq++ hrmBNone64 ; No block map PTEs in this PTEG...
        mtcrf 0x80,r4 ; Set CRs to select PTE slots
        mtcrf 0x40,r4 ; Set CRs to select PTE slots


        bf 0,hrmSlot0s ; No autogen here
        std r0,0x00(r5) ; Invalidate PTE

hrmSlot0s: bf 1,hrmSlot1s ; No autogen here
        std r0,0x10(r5) ; Invalidate PTE

hrmSlot1s: bf 2,hrmSlot2s ; No autogen here
        std r0,0x20(r5) ; Invalidate PTE

hrmSlot2s: bf 3,hrmSlot3s ; No autogen here
        std r0,0x30(r5) ; Invalidate PTE

hrmSlot3s: bf 4,hrmSlot4s ; No autogen here
        std r0,0x40(r5) ; Invalidate PTE

hrmSlot4s: bf 5,hrmSlot5s ; No autogen here
        std r0,0x50(r5) ; Invalidate PTE

hrmSlot5s: bf 6,hrmSlot6s ; No autogen here
        std r0,0x60(r5) ; Invalidate PTE

hrmSlot6s: bf 7,hrmSlot7s ; No autogen here
        std r0,0x70(r5) ; Invalidate PTE

hrmSlot7s: rlwinm r0,r4,16,16,23 ; Move in use to autogen
        or r6,r6,r4 ; Flip on the free bits that correspond to the autogens we cleared
        andc r6,r6,r0 ; Turn off all the old autogen bits

hrmBNone64: eieio ; Make sure all updates come first
        stw r6,0(r7) ; Unlock and set the PCA

        addi r30,r30,1 ; Bump to the next PTEG
        bne++ cr5,hrmBInv64 ; Go invalidate the next...

        bge+ cr7,hrmDoneChunk ; We have not yet done the last chunk; go tell our caller to call again...

        mr r3,r31 ; Copy the pointer to the mapping
        bl mapDrainBusy ; Go wait until we are sure all other removers are done with this one

        sync ; Make sure memory is consistent

        subi r5,r25,255 ; Subtract TLB size from page count (note we are 0 based here)
        li r6,255 ; Assume full invalidate for now
        srawi r5,r5,31 ; Make 0 if we need a full purge, -1 otherwise
        andc r6,r6,r5 ; Clear max if we have less to do
        and r5,r25,r5 ; Clear count if we have more than max
        sldi r24,r24,28 ; Get the full XOR value over to segment position
        ld r27,mpVAddr(r31) ; Get the base vaddr
        li r7,tlbieLock ; Get the TLBIE lock
        or r5,r5,r6 ; Get number of TLBIEs needed

hrmBTLBlcl: lwarx r2,0,r7 ; Get the TLBIE lock
        mr. r2,r2 ; Is it locked?
        li r2,1 ; Get our lock value
        bne-- hrmBTLBlcm ; It is locked, go wait...
        stwcx. r2,0,r7 ; Try to get it
        bne-- hrmBTLBlcl ; We was beat...

hrmBTLBj: sldi r2,r27,maxAdrSpb ; Move to make room for address space ID
        rldicr r2,r2,0,35-maxAdrSpb ; Clear out the extra
        addic. r5,r5,-1 ; See if we did them all
        xor r2,r2,r24 ; Make the VSID
        rldimi r2,r27,0,36 ; Insert the page portion of the VPN
        rldicl r2,r2,0,16 ; Clear bits 0:15 cause they say we gotta

        tlbie r2 ; Invalidate it everywhere
        addi r27,r27,0x1000 ; Up to the next page
        bge++ hrmBTLBj ; Make sure we have done it all...

        sync ; Make sure all is quiet

        eieio ; Make sure that the tlbie happens first
        tlbsync ; Wait for everyone to catch up
        isync

        li r2,0 ; Lock clear value

        ptesync ; Wait for quiet again
        sync ; Make sure that is done

        stw r2,tlbieLock(0) ; Clear the tlbie lock

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkShared ; Go get a shared lock on the mapping lists
        mr. r3,r3 ; Did we get the lock?
        bne- hrmPanic ; Nope...

        lwz r4,mpVAddr(r31) ; High order of address
        lwz r5,mpVAddr+4(r31) ; Low order of address
        mr r3,r28 ; Pass in pmap to search
        mr r29,r4 ; Save this in case we need it (only if promote fails)
        mr r30,r5 ; Save this in case we need it (only if promote fails)
        bl EXT(mapSearchFull) ; Go see if we can find it

        mr. r3,r3 ; Did we? (And remember mapping address for later)
        mr r15,r4 ; Save top of next vaddr
        mr r16,r5 ; Save bottom of next vaddr
        beq- hrmPanic ; Nope, not found...

        cmpld r3,r31 ; Same mapping?
        bne- hrmPanic ; Not good...

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkPromote ; Try to promote shared to exclusive
        mr. r3,r3 ; Could we?
        mr r3,r31 ; Restore the mapping pointer
        beq+ hrmBDone2 ; Yeah...

        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        bl sxlkConvert ; Convert shared to exclusive
        mr. r3,r3 ; Could we?
        bne-- hrmPanic ; Nope, we must have timed out...

        mr r3,r28 ; Pass in pmap to search
        mr r4,r29 ; High order of address
        mr r5,r30 ; Low order of address
        bl EXT(mapSearchFull) ; Rescan the list

        mr. r3,r3 ; Did we lose it when we converted?
        mr r15,r4 ; Save top of next vaddr
        mr r16,r5 ; Save bottom of next vaddr
        beq-- hrmPanic ; Yeah, we did, someone tossed it for us...

hrmBDone2: bl mapDrainBusy ; Go wait until mapping is unused

        mr r3,r28 ; Get the pmap to remove from
        mr r4,r31 ; Point to the mapping
        bl EXT(mapRemove) ; Remove the mapping from the list

        lwz r4,pmapResidentCnt(r28) ; Get the mapped page count
        la r3,pmapSXlk(r28) ; Point to the pmap search lock
        subi r4,r4,1 ; Drop down the mapped page count
        stw r4,pmapResidentCnt(r28) ; Set the mapped page count
        bl sxlkUnlock ; Unlock the search list

        b hrmRetn64 ; We are all done, get out...

hrmBTLBlcm: li r2,lgKillResv ; Get space unreserve line
        stwcx. r2,0,r2 ; Unreserve it

hrmBTLBlcn: lwz r2,0(r7) ; Get the TLBIE lock
        mr. r2,r2 ; Is it held?
        beq++ hrmBTLBlcl ; Nope...
        b hrmBTLBlcn ; Yeah...


1471/*
55e303ae 1472 * mapping *hw_purge_phys(physent) - remove a mapping from the system
1c79356b 1473 *
55e303ae 1474 * Upon entry, R3 contains a pointer to a physent.
1c79356b 1475 *
55e303ae
A
1476 * This function removes the first mapping from a physical entry
1477 * alias list. It locks the list, extracts the vaddr and pmap from
1478 * the first entry. It then jumps into the hw_rem_map function.
1479 * NOTE: since we jump into rem_map, we need to set up the stack
1480 * identically. Also, we set the next parm to 0 so we do not
1481 * try to save a next vaddr.
1482 *
1483 * We return the virtual address of the removed mapping in
1484 * R3.
de355530 1485 *
55e303ae 1486 * Note that this is designed to be called from 32-bit mode with a stack.
de355530 1487 *
55e303ae
A
1488 * We disable translation and all interruptions here. This keeps us
1489 * from having to worry about a deadlock due to having anything locked
1490 * and needing it to process a fault.
1c79356b 1491 *
55e303ae
A
1492 * Note that this must be done with both interruptions off and VM off
1493 *
1494 *
1495 * Remove mapping via physical page (mapping_purge)
1496 *
1497 * 1) lock physent
1498 * 2) extract vaddr and pmap
1499 * 3) unlock physent
1500 * 4) do "remove mapping via pmap"
1501 *
1c79356b 1502 *
1c79356b
A
1503 */
1504
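;
;		The numbered steps above, restated as a C sketch. The type and
;		helper names (physent_t, lock_physent, and so on) are stand-ins
;		for the real structures, not actual entry points:
;
;		    mapping_t *purge_phys_sketch(physent_t *pp) {
;		        lock_physent(pp);                       /* 1) lock physent */
;		        mapping_t *mp = first_alias(pp);        /* 2) get first mapping */
;		        if (mp == 0) { unlock_physent(pp); return 0; }
;		        pmap_t  *pmap = mp->pmap;               /*    extract vaddr and pmap */
;		        addr64_t va   = mp->vaddr;
;		        unlock_physent(pp);                     /* 3) unlock physent */
;		        return rem_map(pmap, va, 0);            /* 4) remove via pmap, next parm 0 */
;		    }
;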
1505 .align 5
55e303ae
A
1506 .globl EXT(hw_purge_phys)
1507
1508LEXT(hw_purge_phys)
1509 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1510 mflr r0 ; Save the link register
1511 stw r15,FM_ARG0+0x00(r1) ; Save a register
1512 stw r16,FM_ARG0+0x04(r1) ; Save a register
1513 stw r17,FM_ARG0+0x08(r1) ; Save a register
1514 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1515 stw r19,FM_ARG0+0x10(r1) ; Save a register
1516 stw r20,FM_ARG0+0x14(r1) ; Save a register
1517 stw r21,FM_ARG0+0x18(r1) ; Save a register
1518 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1519 stw r23,FM_ARG0+0x20(r1) ; Save a register
1520 stw r24,FM_ARG0+0x24(r1) ; Save a register
1521 stw r25,FM_ARG0+0x28(r1) ; Save a register
1522 li r6,0 ; Set no next address return
1523 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1524 stw r27,FM_ARG0+0x30(r1) ; Save a register
1525 stw r28,FM_ARG0+0x34(r1) ; Save a register
1526 stw r29,FM_ARG0+0x38(r1) ; Save a register
1527 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1528 stw r31,FM_ARG0+0x40(r1) ; Save a register
1529 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1530 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1531
1532 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1533
1534 bl mapPhysLock ; Lock the physent
1535
1536 bt++ pf64Bitb,hppSF ; skip if 64-bit (only they take the hint)
1537
1538 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
1539 li r0,0x3F ; Set the bottom stuff to clear
1540 b hppJoin ; Join the common...
1541
1542hppSF: li r0,0xFF
1543 ld r12,ppLink(r3) ; Get the pointer to the first mapping
1544 rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC00000000000003F
1545
1546hppJoin: andc. r12,r12,r0 ; Clean and test link
1547 beq-- hppNone ; There are no more mappings on physical page
1548
1549 lis r28,hi16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1550 lhz r7,mpSpace(r12) ; Get the address space hash
1551 ori r28,r28,lo16(EXT(pmapTrans)) ; Get the top of the start of the pmap hash to pmap translate table
1552 slwi r0,r7,2 ; Multiply space by 4
1553 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
1554 slwi r7,r7,3 ; Multiply space by 8
1555 lwz r5,mpVAddr+4(r12) ; and the bottom
1556 add r7,r7,r0 ; Get correct displacement into translate table
1557 lwz r28,0(r28) ; Get the actual translation map
de355530 1558
55e303ae
A
1559 add r28,r28,r7 ; Point to the pmap translation
1560
1561 bl mapPhysUnlock ; Time to unlock the physical entry
1562
1563 bt++ pf64Bitb,hppSF2 ; skip if 64-bit (only they take the hint)
1564
1565 lwz r28,pmapPAddr+4(r28) ; Get the physical address of the pmap
1566 b hrmJoin ; Go remove the mapping...
1567
1568hppSF2: ld r28,pmapPAddr(r28) ; Get the physical address of the pmap
1569 b hrmJoin ; Go remove the mapping...
d7e50217 1570
de355530 1571 .align 5
55e303ae
A
1572
1573hppNone: bl mapPhysUnlock ; Time to unlock the physical entry
1574
1575 bt++ pf64Bitb,hppSF3 ; skip if 64-bit (only they take the hint)...
1576
1577 mtmsr r11 ; Restore enables/translation/etc.
1578 isync
1579 b hppRetnCmn ; Join the common return code...
1c79356b 1580
55e303ae
A
1581hppSF3: mtmsrd r11 ; Restore enables/translation/etc.
1582 isync
1c79356b 1583
55e303ae
A
1584;
1585; NOTE: we have not used any registers other than the volatiles to this point
1586;
1c79356b 1587
55e303ae 1588hppRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
1c79356b 1589
55e303ae
A
1590 li r3,0 ; Clear high order mapping address because we are 32-bit
1591 mtlr r12 ; Restore the return
1592 lwz r1,0(r1) ; Pop the stack
1593 blr ; Leave...
1c79356b
A
1594
1595/*
55e303ae
A
1596 * mapping *hw_purge_map(pmap, vaddr, addr64_t *next) - remove a mapping from the system.
1597 *
1598 * Upon entry, R3 contains a pointer to a pmap. Since vaddr is
1599 * a 64-bit quantity, it is passed as a long long in R4 and R5.
1600 *
1601 * We return the virtual address of the removed mapping in
1602 * R3.
1603 *
1604 * Note that this is designed to be called from 32-bit mode with a stack.
1605 *
1606 * We disable translation and all interruptions here. This keeps us
1607 * from having to worry about a deadlock due to having anything locked
1608 * and needing it to process a fault.
1609 *
1610 * Note that this must be done with both interruptions off and VM off
1611 *
1612 * Remove a mapping which can be reestablished by VM
1613 *
1c79356b 1614 */
1c79356b 1615
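;
;		In outline (a C sketch; the flag and helper names are illustrative,
;		with mapSearchFull standing in for the real full search):
;
;		    for (;;) {                                    /* walk the pmap list */
;		        mapping_t *mp = mapSearchFull(pmap, &va); /* also yields next va */
;		        if (mp && !(mp->flags & (SPECIAL|NEST|PERM|BLOCK)))
;		            return remove_mapping(pmap, mp);      /* removable: take it out */
;		        if (va == 0) return 0;                    /* ran off the end of the pmap */
;		    }
;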
55e303ae
A
1616 .align 5
1617 .globl EXT(hw_purge_map)
1618
1619LEXT(hw_purge_map)
1620 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1621 mflr r0 ; Save the link register
1622 stw r15,FM_ARG0+0x00(r1) ; Save a register
1623 stw r16,FM_ARG0+0x04(r1) ; Save a register
1624 stw r17,FM_ARG0+0x08(r1) ; Save a register
1625 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1626 stw r19,FM_ARG0+0x10(r1) ; Save a register
1627 mfsprg r19,2 ; Get feature flags
1628 stw r20,FM_ARG0+0x14(r1) ; Save a register
1629 stw r21,FM_ARG0+0x18(r1) ; Save a register
1630 mtcrf 0x02,r19 ; move pf64Bit cr6
1631 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1632 stw r23,FM_ARG0+0x20(r1) ; Save a register
1633 stw r24,FM_ARG0+0x24(r1) ; Save a register
1634 stw r25,FM_ARG0+0x28(r1) ; Save a register
1635 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1636 stw r27,FM_ARG0+0x30(r1) ; Save a register
1637 stw r28,FM_ARG0+0x34(r1) ; Save a register
1638 stw r29,FM_ARG0+0x38(r1) ; Save a register
1639 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1640 stw r31,FM_ARG0+0x40(r1) ; Save a register
1641 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1642 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1643
1644 bt++ pf64Bitb,hpmSF1 ; skip if 64-bit (only they take the hint)
1645 lwz r9,pmapvr+4(r3) ; Get conversion mask
1646 b hpmSF1x ; Done...
1647
1648hpmSF1: ld r9,pmapvr(r3) ; Get conversion mask
1649
1650hpmSF1x:
1651 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1652
1653 xor r28,r3,r9 ; Convert the pmap to physical addressing
1654
1655 mr r17,r11 ; Save the MSR
1656
1657 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1658 bl sxlkExclusive ; Go get an exclusive lock on the mapping lists
1659 mr. r3,r3 ; Did we get the lock?
1660 bne-- hrmBadLock ; Nope...
1661;
1662; Note that we do a full search (i.e., no shortcut level skips, etc.)
1663; here so that we will know the previous elements so we can dequeue them
1664; later.
1665;
1666hpmSearch:
1667 mr r3,r28 ; Pass in pmap to search
1668 mr r29,r4 ; Top half of vaddr
1669 mr r30,r5 ; Bottom half of vaddr
1670 bl EXT(mapSearchFull) ; Rescan the list
1671 mr. r31,r3 ; Did we? (And remember mapping address for later)
1672 or r0,r4,r5 ; Are we beyond the end?
1673 mr r15,r4 ; Save top of next vaddr
1674 cmplwi cr1,r0,0 ; See if there is another
1675 mr r16,r5 ; Save bottom of next vaddr
1676 bne-- hpmGotOne ; We found one, go check it out...
1677
1678hpmCNext: bne++ cr1,hpmSearch ; There is another to check...
1679 b hrmNotFound ; No more in pmap to check...
1680
1681hpmGotOne: lwz r20,mpFlags(r3) ; Get the flags
1682 andi. r9,r20,lo16(mpSpecial|mpNest|mpPerm|mpBlock) ; Are we allowed to remove it?
1683 beq++ hrmGotX ; Found, branch to remove the mapping...
1684 b hpmCNext ; Nope...
1c79356b 1685
55e303ae
A
1686/*
1687 * mapping *hw_purge_space(physent, pmap) - remove a mapping from the system based upon address space
1688 *
1689 * Upon entry, R3 contains a pointer to the physent and R4 contains
1690 * a pointer to the pmap.
1691 *
1692 * This function removes the first mapping for a specific pmap from a physical entry
1693 * alias list. It locks the list, extracts the vaddr and pmap from
1694 * the first appropriate entry. It then jumps into the hw_rem_map function.
1695 * NOTE: since we jump into rem_map, we need to set up the stack
1696 * identically. Also, we set the next parm to 0 so we do not
1697 * try to save a next vaddr.
1698 *
1699 * We return the virtual address of the removed mapping in
1700 * R3.
1701 *
1702 * Note that this is designed to be called from 32-bit mode with a stack.
1703 *
1704 * We disable translation and all interruptions here. This keeps us
1705 * from having to worry about a deadlock due to having anything locked
1706 * and needing it to process a fault.
1707 *
1708 * Note that this must be done with both interruptions off and VM off
1709 *
1710 *
1711 * Remove mapping via physical page (mapping_purge)
1712 *
1713 * 1) lock physent
1714 * 2) extract vaddr and pmap
1715 * 3) unlock physent
1716 * 4) do "remove mapping via pmap"
1717 *
1718 *
1719 */
1c79356b 1720
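;
;		The alias-chain scan below (hpsSrc32/hpsSrc64), as a C sketch.
;		The flag bits are masked off each 64-bit link before it is used;
;		the struct name and shape here are assumptions:
;
;		    struct mapping *find_for_space(uint64_t link, uint16_t space) {
;		        while ((link &= ~0xC00000000000003FULL) != 0) {   /* strip flags */
;		            struct mapping *mp = (struct mapping *)link;
;		            if (mp->mpSpace == space) return mp;  /* first one in this space */
;		            link = mp->mpAlias;                   /* chain on to the next */
;		        }
;		        return 0;                                 /* none for this space */
;		    }
;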
55e303ae
A
1721 .align 5
1722 .globl EXT(hw_purge_space)
1723
1724LEXT(hw_purge_space)
1725 stwu r1,-(FM_ALIGN(hrmStackSize)+FM_SIZE)(r1) ; Make some space on the stack
1726 mflr r0 ; Save the link register
1727 stw r15,FM_ARG0+0x00(r1) ; Save a register
1728 stw r16,FM_ARG0+0x04(r1) ; Save a register
1729 stw r17,FM_ARG0+0x08(r1) ; Save a register
1730 mfsprg r2,2 ; Get feature flags
1731 stw r18,FM_ARG0+0x0C(r1) ; Save a register
1732 stw r19,FM_ARG0+0x10(r1) ; Save a register
1733 stw r20,FM_ARG0+0x14(r1) ; Save a register
1734 stw r21,FM_ARG0+0x18(r1) ; Save a register
1735 stw r22,FM_ARG0+0x1C(r1) ; Save a register
1736 mtcrf 0x02,r2 ; move pf64Bit cr6
1737 stw r23,FM_ARG0+0x20(r1) ; Save a register
1738 stw r24,FM_ARG0+0x24(r1) ; Save a register
1739 stw r25,FM_ARG0+0x28(r1) ; Save a register
1740 stw r26,FM_ARG0+0x2C(r1) ; Save a register
1741 stw r27,FM_ARG0+0x30(r1) ; Save a register
1742 li r6,0 ; Set no next address return
1743 stw r28,FM_ARG0+0x34(r1) ; Save a register
1744 stw r29,FM_ARG0+0x38(r1) ; Save a register
1745 stw r30,FM_ARG0+0x3C(r1) ; Save a register
1746 stw r31,FM_ARG0+0x40(r1) ; Save a register
1747 stw r6,FM_ARG0+0x44(r1) ; Save address to save next mapped vaddr
1748 stw r0,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1749
1750 bt++ pf64Bitb,hpsSF1 ; skip if 64-bit (only they take the hint)
1751
1752 lwz r9,pmapvr+4(r4) ; Get conversion mask for pmap
1753
1754 b hpsSF1x ; Done...
1755
1756hpsSF1: ld r9,pmapvr(r4) ; Get conversion mask for pmap
1757
1758hpsSF1x: bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1759
1760 xor r4,r4,r9 ; Convert the pmap to physical addressing
1761
1762 bl mapPhysLock ; Lock the physent
1763
1764 lwz r8,pmapSpace(r4) ; Get the space hash
1765
1766 bt++ pf64Bitb,hpsSF ; skip if 64-bit (only they take the hint)
1767
1768 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
1769
1770hpsSrc32: rlwinm. r12,r12,0,0,25 ; Clean and test mapping address
1771 beq hpsNone ; Did not find one...
1772
1773 lhz r10,mpSpace(r12) ; Get the space
1774
1775 cmplw r10,r8 ; Is this one of ours?
1776 beq hpsFnd ; Yes...
1777
1778 lwz r12,mpAlias+4(r12) ; Chain on to the next
1779 b hpsSrc32 ; Check it out...
1c79356b 1780
55e303ae
A
1781 .align 5
1782
1783hpsSF: li r0,0xFF
1784 ld r12,ppLink(r3) ; Get the pointer to the first mapping
1785 rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC00000000000003F
1786
1787hpsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
1788 beq hpsNone ; Did not find one...
1789
1790 lhz r10,mpSpace(r12) ; Get the space
1791
1792 cmplw r10,r8 ; Is this one of ours?
1793 beq hpsFnd ; Yes...
1794
1795 ld r12,mpAlias(r12) ; Chain on to the next
1796 b hpsSrc64 ; Check it out...
1797
1798 .align 5
1c79356b 1799
55e303ae
A
1800hpsFnd: mr r28,r4 ; Set the pmap physical address
1801 lwz r4,mpVAddr(r12) ; Get the top of the vaddr
1802 lwz r5,mpVAddr+4(r12) ; and the bottom
1803
1804 bl mapPhysUnlock ; Time to unlock the physical entry
1805 b hrmJoin ; Go remove the mapping...
1806
1807 .align 5
1808
1809hpsNone: bl mapPhysUnlock ; Time to unlock the physical entry
1c79356b 1810
55e303ae 1811 bt++ pf64Bitb,hpsSF3 ; skip if 64-bit (only they take the hint)...
1c79356b 1812
55e303ae
A
1813 mtmsr r11 ; Restore enables/translation/etc.
1814 isync
1815 b hpsRetnCmn ; Join the common return code...
1c79356b 1816
55e303ae
A
1817hpsSF3: mtmsrd r11 ; Restore enables/translation/etc.
1818 isync
1c79356b 1819
55e303ae
A
1820;
1821; NOTE: we have not used any registers other than the volatiles to this point
1822;
d7e50217 1823
55e303ae
A
1824hpsRetnCmn: lwz r12,(FM_ALIGN(hrmStackSize)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
1825
1826 li r3,0 ; Set return code
1827 mtlr r12 ; Restore the return
1828 lwz r1,0(r1) ; Pop the stack
1829 blr ; Leave...
1c79356b
A
1830
1831
1832/*
55e303ae
A
1833 * mapping *hw_find_space(physent, space) - finds the first mapping on physent for specified space
1834 *
1835 * Upon entry, R3 contains a pointer to a physent.
1836 * space is the space ID from the pmap in question
1837 *
1838 * We return the virtual address of the found mapping in
1839 * R3. Note that the mapping's busy count is bumped.
1840 *
1841 * Note that this is designed to be called from 32-bit mode with a stack.
1842 *
1843 * We disable translation and all interruptions here. This keeps us
1844 * from having to worry about a deadlock due to having anything locked
1845 * and needing it to process a fault.
1846 *
1c79356b
A
1847 */
1848
1849 .align 5
55e303ae
A
1850 .globl EXT(hw_find_space)
1851
1852LEXT(hw_find_space)
1853 stwu r1,-(FM_SIZE)(r1) ; Make some space on the stack
1854 mflr r0 ; Save the link register
1855 mr r8,r4 ; Remember the space
1856 stw r0,(FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1857
1858 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1c79356b 1859
55e303ae 1860 bl mapPhysLock ; Lock the physent
1c79356b 1861
55e303ae
A
1862 bt++ pf64Bitb,hfsSF ; skip if 64-bit (only they take the hint)
1863
1864 lwz r12,ppLink+4(r3) ; Grab the pointer to the first mapping
d7e50217 1865
55e303ae
A
1866hfsSrc32: rlwinm. r12,r12,0,0,25 ; Clean and test mapping address
1867 beq hfsNone ; Did not find one...
1868
1869 lhz r10,mpSpace(r12) ; Get the space
1870
1871 cmplw r10,r8 ; Is this one of ours?
1872 beq hfsFnd ; Yes...
1873
1874 lwz r12,mpAlias+4(r12) ; Chain on to the next
1875 b hfsSrc32 ; Check it out...
1c79356b 1876
55e303ae
A
1877 .align 5
1878
1879hfsSF: li r0,0xFF
1880 ld r12,ppLink(r3) ; Get the pointer to the first mapping
1881 rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC00000000000003F
1882
1883hfsSrc64: andc. r12,r12,r0 ; Clean and test mapping address
1884 beq hfsNone ; Did not find one...
1885
1886 lhz r10,mpSpace(r12) ; Get the space
1887
1888 cmplw r10,r8 ; Is this one of ours?
1889 beq hfsFnd ; Yes...
1890
1891 ld r12,mpAlias(r12) ; Chain on to the next
1892 b hfsSrc64 ; Check it out...
1893
1894 .align 5
1895
1896hfsFnd: mr r8,r3 ; Save the physent
1897 mr r3,r12 ; Point to the mapping
1898 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disappear
1c79356b 1899
55e303ae
A
1900 mr r3,r8 ; Get back the physical entry
1901 li r7,0xFFF ; Get a page size mask
1902 bl mapPhysUnlock ; Time to unlock the physical entry
1c79356b 1903
55e303ae
A
1904 andc r3,r12,r7 ; Move the mapping back down to a page
1905 lwz r3,mbvrswap+4(r3) ; Get last half of virtual to real swap
1906 xor r12,r3,r12 ; Convert to virtual
1907 b hfsRet ; Time to return
1908
1909 .align 5
1910
1911hfsNone: bl mapPhysUnlock ; Time to unlock the physical entry
1912
1913hfsRet: bt++ pf64Bitb,hfsSF3 ; skip if 64-bit (only they take the hint)...
1c79356b 1914
55e303ae
A
1915 mtmsr r11 ; Restore enables/translation/etc.
1916 isync
1917 b hfsRetnCmn ; Join the common return code...
1c79356b 1918
55e303ae
A
1919hfsSF3: mtmsrd r11 ; Restore enables/translation/etc.
1920 isync
1c79356b 1921
55e303ae
A
1922;
1923; NOTE: we have not used any registers other than the volatiles to this point
1924;
1c79356b 1925
55e303ae
A
1926hfsRetnCmn: mr r3,r12 ; Get the mapping or a 0 if we failed
1927 lwz r12,(FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
1928
1929 mtlr r12 ; Restore the return
1930 lwz r1,0(r1) ; Pop the stack
1931 blr ; Leave...
1c79356b 1932
1c79356b 1933
55e303ae
A
1934;
1935; mapping *hw_find_map(pmap, va, *nextva) - Looks up a vaddr in a pmap
1936; Returns 0 if not found, or the virtual address of the mapping if
1937; it is. Also, the mapping has its busy count bumped.
1938;
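;
;	Caller-side sketch (a hypothetical C view of the routine below;
;	mapping_drop_busy is the real release entry point defined later in
;	this file, and the code below also returns 1 on a search-lock timeout):
;
;	    addr64_t nextva;
;	    mapping *mp = hw_find_map(pmap, va, &nextva);
;	    if (mp != 0 && mp != (mapping *)1) {
;	        /* ... examine *mp while the busy count holds it ... */
;	        mapping_drop_busy(mp);
;	    }
;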
1939 .align 5
1940 .globl EXT(hw_find_map)
1c79356b 1941
55e303ae
A
1942LEXT(hw_find_map)
1943 stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
1944 mflr r0 ; Save the link register
1945 stw r25,FM_ARG0+0x00(r1) ; Save a register
1946 stw r26,FM_ARG0+0x04(r1) ; Save a register
1947 mr r25,r6 ; Remember address of next va
1948 stw r27,FM_ARG0+0x08(r1) ; Save a register
1949 stw r28,FM_ARG0+0x0C(r1) ; Save a register
1950 stw r29,FM_ARG0+0x10(r1) ; Save a register
1951 stw r30,FM_ARG0+0x14(r1) ; Save a register
1952 stw r31,FM_ARG0+0x18(r1) ; Save a register
1953 stw r0,(FM_ALIGN((31-26+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1c79356b 1954
55e303ae
A
1955 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
1956 lwz r7,pmapvr+4(r3) ; Get the second part
1c79356b 1957
1c79356b 1958
55e303ae
A
1959 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
1960
1961 mr r27,r11 ; Remember the old MSR
1962 mr r26,r12 ; Remember the feature bits
9bccf70c 1963
55e303ae 1964 xor r28,r3,r7 ; Change the common 32- and 64-bit half
9bccf70c 1965
55e303ae 1966 bf-- pf64Bitb,hfmSF1 ; skip if 32-bit...
1c79356b 1967
55e303ae 1968 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
1c79356b 1969
55e303ae
A
1970hfmSF1: mr r29,r4 ; Save top half of vaddr
1971 mr r30,r5 ; Save the bottom half
1972
1973 la r3,pmapSXlk(r28) ; Point to the pmap search lock
1974 bl sxlkShared ; Go get a shared lock on the mapping lists
1975 mr. r3,r3 ; Did we get the lock?
1976 bne-- hfmBadLock ; Nope...
1c79356b 1977
55e303ae
A
1978 mr r3,r28 ; get the pmap address
1979 mr r4,r29 ; Get bits 0:31 to look for
1980 mr r5,r30 ; Get bits 32:64
1981
1982 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
1c79356b 1983
55e303ae
A
1984 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Find remove in progress bit
1985 mr. r31,r3 ; Save the mapping if we found it
1986 cmplwi cr1,r0,0 ; Are we removing?
1987 mr r29,r4 ; Save next va high half
1988 crorc cr0_eq,cr0_eq,cr1_eq ; Not found or removing
1989 mr r30,r5 ; Save next va low half
1990 li r6,0 ; Assume we did not find it
1991 li r26,0xFFF ; Get a mask to relocate to start of mapping page
1c79356b 1992
55e303ae 1993 bt-- cr0_eq,hfmNotFnd ; We did not find it...
1c79356b 1994
55e303ae 1995 bl mapBumpBusy ; If we found it, bump up the busy count so the mapping does not disapear
1c79356b 1996
55e303ae 1997 andc r4,r31,r26 ; Get back to the mapping page start
1c79356b 1998
55e303ae
A
1999; Note: we can treat 32- and 64-bit the same here. Because we are going from
2000; physical to virtual and we only do 32-bit virtual, we only need the low order
2001; word of the xor.
d7e50217 2002
55e303ae
A
2003 lwz r4,mbvrswap+4(r4) ; Get last half of virtual to real swap
2004 li r6,-1 ; Indicate we found it and it is not being removed
2005 xor r31,r31,r4 ; Flip to virtual
d7e50217 2006
55e303ae
A
2007hfmNotFnd: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2008 bl sxlkUnlock ; Unlock the search list
d7e50217 2009
55e303ae
A
2010 rlwinm r3,r31,0,0,31 ; Move mapping to return register and clear top of register if 64-bit
2011 and r3,r3,r6 ; Clear if not found or removing
de355530 2012
55e303ae 2013hfmReturn: bt++ pf64Bitb,hfmR64 ; Yes...
de355530 2014
55e303ae
A
2015 mtmsr r27 ; Restore enables/translation/etc.
2016 isync
2017 b hfmReturnC ; Join common...
2018
2019hfmR64: mtmsrd r27 ; Restore enables/translation/etc.
2020 isync
2021
2022hfmReturnC: stw r29,0(r25) ; Save the top of the next va
2023 stw r30,4(r25) ; Save the bottom of the next va
2024 lwz r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2025 lwz r25,FM_ARG0+0x00(r1) ; Restore a register
2026 lwz r26,FM_ARG0+0x04(r1) ; Restore a register
2027 and r3,r3,r6 ; Clear return if the mapping is being removed
2028 lwz r27,FM_ARG0+0x08(r1) ; Restore a register
2029 mtlr r0 ; Restore the return
2030 lwz r28,FM_ARG0+0x0C(r1) ; Restore a register
2031 lwz r29,FM_ARG0+0x10(r1) ; Restore a register
2032 lwz r30,FM_ARG0+0x14(r1) ; Restore a register
2033 lwz r31,FM_ARG0+0x18(r1) ; Restore a register
2034 lwz r1,0(r1) ; Pop the stack
2035 blr ; Leave...
2036
2037 .align 5
2038
2039hfmBadLock: li r3,1 ; Set lock time out error code
2040 b hfmReturn ; Leave....
1c79356b 2041
1c79356b
A
2042
2043/*
55e303ae
A
2044 * unsigned int hw_walk_phys(pp, preop, op, postop, parm)
2045 * walks all mappings for a physical page and performs
2046 * specified operations on each.
1c79356b 2047 *
55e303ae
A
2048 * pp is unlocked physent
2049 * preop is operation to perform on physent before walk. This would be
2050 * used to set cache attribute or protection
2051 * op is the operation to perform on each mapping during walk
2052 * postop is operation to perform on the physent after walk. This would be
2053 * used to set or reset the RC bits.
2054 *
2055 * We return the RC bits from before postop is run.
2056 *
2057 * Note that this is designed to be called from 32-bit mode with a stack.
1c79356b 2058 *
55e303ae
A
2059 * We disable translation and all interruptions here. This keeps us
2060 * from having to worry about a deadlock due to having anything locked
2061 * and needing it to process a fault.
d7e50217 2062 *
55e303ae
A
2063 * We lock the physent, execute preop, and then walk each mapping in turn.
2064 * If there is a PTE, it is invalidated and the RC merged into the physent.
2065 * Then we call the op function.
2066 * Then we revalidate the PTE.
2067 * Once all mappings are finished, we save the physent RC and call the
2068 * postop routine. Then we unlock the physent and return the RC.
2069 *
2070 *
1c79356b
A
2071 */
2072
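;
;		The whole walk, restated as a C sketch (helper names are stand-ins;
;		note that an early out requested by preop or op skips postop):
;
;		    unsigned int walk_sketch(physent_t *pp, fn_t preop, fn_t op,
;		                             fn_t postop, uint32_t parm) {
;		        lock_physent(pp);
;		        int bail = !preop(pp, parm);             /* CR0_EQ clear => bail */
;		        for (mapping_t *mp = first_alias(pp); !bail && mp; mp = next_alias(mp)) {
;		            pte_t *pte = invalidate_pte(mp);     /* merges RC into physent */
;		            bail = !op(mp, parm);
;		            if (pte) revalidate_pte(pte);        /* put the PTE back */
;		        }
;		        unsigned int rc = pp->rcbits;            /* RC before postop runs */
;		        if (!bail) postop(pp, parm);
;		        unlock_physent(pp);
;		        return rc;
;		    }
;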
1c79356b 2073 .align 5
55e303ae
A
2074 .globl EXT(hw_walk_phys)
2075
2076LEXT(hw_walk_phys)
2077 stwu r1,-(FM_ALIGN((31-25+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2078 mflr r0 ; Save the link register
2079 stw r25,FM_ARG0+0x00(r1) ; Save a register
2080 stw r26,FM_ARG0+0x04(r1) ; Save a register
2081 stw r27,FM_ARG0+0x08(r1) ; Save a register
2082 stw r28,FM_ARG0+0x0C(r1) ; Save a register
2083 mr r25,r7 ; Save the parm
2084 stw r29,FM_ARG0+0x10(r1) ; Save a register
2085 stw r30,FM_ARG0+0x14(r1) ; Save a register
2086 stw r31,FM_ARG0+0x18(r1) ; Save a register
2087 stw r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
2088
2089 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
2090
2091 mr r26,r11 ; Save the old MSR
2092 lis r27,hi16(hwpOpBase) ; Get high order of op base
2093 slwi r4,r4,7 ; Convert preop to displacement
2094 ori r27,r27,lo16(hwpOpBase) ; Get low order of op base
2095 slwi r5,r5,7 ; Convert op to displacement
2096 add r12,r4,r27 ; Point to the preop routine
2097 slwi r28,r6,7 ; Convert postop to displacement
2098 mtctr r12 ; Set preop routine
2099 add r28,r28,r27 ; Get the address of the postop routine
2100 add r27,r5,r27 ; Get the address of the op routine
1c79356b 2101
55e303ae 2102 bl mapPhysLock ; Lock the physent
1c79356b 2103
55e303ae
A
2104 mr r29,r3 ; Save the physent address
2105
2106 bt++ pf64Bitb,hwp64 ; skip if 64-bit (only they take the hint)
2107
2108 bctrl ; Call preop routine
2109 bne- hwpEarly32 ; preop says to bail now...
1c79356b 2110
55e303ae
A
2111 mtctr r27 ; Set up the op function address
2112 lwz r31,ppLink+4(r3) ; Grab the pointer to the first mapping
2113
2114hwpSrc32: rlwinm. r31,r31,0,0,25 ; Clean and test mapping address
2115 beq hwpNone32 ; Did not find one...
d7e50217 2116
55e303ae
A
2117;
2118; Note: mapInvPte32 returns the PTE in R3 (or 0 if none), PTE high in R4,
2119; PTE low in R5. The PCA address is in R7. The PTEG comes back locked.
2120; If there is no PTE, PTE low is obtained from the mapping
2121;
2122 bl mapInvPte32 ; Invalidate and lock PTE, also merge into physent
2123
2124 bctrl ; Call the op function
2125
2126 crmove cr1_eq,cr0_eq ; Save the return code
2127
2128 mr. r3,r3 ; Was there a previously valid PTE?
2129 beq- hwpNxt32 ; Nope...
1c79356b 2130
55e303ae
A
2131 stw r5,4(r3) ; Store second half of PTE
2132 eieio ; Make sure we do not reorder
2133 stw r4,0(r3) ; Revalidate the PTE
2134
2135 eieio ; Make sure all updates come first
2136 stw r6,0(r7) ; Unlock the PCA
d7e50217 2137
55e303ae
A
2138hwpNxt32: bne- cr1,hwpEarly32 ; op says to bail now...
2139 lwz r31,mpAlias+4(r31) ; Chain on to the next
2140 b hwpSrc32 ; Check it out...
1c79356b 2141
55e303ae 2142 .align 5
1c79356b 2143
55e303ae 2144hwpNone32: mtctr r28 ; Get the post routine address
1c79356b 2145
55e303ae
A
2146 lwz r30,ppLink+4(r29) ; Save the old RC
2147 mr r3,r29 ; Get the physent address
2148 bctrl ; Call post routine
1c79356b 2149
55e303ae
A
2150 bl mapPhysUnlock ; Unlock the physent
2151
2152 mtmsr r26 ; Restore translation/mode/etc.
2153 isync
1c79356b 2154
55e303ae 2155 b hwpReturn ; Go restore registers and return...
1c79356b 2156
55e303ae 2157 .align 5
1c79356b 2158
55e303ae
A
2159hwpEarly32: lwz r30,ppLink+4(r29) ; Save the old RC
2160 mr r3,r29 ; Get the physent address
2161 bl mapPhysUnlock ; Unlock the physent
2162
2163 mtmsr r26 ; Restore translation/mode/etc.
2164 isync
2165
2166 b hwpReturn ; Go restore registers and return...
1c79356b 2167
55e303ae 2168 .align 5
1c79356b 2169
55e303ae
A
2170hwp64: bctrl ; Call preop routine
2171 bne-- hwpEarly64 ; preop says to bail now...
d7e50217 2172
55e303ae
A
2173 mtctr r27 ; Set up the op function address
2174
2175 li r0,0xFF
2176 ld r31,ppLink(r3) ; Get the pointer to the first mapping
2177 rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC00000000000003F
2178
2179hwpSrc64: andc. r31,r31,r0 ; Clean and test mapping address
2180 beq hwpNone64 ; Did not find one...
2181;
2182; Note: mapInvPte64 returns the PTE in R3 (or 0 if none), PTE high in R4,
2183; PTE low in R5. PTEG comes back locked if there is one
2184;
2185 bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
1c79356b 2186
55e303ae 2187 bctrl ; Call the op function
1c79356b 2188
55e303ae 2189 crmove cr1_eq,cr0_eq ; Save the return code
1c79356b 2190
55e303ae
A
2191 mr. r3,r3 ; Was there a previously valid PTE?
2192 beq-- hwpNxt64 ; Nope...
2193
2194 std r5,8(r3) ; Save bottom of PTE
2195 eieio ; Make sure we do not reorder
2196 std r4,0(r3) ; Revalidate the PTE
d7e50217 2197
55e303ae
A
2198 eieio ; Make sure all updates come first
2199 stw r6,0(r7) ; Unlock the PCA
2200
2201hwpNxt64: bne-- cr1,hwpEarly64 ; op says to bail now...
2202 ld r31,mpAlias(r31) ; Chain on to the next
2203 li r0,0xFF
2204 rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC00000000000003F
2205 b hwpSrc64 ; Check it out...
1c79356b 2206
55e303ae
A
2207 .align 5
2208
2209hwpNone64: mtctr r28 ; Get the post routine address
2210
2211 lwz r30,ppLink+4(r29) ; Save the old RC
2212 mr r3,r29 ; Get the physent address
2213 bctrl ; Call post routine
2214
2215 bl mapPhysUnlock ; Unlock the physent
2216
2217 mtmsrd r26 ; Restore translation/mode/etc.
1c79356b 2218 isync
55e303ae
A
2219 b hwpReturn ; Go restore registers and return...
2220
2221 .align 5
1c79356b 2222
55e303ae
A
2223hwpEarly64: lwz r30,ppLink+4(r29) ; Save the old RC
2224 mr r3,r29 ; Get the physent address
2225 bl mapPhysUnlock ; Unlock the physent
2226
2227 mtmsrd r26 ; Restore translation/mode/etc.
2228 isync
2229
2230hwpReturn: lwz r0,(FM_ALIGN((31-25+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2231 lwz r25,FM_ARG0+0x00(r1) ; Restore a register
2232 lwz r26,FM_ARG0+0x04(r1) ; Restore a register
2233 mr r3,r30 ; Pass back the RC
2234 lwz r27,FM_ARG0+0x08(r1) ; Restore a register
2235 lwz r28,FM_ARG0+0x0C(r1) ; Restore a register
2236 mtlr r0 ; Restore the return
2237 lwz r29,FM_ARG0+0x10(r1) ; Restore a register
2238 lwz r30,FM_ARG0+0x14(r1) ; Restore a register
2239 lwz r31,FM_ARG0+0x18(r1) ; Restore a register
2240 lwz r1,0(r1) ; Pop the stack
2241 blr ; Leave...
d7e50217 2242
d7e50217 2243
55e303ae
A
2244;
2245; The preop/op/postop function table.
2246; Each function must be 128-byte aligned and be no more than
2247; 32 instructions. If more than 32, we must fix the address calculations
2248; at the start of hwpOpBase
2249;
2250; The routine must set CR0_EQ in order to continue scan.
2251; If CR0_EQ is not set, an early return from the function is made.
2252;
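;
;	The dispatch math hw_walk_phys uses for this table, in C terms (the
;	function-pointer view is illustrative):
;
;	    typedef int (*hwp_fn_t)(void);
;	    hwp_fn_t hwp_entry(unsigned int n) {      /* n = function number */
;	        return (hwp_fn_t)((uintptr_t)hwpOpBase + (n << 7));  /* slwi 7 = *128 */
;	    }
;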
d7e50217 2253
55e303ae
A
2254 .align 7
2255
2256hwpOpBase:
2257
2258; Function 0 - No operation
2259
2260hwpNoop: cmplw r0,r0 ; Make sure CR0_EQ is set
2261 blr ; Just return...
1c79356b
A
2262
2263 .align 5
1c79356b 2264
55e303ae 2265; This is the continuation of function 4 - Set attributes in mapping
1c79356b 2266
55e303ae
A
2267; We changed the attributes of a mapped page. Make sure there are no cache paradoxes.
2268; NOTE: Do we have to deal with i-cache here?
2269
2270hwpSAM: li r11,4096 ; Get page size
d7e50217 2271
55e303ae
A
2272hwpSAMinvd: sub. r11,r11,r9 ; Back off a line
2273 dcbf r11,r5 ; Flush the line in the data cache
2274 bgt++ hwpSAMinvd ; Go do the rest of it...
2275
2276 sync ; Make sure it is done
1c79356b 2277
55e303ae
A
2278 li r11,4096 ; Get page size
2279
2280hwpSAMinvi: sub. r11,r11,r9 ; Back off a line
2281 icbi r11,r5 ; Flush the line in the icache
2282 bgt++ hwpSAMinvi ; Go do the rest of it...
2283
2284 sync ; Make sure it is done
1c79356b 2285
55e303ae
A
2286 cmpw r0,r0 ; Make sure we return CR0_EQ
2287 blr ; Return...
1c79356b 2288
1c79356b 2289
55e303ae 2290; Function 1 - Set protection in physent
1c79356b 2291
55e303ae
A
2292 .set .,hwpOpBase+(1*128) ; Generate error if previous function too long
2293
2294hwpSPrtPhy: li r5,ppLink+4 ; Get offset for flag part of physent
d7e50217 2295
55e303ae
A
2296hwpSPrtPhX: lwarx r4,r5,r29 ; Get the old flags
2297 rlwimi r4,r25,0,ppPPb-32,ppPPe-32 ; Stick in the new protection
2298 stwcx. r4,r5,r29 ; Try to stuff it
2299 bne-- hwpSPrtPhX ; Try again...
2300; Note: CR0_EQ is set because of stwcx.
2301 blr ; Return...
1c79356b 2302
1c79356b 2303
55e303ae 2304; Function 2 - Set protection in mapping
1c79356b 2305
55e303ae 2306 .set .,hwpOpBase+(2*128) ; Generate error if previous function too long
1c79356b 2307
55e303ae
A
2308hwpSPrtMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2309 lwz r8,mpVAddr+4(r31) ; Get the protection part of mapping
2310 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2311 li r0,lo16(mpPP) ; Get protection bits
2312 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2313 rlwinm r2,r25,0,mpPPb-32,mpPPb-32+2 ; Position new protection
2314 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2315 andc r5,r5,r0 ; Clear the old prot bits
2316 or r5,r5,r2 ; Move in the prot bits
2317 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2318 cmpw r0,r0 ; Make sure we return CR0_EQ
2319 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2320 blr ; Leave...
2321
2322; Function 3 - Set attributes in physent
1c79356b 2323
55e303ae 2324 .set .,hwpOpBase+(3*128) ; Generate error if previous function too long
1c79356b 2325
55e303ae 2326hwpSAtrPhy: li r5,ppLink+4 ; Get offset for flag part of physent
1c79356b 2327
55e303ae
A
2328hwpSAtrPhX: lwarx r4,r5,r29 ; Get the old flags
2329 rlwimi r4,r25,0,ppIb-32,ppGb-32 ; Stick in the new attributes
2330 stwcx. r4,r5,r29 ; Try to stuff it
2331 bne-- hwpSAtrPhX ; Try again...
2332; Note: CR0_EQ is set because of stwcx.
2333 blr ; Return...
de355530 2334
55e303ae 2335; Function 4 - Set attributes in mapping
d7e50217 2336
55e303ae
A
2337 .set .,hwpOpBase+(4*128) ; Generate error if previous function too long
2338
2339hwpSAtrMap: lwz r9,mpFlags(r31) ; Get the mapping flags
2340 lwz r8,mpVAddr+4(r31) ; Get the attribute part of mapping
2341 li r2,0x10 ; Force on coherent
2342 rlwinm. r9,r9,0,mpPermb,mpPermb ; Is the mapping permanent?
2343 li r0,lo16(mpWIMG) ; Get wimg mask
2344 crnot cr0_eq,cr0_eq ; Change CR0_EQ to true if mapping is permanent
2345 rlwimi r2,r2,mpIb-ppIb,mpIb-32,mpIb-32 ; Copy in the cache inhibited bit
2346 beqlr-- ; Leave if permanent mapping (before we trash R5)...
2347 andc r5,r5,r0 ; Clear the old wimg
2348 rlwimi r2,r2,32-(mpGb-ppGb),mpGb-32,mpGb-32 ; Copy in the guarded bit
2349 mfsprg r9,2 ; Feature flags
2350 or r5,r5,r2 ; Move in the new wimg
2351 rlwimi r8,r5,0,20,31 ; Copy into the mapping copy
2352 lwz r2,mpPAddr(r31) ; Get the physical address
2353 li r0,0xFFF ; Start a mask
2354 andi. r9,r9,pf32Byte+pf128Byte ; Get cache line size
2355 rlwinm r5,r0,0,1,0 ; Copy to top half
2356 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2357 rlwinm r2,r2,12,1,0 ; Copy to top and rotate to make physical address with junk left
2358 and r5,r5,r2 ; Clean stuff in top 32 bits
2359 andc r2,r2,r0 ; Clean bottom too
2360 rlwimi r5,r2,0,0,31 ; Insert low 32 bits to make the full physical address
2361 b hwpSAM ; Join common
1c79356b 2362
55e303ae
A
2363; NOTE: we moved the remainder of the code out of here because it
2364; did not fit in the 128 bytes allotted. It got stuck into the free space
2365; at the end of the no-op function.
2366
2367
2368
de355530 2369
55e303ae 2370; Function 5 - Clear reference in physent
1c79356b 2371
55e303ae 2372 .set .,hwpOpBase+(5*128) ; Generate error if previous function too long
1c79356b 2373
55e303ae 2374hwpCRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
1c79356b 2375
55e303ae
A
2376hwpCRefPhX: lwarx r4,r5,r29 ; Get the old flags
2377 rlwinm r4,r4,0,ppRb+1-32,ppRb-1-32 ; Clear R
2378 stwcx. r4,r5,r29 ; Try to stuff it
2379 bne-- hwpCRefPhX ; Try again...
2380; Note: CR0_EQ is set because of stwcx.
2381 blr ; Return...
1c79356b
A
2382
2383
55e303ae 2384; Function 6 - Clear reference in mapping
1c79356b 2385
55e303ae 2386 .set .,hwpOpBase+(6*128) ; Generate error if previous function too long
1c79356b 2387
55e303ae
A
2388hwpCRefMap: li r0,lo16(mpR) ; Get reference bit
2389 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2390 andc r5,r5,r0 ; Clear in PTE copy
2391 andc r8,r8,r0 ; and in the mapping
2392 cmpw r0,r0 ; Make sure we return CR0_EQ
2393 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2394 blr ; Return...
1c79356b 2395
de355530 2396
55e303ae 2397; Function 7 - Clear change in physent
1c79356b 2398
55e303ae 2399 .set .,hwpOpBase+(7*128) ; Generate error if previous function too long
1c79356b 2400
55e303ae 2401hwpCCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
1c79356b 2402
55e303ae
A
2403hwpCCngPhX: lwarx r4,r5,r29 ; Get the old flags
2404 rlwinm r4,r4,0,ppCb+1-32,ppCb-1-32 ; Clear C
2405 stwcx. r4,r5,r29 ; Try to stuff it
2406 bne-- hwpCCngPhX ; Try again...
2407; Note: CR0_EQ is set because of stwcx.
2408 blr ; Return...
1c79356b 2409
de355530 2410
55e303ae 2411; Function 8 - Clear change in mapping
1c79356b 2412
55e303ae
A
2413 .set .,hwpOpBase+(8*128) ; Generate error if previous function too long
2414
2415hwpCCngMap: li r0,lo16(mpC) ; Get change bit
2416 lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2417 andc r5,r5,r0 ; Clear in PTE copy
2418 andc r8,r8,r0 ; and in the mapping
2419 cmpw r0,r0 ; Make sure we return CR0_EQ
2420 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2421 blr ; Return...
d7e50217 2422
de355530 2423
55e303ae 2424; Function 9 - Set reference in physent
d7e50217 2425
55e303ae 2426 .set .,hwpOpBase+(9*128) ; Generate error if previous function too long
d7e50217 2427
55e303ae
A
2428hwpSRefPhy: li r5,ppLink+4 ; Get offset for flag part of physent
2429
2430hwpSRefPhX: lwarx r4,r5,r29 ; Get the old flags
2431 ori r4,r4,lo16(ppR) ; Set the reference
2432 stwcx. r4,r5,r29 ; Try to stuff it
2433 bne-- hwpSRefPhX ; Try again...
2434; Note: CR0_EQ is set because of stwcx.
2435 blr ; Return...
d7e50217 2436
1c79356b 2437
55e303ae 2438; Function 10 - Set reference in mapping
d7e50217 2439
55e303ae
A
2440 .set .,hwpOpBase+(10*128) ; Generate error if previous function too long
2441
2442hwpSRefMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2443 ori r5,r5,lo16(mpR) ; Set reference in PTE low
2444 ori r8,r8,lo16(mpR) ; Set reference in mapping
2445 cmpw r0,r0 ; Make sure we return CR0_EQ
2446 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2447 blr ; Return...
2448
2449; Function 11 - Set change in physent
1c79356b 2450
55e303ae 2451 .set .,hwpOpBase+(11*128) ; Generate error if previous function too long
1c79356b 2452
55e303ae 2453hwpSCngPhy: li r5,ppLink+4 ; Get offset for flag part of physent
1c79356b 2454
55e303ae
A
2455hwpSCngPhX: lwarx r4,r5,r29 ; Get the old flags
2456 ori r4,r4,lo16(ppC) ; Set the change bit
2457 stwcx. r4,r5,r29 ; Try to stuff it
2458 bne-- hwpSCngPhX ; Try again...
2459; Note: CR0_EQ is set because of stwcx.
2460 blr ; Return...
de355530 2461
55e303ae 2462; Function 12 - Set change in mapping
1c79356b 2463
55e303ae 2464 .set .,hwpOpBase+(12*128) ; Generate error if previous function too long
1c79356b 2465
55e303ae
A
2466hwpSCngMap: lwz r8,mpVAddr+4(r31) ; Get the flag part of mapping
2467 ori r5,r5,lo16(mpC) ; Set change in PTE low
2468 ori r8,r8,lo16(mpC) ; Set change in mapping
2469 cmpw r0,r0 ; Make sure we return CR0_EQ
2470 stw r8,mpVAddr+4(r31) ; Set the flag part of mapping
2471 blr ; Return...
1c79356b 2472
55e303ae 2473; Function 13 - Test reference in physent
1c79356b 2474
55e303ae
A
2475 .set .,hwpOpBase+(13*128) ; Generate error if previous function too long
2476
2477hwpTRefPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
2478 rlwinm. r0,r0,0,ppRb-32,ppRb-32 ; Isolate reference bit and see if 0
2479 blr ; Return (CR0_EQ set to continue if reference is off)...
1c79356b 2480
1c79356b 2481
55e303ae 2482; Function 14 - Test reference in mapping
1c79356b 2483
55e303ae 2484 .set .,hwpOpBase+(14*128) ; Generate error if previous function too long
de355530 2485
55e303ae
A
2486hwpTRefMap: rlwinm. r0,r5,0,mpRb-32,mpRb-32 ; Isolate reference bit and see if 0
2487 blr ; Return (CR0_EQ set to continue if reference is off)...
2488
2489; Function 15 - Test change in physent
1c79356b 2490
55e303ae 2491 .set .,hwpOpBase+(15*128) ; Generate error if previous function too long
1c79356b 2492
55e303ae
A
2493hwpTCngPhy: lwz r0,ppLink+4(r29) ; Get the flags from physent
2494 rlwinm. r0,r0,0,ppCb-32,ppCb-32 ; Isolate change bit and see if 0
2495 blr ; Return (CR0_EQ set to continue if change is off)...
2496
2497
2498; Function 16 - Test change in mapping
2499
2500 .set .,hwpOpBase+(16*128) ; Generate error if previous function too long
d7e50217 2501
55e303ae
A
2502hwpTCngMap: rlwinm. r0,r5,0,mpCb-32,mpCb-32 ; Isolate change bit and see if 0
2503 blr ; Return (CR0_EQ set to continue if reference is off)...
2504
2505 .set .,hwpOpBase+(17*128) ; Generate error if previous function too long
2506
d7e50217 2507
d7e50217 2508
de355530 2509;
55e303ae
A
2510; int hw_protect(pmap, va, prot, *nextva) - Changes protection on a specific mapping.
2511;
2512; Returns:
2513; mapRtOK - if all is ok
2514; mapRtBadLk - if mapping lock fails
2515; mapRtPerm - if mapping is permanent
2516; mapRtNotFnd - if mapping is not found
2517; mapRtBlock - if mapping is a block
de355530 2518;
55e303ae
A
2519 .align 5
2520 .globl EXT(hw_protect)
d7e50217 2521
55e303ae
A
2522LEXT(hw_protect)
2523 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2524 mflr r0 ; Save the link register
2525 stw r24,FM_ARG0+0x00(r1) ; Save a register
2526 stw r25,FM_ARG0+0x04(r1) ; Save a register
2527 mr r25,r7 ; Remember address of next va
2528 stw r26,FM_ARG0+0x08(r1) ; Save a register
2529 stw r27,FM_ARG0+0x0C(r1) ; Save a register
2530 stw r28,FM_ARG0+0x10(r1) ; Save a register
2531 mr r24,r6 ; Save the new protection flags
2532 stw r29,FM_ARG0+0x14(r1) ; Save a register
2533 stw r30,FM_ARG0+0x18(r1) ; Save a register
2534 stw r31,FM_ARG0+0x1C(r1) ; Save a register
2535 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
1c79356b 2536
55e303ae
A
2537 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
2538 lwz r7,pmapvr+4(r3) ; Get the second part
d7e50217 2539
d7e50217 2540
55e303ae 2541 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
9bccf70c 2542
55e303ae
A
2543 mr r27,r11 ; Remember the old MSR
2544 mr r26,r12 ; Remember the feature bits
9bccf70c 2545
55e303ae 2546 xor r28,r3,r7 ; Change the common 32- and 64-bit half
9bccf70c 2547
55e303ae
A
2548 bf-- pf64Bitb,hpSF1 ; skip if 32-bit...
2549
2550 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
9bccf70c 2551
55e303ae
A
2552hpSF1: mr r29,r4 ; Save top half of vaddr
2553 mr r30,r5 ; Save the bottom half
2554
2555 la r3,pmapSXlk(r28) ; Point to the pmap search lock
2556 bl sxlkShared ; Go get a shared lock on the mapping lists
2557 mr. r3,r3 ; Did we get the lock?
2558 bne-- hpBadLock ; Nope...
d7e50217 2559
55e303ae
A
2560 mr r3,r28 ; get the pmap address
2561 mr r4,r29 ; Get bits 0:31 to look for
2562 mr r5,r30 ; Get bits 32:64
de355530 2563
55e303ae 2564 bl EXT(mapSearch) ; Go see if we can find it (note: R7 comes back with mpFlags)
d7e50217 2565
55e303ae
A
2566 andi. r7,r7,lo16(mpSpecial|mpNest|mpPerm|mpBlock|mpRIP) ; Are we allowed to change it or is it being removed?
2567 mr. r31,r3 ; Save the mapping if we found it
2568 cmplwi cr1,r7,0 ; Anything special going on?
2569 mr r29,r4 ; Save next va high half
2570 mr r30,r5 ; Save next va low half
d7e50217 2571
55e303ae 2572 beq-- hpNotFound ; Not found...
de355530 2573
55e303ae 2574 bne-- cr1,hpNotAllowed ; Something special is happening...
d7e50217 2575
55e303ae
A
2576 bt++ pf64Bitb,hpDo64 ; Split for 64 bit
2577
2578 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
2579
2580 rlwimi r5,r24,0,mpPPb-32,mpPPb-32+2 ; Stick in the new pp
2581 mr. r3,r3 ; Was there a previously valid PTE?
2582
2583 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
2584
2585 beq-- hpNoOld32 ; Nope...
1c79356b 2586
55e303ae
A
2587 stw r5,4(r3) ; Store second half of PTE
2588 eieio ; Make sure we do not reorder
2589 stw r4,0(r3) ; Revalidate the PTE
2590
2591 eieio ; Make sure all updates come first
2592 stw r6,0(r7) ; Unlock PCA
2593
2594hpNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2595 bl sxlkUnlock ; Unlock the search list
de355530 2596
55e303ae
A
2597 li r3,mapRtOK ; Set normal return
2598 b hpR32 ; Join common...
2599
2600 .align 5
1c79356b 2601
d7e50217 2602
55e303ae
A
2603hpDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
2604
2605 rldimi r5,r24,0,mpPPb ; Stick in the new pp
2606 mr. r3,r3 ; Was there a previously valid PTE?
2607
2608 stb r5,mpVAddr+7(r31) ; Set the new pp field (do not muck with the rest)
2609
2610 beq-- hpNoOld64 ; Nope...
d7e50217 2611
55e303ae
A
2612 std r5,8(r3) ; Store second half of PTE
2613 eieio ; Make sure we do not reorder
2614 std r4,0(r3) ; Revalidate the PTE
de355530 2615
55e303ae
A
2616 eieio ; Make sure all updates come first
2617 stw r6,0(r7) ; Unlock PCA
de355530 2618
55e303ae
A
2619hpNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2620 bl sxlkUnlock ; Unlock the search list
de355530 2621
55e303ae
A
2622 li r3,mapRtOK ; Set normal return
2623 b hpR64 ; Join common...
de355530 2624
55e303ae
A
2625 .align 5
2626
2627hpReturn: bt++ pf64Bitb,hpR64 ; Yes...
2628
2629hpR32: mtmsr r27 ; Restore enables/translation/etc.
2630 isync
2631 b hpReturnC ; Join common...
2632
2633hpR64: mtmsrd r27 ; Restore enables/translation/etc.
2634 isync
2635
2636hpReturnC: stw r29,0(r25) ; Save the top of the next va
2637 stw r30,4(r25) ; Save the bottom of the next va
2638 lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2639 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
2640 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
2641 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
2642 mtlr r0 ; Restore the return
2643 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
2644 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
2645 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
2646 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
2647 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
2648 lwz r1,0(r1) ; Pop the stack
2649 blr ; Leave...
2650
2651 .align 5
2652
2653hpBadLock: li r3,mapRtBadLk ; Set lock time out error code
2654 b hpReturn ; Leave....
d7e50217 2655
55e303ae
A
2656hpNotFound: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2657 bl sxlkUnlock ; Unlock the search list
d7e50217 2658
55e303ae
A
2659 li r3,mapRtNotFnd ; Set that we did not find the requested page
2660 b hpReturn ; Leave....
2661
2662hpNotAllowed:
2663 rlwinm. r0,r7,0,mpRIPb,mpRIPb ; Is it actually being removed?
2664 la r3,pmapSXlk(r28) ; Point to the pmap search lock
2665 bne-- hpNotFound ; Yeah...
2666 bl sxlkUnlock ; Unlock the search list
2667
2668 li r3,mapRtBlock ; Assume it was a block
2669 andi. r7,r7,lo16(mpBlock) ; Is this a block?
2670 bne++ hpReturn ; Yes, leave...
2671
2672 li r3,mapRtPerm ; Set that we hit a permanent page
2673 b hpReturn ; Leave....
9bccf70c 2674
9bccf70c 2675
55e303ae
A
2676;
2677; int hw_test_rc(pmap, va, reset) - tests RC on a specific va
2678;
2679; Returns following code ORed with RC from mapping
2680; mapRtOK - if all is ok
2681; mapRtBadLk - if mapping lock fails
2682; mapRtNotFnd - if mapping is not found
2683;
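;
;	Caller-side sketch: the status code comes back ORed with the R and C
;	bits (in their mpR/mpC positions) taken from the mapping:
;
;	    unsigned int ret = hw_test_rc(pmap, va, 1);   /* 1 = also reset RC */
;	    if ((ret & ~(mpR | mpC)) == mapRtOK) {
;	        int referenced = (ret & mpR) != 0;
;	        int changed    = (ret & mpC) != 0;
;	        /* ... */
;	    }
;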
2684 .align 5
2685 .globl EXT(hw_test_rc)
9bccf70c 2686
55e303ae
A
2687LEXT(hw_test_rc)
2688 stwu r1,-(FM_ALIGN((31-24+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
2689 mflr r0 ; Save the link register
2690 stw r24,FM_ARG0+0x00(r1) ; Save a register
2691 stw r25,FM_ARG0+0x04(r1) ; Save a register
2692 stw r26,FM_ARG0+0x08(r1) ; Save a register
2693 stw r27,FM_ARG0+0x0C(r1) ; Save a register
2694 stw r28,FM_ARG0+0x10(r1) ; Save a register
2695 mr r24,r6 ; Save the reset request
2696 stw r29,FM_ARG0+0x14(r1) ; Save a register
2697 stw r30,FM_ARG0+0x18(r1) ; Save a register
2698 stw r31,FM_ARG0+0x1C(r1) ; Save a register
2699 stw r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
9bccf70c 2700
55e303ae
A
2701 lwz r6,pmapvr(r3) ; Get the first part of the VR translation for pmap
2702 lwz r7,pmapvr+4(r3) ; Get the second part
0b4e3aa0 2703
9bccf70c 2704
55e303ae 2705 bl EXT(mapSetUp) ; Turn off interrupts, translation, and possibly enter 64-bit
9bccf70c 2706
55e303ae
A
2707 mr r27,r11 ; Remember the old MSR
2708 mr r26,r12 ; Remember the feature bits
9bccf70c 2709
55e303ae 2710 xor r28,r3,r7 ; Change the common 32- and 64-bit half
9bccf70c 2711
55e303ae 2712 bf-- pf64Bitb,htrSF1 ; skip if 32-bit...
1c79356b 2713
55e303ae 2714 rldimi r28,r6,32,0 ; Shift the fixed upper part of the physical over and cram in top
1c79356b 2715
55e303ae
A
2716htrSF1: mr r29,r4 ; Save top half of vaddr
2717 mr r30,r5 ; Save the bottom half
1c79356b 2718
55e303ae
A
2719 la r3,pmapSXlk(r28) ; Point to the pmap search lock
2720 bl sxlkShared ; Go get a shared lock on the mapping lists
2721 mr. r3,r3 ; Did we get the lock?
2722 li r25,0 ; Clear RC
2723 bne-- htrBadLock ; Nope...
2724
2725 mr r3,r28 ; get the pmap address
2726 mr r4,r29 ; Get bits 0:31 to look for
2727 mr r5,r30 ; Get bits 32:64
d7e50217 2728
55e303ae 2729 bl EXT(mapSearch) ; Go see if we can find it (R7 comes back with mpFlags)
9bccf70c 2730
55e303ae
A
2731 andi. r0,r7,lo16(mpSpecial|mpNest|mpPerm|mpBlock|mpRIP) ; Are we allowed to change it or is it being removed?
2732 mr. r31,r3 ; Save the mapping if we found it
2733 cmplwi cr1,r0,0 ; Are we removing it?
2734 crorc cr0_eq,cr0_eq,cr1_eq ; Did we not find it or is it being removed?
d7e50217 2735
55e303ae 2736 bt-- cr0_eq,htrNotFound ; Not found, something special, or being removed...
1c79356b 2737
55e303ae
A
2738 bt++ pf64Bitb,htrDo64 ; Split for 64 bit
2739
2740 bl mapInvPte32 ; Invalidate and lock PTEG, also merge into physent
2741
2742 cmplwi cr1,r24,0 ; Do we want to clear RC?
2743 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
2744 mr. r3,r3 ; Was there a previously valid PTE?
2745 li r0,lo16(mpR|mpC) ; Get bits to clear
9bccf70c 2746
55e303ae
A
2747 and r25,r5,r0 ; Save the RC bits
2748 beq++ cr1,htrNoClr32 ; Nope...
2749
2750 andc r12,r12,r0 ; Clear mapping copy of RC
2751 andc r5,r5,r0 ; Clear PTE copy of RC
2752 sth r12,mpVAddr+6(r31) ; Set the new RC
9bccf70c 2753
55e303ae 2754htrNoClr32: beq-- htrNoOld32 ; No previously valid PTE...
d7e50217 2755
55e303ae
A
2756 sth r5,6(r3) ; Store updated RC
2757 eieio ; Make sure we do not reorder
2758 stw r4,0(r3) ; Revalidate the PTE
9bccf70c 2759
55e303ae
A
2760 eieio ; Make sure all updates come first
2761 stw r6,0(r7) ; Unlock PCA
1c79356b 2762
55e303ae
A
2763htrNoOld32: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2764 bl sxlkUnlock ; Unlock the search list
2765 li r3,mapRtOK ; Set normal return
2766 b htrR32 ; Join common...
1c79356b 2767
55e303ae
A
2768 .align 5
2769
2770
2771htrDo64: bl mapInvPte64 ; Invalidate and lock PTEG, also merge into physent
2772
2773 cmplwi cr1,r24,0 ; Do we want to clear RC?
2774 lwz r12,mpVAddr+4(r31) ; Get the bottom of the mapping vaddr field
2775 mr. r3,r3 ; Was there a previously valid PTE?
2776 li r0,lo16(mpR|mpC) ; Get bits to clear
1c79356b 2777
55e303ae
A
2778 and r25,r5,r0 ; Save the RC bits
2779 beq++ cr1,htrNoClr64 ; Nope...
2780
2781 andc r12,r12,r0 ; Clear mapping copy of RC
2782 andc r5,r5,r0 ; Clear PTE copy of RC
2783 sth r12,mpVAddr+6(r31) ; Set the new RC
1c79356b 2784
55e303ae
A
2785htrNoClr64: beq-- htrNoOld64 ; Nope, no previous PTE...
2786
2787 sth r5,14(r3) ; Store updated RC
2788 eieio ; Make sure we do not reorder
2789 std r4,0(r3) ; Revalidate the PTE
1c79356b 2790
55e303ae
A
2791 eieio ; Make sure all updates come first
2792 stw r6,0(r7) ; Unlock PCA
1c79356b 2793
55e303ae
A
2794htrNoOld64: la r3,pmapSXlk(r28) ; Point to the pmap search lock
2795 bl sxlkUnlock ; Unlock the search list
2796 li r3,mapRtOK ; Set normal return
2797 b htrR64 ; Join common...
de355530 2798
55e303ae
A
2799 .align 5
2800
2801htrReturn: bt++ pf64Bitb,htrR64 ; Yes...
de355530 2802
55e303ae
A
2803htrR32: mtmsr r27 ; Restore enables/translation/etc.
2804 isync
2805 b htrReturnC ; Join common...
de355530 2806
55e303ae
A
2807htrR64: mtmsrd r27 ; Restore enables/translation/etc.
2808 isync
1c79356b 2809
55e303ae
A
2810htrReturnC: lwz r0,(FM_ALIGN((31-24+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Restore the return
2811 or r3,r3,r25 ; Send the RC bits back
2812 lwz r24,FM_ARG0+0x00(r1) ; Restore a register
2813 lwz r25,FM_ARG0+0x04(r1) ; Restore a register
2814 lwz r26,FM_ARG0+0x08(r1) ; Restore a register
2815 mtlr r0 ; Restore the return
2816 lwz r27,FM_ARG0+0x0C(r1) ; Restore a register
2817 lwz r28,FM_ARG0+0x10(r1) ; Restore a register
2818 lwz r29,FM_ARG0+0x14(r1) ; Restore a register
2819 lwz r30,FM_ARG0+0x18(r1) ; Restore a register
2820 lwz r31,FM_ARG0+0x1C(r1) ; Restore a register
2821 lwz r1,0(r1) ; Pop the stack
1c79356b
A
2822 blr ; Leave...
2823
2824 .align 5
2825
55e303ae
A
2826htrBadLock: li r3,mapRtBadLk ; Set lock time out error code
2827 b htrReturn ; Leave....
1c79356b 2828
55e303ae
A
2829htrNotFound:
2830 la r3,pmapSXlk(r28) ; Point to the pmap search lock
2831 bl sxlkUnlock ; Unlock the search list
1c79356b 2832
55e303ae
A
2833 li r3,mapRtNotFnd ; Set that we did not find the requested page
2834 b htrReturn ; Leave....
2835
2836
2837
2838;
2839; mapPhysFindLock - find physent list and lock it
2840; R31 points to mapping
2841;
2842 .align 5
2843
2844mapPhysFindLock:
2845 lbz r4,mpFlags+1(r31) ; Get the index into the physent bank table
2846 lis r3,ha16(EXT(pmap_mem_regions)) ; Get high order of physent table (note use of ha16 to get value appropriate for an addi of low part)
2847 rlwinm r4,r4,2,0,29 ; Change index into byte offset
2848 addi r4,r4,lo16(EXT(pmap_mem_regions)) ; Get low part of address of entry
2849 add r3,r3,r4 ; Point to table entry
2850 lwz r5,mpPAddr(r31) ; Get physical page number
2851 lwz r7,mrStart(r3) ; Get the start of range
2852 lwz r3,mrPhysTab(r3) ; Get the start of the entries for this bank
2853 sub r6,r5,r7 ; Get index to physent
2854 rlwinm r6,r6,3,0,28 ; Get offset to physent
2855 add r3,r3,r6 ; Point right to the physent
2856 b mapPhysLock ; Join in the lock...
2857
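;
;	The index arithmetic above, as a C sketch. The field names follow the
;	offsets used here (mrStart, mrPhysTab, mpPAddr); the struct layout is
;	an assumption:
;
;	    phys_entry_t *find_physent(mapping_t *mp) {
;	        mem_region_t *mr = &pmap_mem_regions[mp->bank_index];
;	        return &mr->mrPhysTab[mp->mpPAddr - mr->mrStart];  /* 8-byte entries */
;	    }
;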
2858;
2859; mapPhysLock - lock a physent list
2860; R3 contains list header
2861;
2862 .align 5
2863
2864mapPhysLockS:
2865 li r2,lgKillResv ; Get a spot to kill reservation
2866 stwcx. r2,0,r2 ; Kill it...
2867
2868mapPhysLockT:
2869 lwz r2,ppLink(r3) ; Get physent chain header
2870 rlwinm. r2,r2,0,0,0 ; Is lock clear?
2871 bne-- mapPhysLockT ; Nope, still locked...
2872
2873mapPhysLock:
2874 lwarx r2,0,r3 ; Get the lock
2875 rlwinm. r0,r2,0,0,0 ; Is it locked?
2876 oris r0,r2,0x8000 ; Set the lock bit
2877 bne-- mapPhysLockS ; It is locked, spin on it...
2878 stwcx. r0,0,r3 ; Try to stuff it back...
2879 bne-- mapPhysLock ; Collision, try again...
2880 isync ; Clear any speculations
2881 blr ; Leave...
2882
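;
;	The lock protocol above in C11 atomics: try the reservation (here a
;	weak CAS standing in for lwarx/stwcx.), and on contention spin on
;	plain loads so we do not hold a reservation while waiting. A sketch,
;	not the kernel's lock API:
;
;	    void phys_lock(_Atomic uint32_t *link) {
;	        for (;;) {
;	            uint32_t old = atomic_load_explicit(link, memory_order_relaxed);
;	            if (!(old & 0x80000000u) &&
;	                atomic_compare_exchange_weak_explicit(link, &old,
;	                        old | 0x80000000u,
;	                        memory_order_acquire, memory_order_relaxed))
;	                return;                             /* isync ~= acquire */
;	            while (atomic_load_explicit(link, memory_order_relaxed)
;	                   & 0x80000000u)
;	                ;                                   /* the mapPhysLockT spin */
;	        }
;	    }
;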
2883
2884;
2885; mapPhysUnlock - unlock a physent list
2886; R3 contains list header
2887;
2888 .align 5
2889
2890mapPhysUnlock:
2891 lwz r0,ppLink(r3) ; Get physent chain header
2892 rlwinm r0,r0,0,1,31 ; Clear the lock bit
2893 eieio ; Make sure unlock comes last
2894 stw r0,ppLink(r3) ; Unlock the list
2895 blr
2896
2897;
2898; mapPhysMerge - merge the RC bits into the master copy
2899; R3 points to the physent
2900; R4 contains the RC bits
2901;
2902; Note: we just return if RC is 0
2903;
2904 .align 5
2905
2906mapPhysMerge:
2907 rlwinm. r4,r4,PTE1_REFERENCED_BIT+(64-ppRb),ppRb-32,ppCb-32 ; Isolate RC bits
2908 la r5,ppLink+4(r3) ; Point to the RC field
2909 beqlr-- ; Leave if RC is 0...
2910
2911mapPhysMergeT:
2912 lwarx r6,0,r5 ; Get the RC part
2913 or r6,r6,r4 ; Merge in the RC
2914 stwcx. r6,0,r5 ; Try to stuff it back...
2915 bne-- mapPhysMergeT ; Collision, try again...
2916 blr ; Leave...
2917
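;
;	In C11 terms the merge above collapses to a single atomic OR (a sketch):
;
;	    void phys_merge_rc(_Atomic uint32_t *flags, uint32_t rc) {
;	        if (rc) atomic_fetch_or_explicit(flags, rc, memory_order_relaxed);
;	    }
;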
2918;
2919; Sets the physent link pointer and preserves all flags
2920; The list is locked
2921; R3 points to physent
2922; R4 has link to set
2923;
2924
2925 .align 5
2926
2927mapPhyCSet32:
2928 la r5,ppLink+4(r3) ; Point to the link word
2929
2930mapPhyCSetR:
2931 lwarx r2,0,r5 ; Get the link and flags
2932 rlwimi r4,r2,0,26,31 ; Insert the flags
2933 stwcx. r4,0,r5 ; Stick them back
2934 bne-- mapPhyCSetR ; Someone else did something, try again...
2935 blr ; Return...
2936
2937 .align 5
2938
2939mapPhyCSet64:
2940 li r0,0xFF ; Get mask to clean up mapping pointer
2941 rldicl r0,r0,62,0 ; Rotate clean up mask to get 0xC00000000000003F
2942
2943mapPhyCSet64x:
2944 ldarx r2,0,r3 ; Get the link and flags
2945 and r5,r2,r0 ; Isolate the flags
2946 or r6,r4,r5 ; Add them to the link
2947 stdcx. r6,0,r3 ; Stick them back
2948 bne-- mapPhyCSet64x ; Someone else did something, try again...
2949 blr ; Return...
2950
2951;
2952; mapBumpBusy - increment the busy count on a mapping
2953; R3 points to mapping
2954;
2955
2956 .align 5
2957
2958mapBumpBusy:
2959 lwarx r4,0,r3 ; Get mpBusy
2960 addis r4,r4,0x0100 ; Bump the busy count
2961 stwcx. r4,0,r3 ; Save it back
2962 bne-- mapBumpBusy ; This did not work, try again...
2963 blr ; Leave...
2964
2965;
2966; mapDropBusy - decrement the busy count on a mapping
2967; R3 points to mapping
2968;
2969
2970 .globl EXT(mapping_drop_busy)
2971 .align 5
2972
2973LEXT(mapping_drop_busy)
2974mapDropBusy:
2975 lwarx r4,0,r3 ; Get mpBusy
2976 addis r4,r4,0xFF00 ; Drop the busy count
2977 stwcx. r4,0,r3 ; Save it back
2978 bne-- mapDropBusy ; This did not work, try again...
2979 blr ; Leave...
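;
; The busy count lives in the top byte of mpFlags, so both routines
; adjust it with a single addis inside a reservation loop: adding 0x0100
; to the upper halfword bumps the byte, and adding 0xFF00 (-0x0100
; modulo 2^16) drops it. Roughly, in C (illustrative sketch only):
;
;   do {
;       f = load_reserved(&mp->mpFlags);                       // lwarx
;   } while (!store_conditional(&mp->mpFlags, f + (delta << 24)));
;   // delta is +1 to bump, -1 (i.e. 0xFF) to drop
;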
2980
2981;
2982; mapDrainBusy - drain the busy count on a mapping
2983; R3 points to mapping
2984; Note: we already have a busy for ourselves. Only one
2985; busy per processor is allowed, so we just spin here
2986; waiting for the count to drop to 1.
2987; Also, the mapping can not be on any lists when we do this
2988; so all we are doing is waiting until it can be released.
2989;
2990
2991 .align 5
2992
2993mapDrainBusy:
2994 lwz r4,mpFlags(r3) ; Get mpBusy
2995 rlwinm r4,r4,8,24,31 ; Clean it up
2996 cmplwi r4,1 ; Is it just our busy?
2997 beqlr++ ; Yeah, it is clear...
2998 b mapDrainBusy ; Try again...
2999
3000
3001
3002;
3003; handleDSeg - handle a data segment fault
3004; handleISeg - handle an instruction segment fault
3005;
3006; All that we do here is to map these to DSI or ISI and ensure
3007; that the hash bit is not set. This forces the fault code
3008; to also handle the missing segment.
3009;
3010; At entry R2 contains per_proc, R13 contains savarea pointer,
3011; and R11 is the exception code.
3012;
3013
3014 .align 5
3015 .globl EXT(handleDSeg)
3016
3017LEXT(handleDSeg)
3018
3019 li r11,T_DATA_ACCESS ; Change fault to DSI
3020 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3021 b EXT(handlePF) ; Join common...
3022
3023 .align 5
3024 .globl EXT(handleISeg)
3025
3026LEXT(handleISeg)
3027
3028 li r11,T_INSTRUCTION_ACCESS ; Change fault to ISI
3029 stw r11,saveexception(r13) ; Change the exception code from seg fault to PTE miss
3030 b EXT(handlePF) ; Join common...
3031
3032
3033/*
3034 * handlePF - handle a page fault interruption
3035 *
3036 * At entry R2 contains per_proc, R13 contains savarea pointer,
3037 * and R11 is the exception code.
3038 *
3039 * This first part does a quick check to see if we can handle the fault.
3040 * We cannot handle any kind of protection exception here, so we pass
3041 * them up to the next level.
3042 *
3043 * NOTE: In order for a page-fault redrive to work, the translation miss
3044 * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur
3045 * before we come here.
3046 */
3047
3048 .align 5
3049 .globl EXT(handlePF)
3050
3051LEXT(handlePF)
3052
3053 mfsprg r12,2 ; Get feature flags
3054 cmplwi r11,T_INSTRUCTION_ACCESS ; See if this is for the instruction
3055 lwz r8,savesrr1+4(r13) ; Get the MSR to determine mode
3056 mtcrf 0x02,r12 ; move pf64Bit to cr6
3057 lis r0,hi16(dsiNoEx|dsiProt|dsiInvMode|dsiAC) ; Get the types that we cannot handle here
3058 lwz r18,SAVflags(r13) ; Get the flags
3059
3060 beq-- gotIfetch ; We have an IFETCH here...
3061
3062 lwz r27,savedsisr(r13) ; Get the DSISR
3063 lwz r29,savedar(r13) ; Get the first half of the DAR
3064 lwz r30,savedar+4(r13) ; And second half
3065
3066 b ckIfProt ; Go check if this is a protection fault...
3067
3068gotIfetch: andis. r27,r8,hi16(dsiValid) ; Clean this up to construct a DSISR value
3069 lwz r29,savesrr0(r13) ; Get the first half of the instruction address
3070 lwz r30,savesrr0+4(r13) ; And second half
3071 stw r27,savedsisr(r13) ; Save the "constructed" DSISR
3072
3073ckIfProt: and. r4,r27,r0 ; Is this a non-handlable exception?
3074 li r20,64 ; Set a limit of 64 nests for sanity check
3075 bne-- hpfExit ; Yes... (probably not though)
3076
3077;
3078; Note: if the RI is on, we are accessing user space from the kernel, therefore we
3079; should be loading the user pmap here.
3080;
3081
3082 andi. r0,r8,lo16(MASK(MSR_PR)|MASK(MSR_RI)) ; Are we addressing user or kernel space?
3083 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
3084 mr r19,r2 ; Remember the per_proc
3085 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
3086 mr r23,r30 ; Save the low part of faulting address
3087 beq-- hpfInKern ; Skip if we are in the kernel
3088 la r8,ppUserPmap(r19) ; Point to the current user pmap
3089
3090hpfInKern: mr r22,r29 ; Save the high part of faulting address
3091
3092 bt-- pf64Bitb,hpf64a ; If 64-bit, skip the next bit...
3093
3094;
3095; On 32-bit machines we emulate a segment exception by loading unused SRs with a
3096; predefined value that corresponds to no address space. When we see that value
3097; we turn off the PTE miss bit in the DSISR to drive the code later on that will
3098; cause the proper SR to be loaded.
3099;
3100
3101 lwz r28,4(r8) ; Pick up the pmap
3102 rlwinm. r18,r18,0,SAVredriveb,SAVredriveb ; Was this a redrive?
3103 mr r25,r28 ; Save the original pmap (in case we nest)
3104 bne hpfNest ; Segs are not ours if so...
3105 mfsrin r4,r30 ; Get the SR that was used for translation
3106 cmplwi r4,invalSpace ; Is this a simulated segment fault?
3107 bne++ hpfNest ; No...
3108
3109 rlwinm r27,r27,0,dsiMissb+1,dsiMissb-1 ; Clear the PTE miss bit in DSISR
3110 b hpfNest ; Join on up...
3111
3112 .align 5
3113
3114 nop ; Push hpfNest to a 32-byte boundary
3115 nop ; Push hpfNest to a 32-byte boundary
3116 nop ; Push hpfNest to a 32-byte boundary
3117 nop ; Push hpfNest to a 32-byte boundary
3118 nop ; Push hpfNest to a 32-byte boundary
3119 nop ; Push hpfNest to a 32-byte boundary
3120
3121hpf64a: ld r28,0(r8) ; Get the pmap pointer (64-bit)
3122 mr r25,r28 ; Save the original pmap (in case we nest)
3123
3124;
3125; This is where we loop descending nested pmaps
3126;
3127
3128hpfNest: la r3,pmapSXlk(r28) ; Point to the pmap search lock
3129 addi r20,r20,-1 ; Count nest try
3130 bl sxlkShared ; Go get a shared lock on the mapping lists
3131 mr. r3,r3 ; Did we get the lock?
3132 bne-- hpfBadLock ; Nope...
3133
3134 mr r3,r28 ; Get the pmap pointer
3135 mr r4,r22 ; Get top of faulting vaddr
3136 mr r5,r23 ; Get bottom of faulting vaddr
3137 bl EXT(mapSearch) ; Go see if we can find it (R7 gets mpFlags)
3138
3139 rlwinm r0,r7,0,mpRIPb,mpRIPb ; Are we removing this one?
3140 mr. r31,r3 ; Save the mapping if we found it
3141 cmplwi cr1,r0,0 ; Check for removal
3142 crorc cr0_eq,cr0_eq,cr1_eq ; Merge not found and removing
3143
3144 bt-- cr0_eq,hpfNotFound ; Not found or removing...
3145
3146 rlwinm. r0,r7,0,mpNestb,mpNestb ; Are we nested?
3147 mr r26,r7 ; Get the flags for this mapping (passed back from search call)
3148
3149 lhz r21,mpSpace(r31) ; Get the space
3150
3151 beq++ hpfFoundIt ; No, we found our guy...
3152
3153
3154#if pmapTransSize != 12
3155#error pmapTrans entry size is not 12 bytes!!!!!!!!!!!! It is pmapTransSize
3156#endif
3157 rlwinm. r0,r26,0,mpSpecialb,mpSpecialb ; Special handling?
3158 cmplwi cr1,r20,0 ; Too many nestings?
3159 bne-- hpfSpclNest ; Do we need to do special handling?
3160
3161hpfCSrch: lhz r21,mpSpace(r31) ; Get the space
3162 lwz r8,mpNestReloc(r31) ; Get the vaddr relocation
3163 lwz r9,mpNestReloc+4(r31) ; Get the vaddr relocation bottom half
3164 la r3,pmapSXlk(r28) ; Point to the old pmap search lock
3165 lis r0,0x8000 ; Get 0xFFFFFFFF80000000
3166 lis r10,hi16(EXT(pmapTrans)) ; Get the translate table
3167 add r0,r0,r0 ; Get 0xFFFFFFFF00000000 for 64-bit or 0 for 32-bit
3168 blt-- cr1,hpfNestTooMuch ; Too many nestings, must be a loop...
3169 or r23,r23,r0 ; Make sure a carry will propagate all the way in 64-bit
3170 slwi r11,r21,3 ; Multiply space by 8
3171 ori r10,r10,lo16(EXT(pmapTrans)) ; Get the translate table low part
3172 addc r23,r23,r9 ; Relocate bottom half of vaddr
3173 lwz r10,0(r10) ; Get the actual translation map
3174 slwi r12,r21,2 ; Multiply space by 4
3175 add r10,r10,r11 ; Add in the higher part of the index
3176 rlwinm r23,r23,0,0,31 ; Clean up the relocated address (does nothing in 32-bit)
3177 adde r22,r22,r8 ; Relocate the top half of the vaddr
3178 add r12,r12,r10 ; Now we are pointing at the space to pmap translation entry
3179 bl sxlkUnlock ; Unlock the search list
3180
3181 lwz r28,pmapPAddr+4(r12) ; Get the physical address of the new pmap
3182 bf-- pf64Bitb,hpfNest ; Done if 32-bit...
3183
3184 ld r28,pmapPAddr(r12) ; Get the physical address of the new pmap
3185 b hpfNest ; Go try the new pmap...
3186
3187;
3188; Error condition. We only allow 64 nestings. This keeps us from having to
3189 ; check for recursive nests when we install them.
3190;
3191
3192 .align 5
3193
3194hpfNestTooMuch:
3195 lwz r20,savedsisr(r13) ; Get the DSISR
3196 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3197 bl sxlkUnlock ; Unlock the search list (R3 good from above)
3198 ori r20,r20,1 ; Indicate that there was a nesting problem
3199 stw r20,savedsisr(r13) ; Stash it
3200 lwz r11,saveexception(r13) ; Restore the exception code
3201 b EXT(PFSExit) ; Pass it up...
3202
3203;
3204; Error condition - lock failed - this is fatal
3205;
3206
3207 .align 5
3208
3209hpfBadLock:
3210 lis r0,hi16(Choke) ; System abend
3211 ori r0,r0,lo16(Choke) ; System abend
3212 li r3,failMapping ; Show mapping failure
3213 sc
3214;
3215; Did not find any kind of mapping
3216;
3217
3218 .align 5
3219
3220hpfNotFound:
3221 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3222 bl sxlkUnlock ; Unlock it
3223 lwz r11,saveexception(r13) ; Restore the exception code
3224
3225 hpfExit: ; We need this because we cannot do a relative branch
3226 b EXT(PFSExit) ; Pass it up...
3227
3228
3229;
3230; Here is where we handle special mappings. So far, the only use is to load a
3231; processor specific segment register for copy in/out handling.
3232;
3233; The only (so far implemented) special map is used for copyin/copyout.
3234; We keep a mapping of a "linkage" mapping in the per_proc.
3235; The linkage mapping is basically a nested pmap that is switched in
3236; as part of context switch. It relocates the appropriate user address
3237; space slice into the right place in the kernel.
3238;
3239
3240 .align 5
3241
3242hpfSpclNest:
3243 la r31,ppCIOmp(r19) ; Just point to the mapping
3244 oris r27,r27,hi16(dsiSpcNest) ; Show that we had a special nesting here
3245 b hpfCSrch ; Go continue search...
3246
3247
3248;
3249; We have now found a mapping for the address we faulted on.
3250;
3251
3252;
3253 ; Here we go about calculating what the VSID should be. We concatenate
3254 ; the space ID (14 bits wide) 3 times. We then slide the vaddr over
3255 ; so that bits 0:35 are in 14:49 (leaves a hole for one copy of the space ID).
3256 ; Then we XOR the expanded space ID with the shifted vaddr. This gives us
3257; the VSID.
3258;
3259; This is used both for segment handling and PTE handling
3260;
3261
3262
3263#if maxAdrSpb != 14
3264#error maxAdrSpb (address space id size) is not 14 bits!!!!!!!!!!!!
3265#endif
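;
; As a rough C model of the above (illustrative only; it glosses over
; the 4-bit hole shown in the diagrams at the top of this file and the
; final repositioning and key insertion done below):
;
;   uint64_t esid    = va >> 28;                         // top 36 bits of EA
;   uint64_t sidHash = sid | (sid << 14) | (sid << 28);  // 14-bit space ID x 3
;   uint64_t vsid    = sidHash ^ (esid << 14);           // EA slid over SID base
;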
3266
3267 .align 5
3268
3269hpfFoundIt: lwz r12,pmapFlags(r28) ; Get the pmap flags so we can find the keys for this segment
3270 rlwinm. r0,r27,0,dsiMissb,dsiMissb ; Did we actually miss the segment?
3271 rlwinm r15,r23,18,14,17 ; Shift 32:35 (0:3) of vaddr just above space ID
3272 rlwinm r20,r21,28,22,31 ; Shift upper 10 bits of space into high order
3273 rlwinm r14,r22,18,14,31 ; Shift 0:17 of vaddr over
3274 rlwinm r0,r27,0,dsiSpcNestb,dsiSpcNestb ; Isolate special nest flag
3275 rlwimi r21,r21,14,4,17 ; Make a second copy of space above first
3276 cmplwi cr5,r0,0 ; Did we just do a special nesting?
3277 rlwimi r15,r22,18,0,13 ; Shift 18:31 of vaddr just above shifted 32:35
3278 crorc cr0_eq,cr0_eq,cr5_eq ; Force ourselves through the seg load code if special nest
3279 rlwimi r21,r21,28,0,3 ; Get low order of 3rd copy of space at top of register
3280 xor r14,r14,r20 ; Calculate the top half of VSID
3281 xor r15,r15,r21 ; Calculate the bottom half of the VSID
3282 rlwinm r14,r14,12,15,19 ; Slide the top of the VSID over to correct position (trim for 65 bit addressing)
3283 rlwinm r12,r12,9,20,22 ; Isolate and position key for cache entry
3284 rlwimi r14,r15,12,20,31 ; Slide top of bottom of VSID over into the top
3285 rlwinm r15,r15,12,0,19 ; Slide the last nybble into the low order segment position
3286 or r12,r12,r15 ; Add key into the bottom of VSID
3287;
3288; Note: ESID is in R22:R23 pair; VSID is in R14:R15; cache form VSID is R14:R12
3289
3290 bne++ hpfPteMiss ; Nope, normal PTE miss...
3291
3292;
3293; Here is the only place that we make an entry in the pmap segment cache.
3294;
3295; Note that we do not make an entry in the segment cache for special
3296; nested mappings. This makes the copy in/out segment get refreshed
3297; when switching threads.
3298;
3299; The first thing that we do is to look up the ESID we are going to load
3300; into a segment in the pmap cache. If it is already there, this is
3301; a segment that appeared since the last time we switched address spaces.
3302 ; If all is correct, then it was another processor that made the cache
3303; entry. If not, well, it is an error that we should die on, but I have
3304; not figured a good way to trap it yet.
3305;
3306; If we get a hit, we just bail, otherwise, lock the pmap cache, select
3307; an entry based on the generation number, update the cache entry, and
3308; also update the pmap sub-tag as well. The sub-tag is a table of 4 bit
3309; entries that correspond to the last 4 bits (32:35 for 64-bit and
3310; 0:3 for 32-bit) of the ESID.
3311;
3312; Then we unlock and bail.
3313;
3314; First lock it. Then select a free slot or steal one based on the generation
3315; number. Then store it, update the allocation flags, and unlock.
3316;
3317; The cache entry contains an image of the ESID/VSID pair we would load for
3318; 64-bit architecture. For 32-bit, it is a simple transform to an SR image.
3319;
3320; Remember, this cache entry goes in the ORIGINAL pmap (saved in R25), not
3321; the current one, which may have changed because we nested.
3322;
3323; Also remember that we do not store the valid bit in the ESID. If we
3324 ; do, this will break some other stuff.
3325;
3326
3327 bne-- cr5,hpfNoCacheEnt2 ; Skip the cache entry if this is a "special nest" fault....
3328
3329 mr r3,r25 ; Point to the pmap
3330 mr r4,r22 ; ESID high half
3331 mr r5,r23 ; ESID low half
3332 bl pmapCacheLookup ; Go see if this is in the cache already
3333
3334 mr. r3,r3 ; Did we find it?
3335 mr r4,r11 ; Copy this to a different register
3336
3337 bne-- hpfNoCacheEnt ; Yes, we found it, no need to make another entry...
3338
3339 lwz r10,pmapSCSubTag(r25) ; Get the first part of the sub-tag lookup table
3340 lwz r11,pmapSCSubTag+4(r25) ; Get the second part of the sub-tag lookup table
3341
3342 cntlzw r7,r4 ; Find a free slot
3343
3344 subi r6,r7,pmapSegCacheUse ; We end up with a negative if we find one
3345 rlwinm r30,r30,0,0,3 ; Clean up the ESID
3346 srawi r6,r6,31 ; Get 0xFFFFFFFF if we have one, 0 if not
3347 addi r5,r4,1 ; Bump the generation number
3348 and r7,r7,r6 ; Clear bit number if none empty
3349 andc r8,r4,r6 ; Clear generation count if we found an empty
3350 rlwimi r4,r5,0,17,31 ; Insert the new generation number into the control word
3351 or r7,r7,r8 ; Select a slot number
3352 li r8,0 ; Clear
3353 andi. r7,r7,pmapSegCacheUse-1 ; Wrap into the number we are using
3354 oris r8,r8,0x8000 ; Get the high bit on
3355 la r9,pmapSegCache(r25) ; Point to the segment cache
3356 slwi r6,r7,4 ; Get index into the segment cache
3357 slwi r2,r7,2 ; Get index into the segment cache sub-tag index
3358 srw r8,r8,r7 ; Get the mask
3359 cmplwi r2,32 ; See if we are in the first or second half of sub-tag
3360 li r0,0 ; Clear
3361 rlwinm r2,r2,0,27,31 ; Wrap shift so we do not shift cache entries 8-F out
3362 oris r0,r0,0xF000 ; Get the sub-tag mask
3363 add r9,r9,r6 ; Point to the cache slot
3364 srw r0,r0,r2 ; Slide sub-tag mask to right slot (shift work for either half)
3365 srw r5,r30,r2 ; Slide sub-tag to right slot (shift work for either half)
3366
3367 stw r29,sgcESID(r9) ; Save the top of the ESID
3368 andc r10,r10,r0 ; Clear sub-tag slot in case we are in top
3369 andc r11,r11,r0 ; Clear sub-tag slot in case we are in bottom
3370 stw r30,sgcESID+4(r9) ; Save the bottom of the ESID
3371 or r10,r10,r5 ; Stick in subtag in case top half
3372 or r11,r11,r5 ; Stick in subtag in case bottom half
3373 stw r14,sgcVSID(r9) ; Save the top of the VSID
3374 andc r4,r4,r8 ; Clear the invalid bit for the slot we just allocated
3375 stw r12,sgcVSID+4(r9) ; Save the bottom of the VSID and the key
3376 bge hpfSCSTbottom ; Go save the bottom part of sub-tag
3377
3378 stw r10,pmapSCSubTag(r25) ; Save the top of the sub-tag
3379 b hpfNoCacheEnt ; Go finish up...
3380
3381hpfSCSTbottom:
3382 stw r11,pmapSCSubTag+4(r25) ; Save the bottom of the sub-tag
3383
3384
3385hpfNoCacheEnt:
3386 eieio ; Make sure cache is updated before lock
3387 stw r4,pmapCCtl(r25) ; Unlock, allocate, and bump generation number
3388
3389
3390hpfNoCacheEnt2:
3391 lwz r4,ppMapFlags(r19) ; Get the protection key modifier
3392 bt++ pf64Bitb,hpfLoadSeg64 ; If 64-bit, go load the segment...
3393
3394;
3395; Make and enter 32-bit segment register
3396;
3397
3398 lwz r16,validSegs(r19) ; Get the valid SR flags
3399 xor r12,r12,r4 ; Alter the storage key before loading segment register
3400 rlwinm r2,r30,4,28,31 ; Isolate the segment we are setting
3401 rlwinm r6,r12,19,1,3 ; Insert the keys and N bit
3402 lis r0,0x8000 ; Set bit 0
3403 rlwimi r6,r12,20,12,31 ; Insert 4:23 the VSID
3404 srw r0,r0,r2 ; Get bit corresponding to SR
3405 rlwimi r6,r14,20,8,11 ; Get the last nybble of the SR contents
3406 or r16,r16,r0 ; Show that SR is valid
3407
3408 mtsrin r6,r30 ; Set the actual SR
3409
3410 stw r16,validSegs(r19) ; Set the valid SR flags
3411
3412 b hpfPteMiss ; SR loaded, go do a PTE...
3413
3414;
3415; Make and enter 64-bit segment look-aside buffer entry.
3416; Note that the cache entry is the right format except for valid bit.
3417; We also need to convert from long long to 64-bit register values.
3418;
3419
3420
3421 .align 5
3422
3423hpfLoadSeg64:
3424 ld r16,validSegs(r19) ; Get the valid SLB entry flags
3425 sldi r8,r29,32 ; Move high order address over
3426 sldi r10,r14,32 ; Move high part of VSID over
3427
3428 not r3,r16 ; Make valids be 0s
3429 li r0,1 ; Prepare to set bit 0
3430
3431 cntlzd r17,r3 ; Find a free SLB
3432 xor r12,r12,r4 ; Alter the storage key before loading segment table entry
3433 or r9,r8,r30 ; Form full 64-bit address
3434 cmplwi r17,63 ; Did we find a free SLB entry?
3435 sldi r0,r0,63 ; Get bit 0 set
3436 or r10,r10,r12 ; Move in low part and keys
3437 addi r17,r17,1 ; Skip SLB 0 always
3438 blt++ hpfFreeSeg ; Yes, go load it...
3439
3440;
3441; No free SLB entries, select one that is in use and invalidate it
3442;
3443 lwz r4,ppSegSteal(r19) ; Get the next slot to steal
3444 addi r17,r4,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
3445 addi r4,r4,1 ; Set next slot to steal
3446 slbmfee r7,r17 ; Get the entry that is in the selected spot
3447 subi r2,r4,63-pmapSegCacheUse ; Force steal to wrap
3448 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
3449 srawi r2,r2,31 ; Get -1 if steal index still in range
3450 slbie r7 ; Invalidate the in-use SLB entry
3451 and r4,r4,r2 ; Reset steal index when it should wrap
3452 isync ;
3453
3454 stw r4,ppSegSteal(r19) ; Set the next slot to steal
3455;
3456; We are now ready to stick the SLB entry in the SLB and mark it in use
3457;
3458
3459hpfFreeSeg:
3460 subi r4,r17,1 ; Adjust shift to account for skipping slb 0
3461 mr r7,r9 ; Get a copy of the ESID with bits 36:63 clear
3462 srd r0,r0,r4 ; Set bit mask for allocation
3463 oris r9,r9,0x0800 ; Turn on the valid bit
3464 or r16,r16,r0 ; Turn on the allocation flag
3465 rldimi r9,r17,0,58 ; Copy in the SLB entry selector
3466
3467 beq++ cr5,hpfNoBlow ; Skip blowing away the SLBE if this is not a special nest...
3468 slbie r7 ; Blow away a potential duplicate
3469
3470hpfNoBlow: slbmte r10,r9 ; Make that SLB entry
3471
3472 std r16,validSegs(r19) ; Mark as valid
3473 b hpfPteMiss ; STE loaded, go do a PTE...
3474
3475;
3476; The segment has been set up and loaded if need be. Now we are ready to build the
3477; PTE and get it into the hash table.
3478;
3479; Note that there is actually a race here. If we start fault processing on
3480; a different pmap, i.e., we have descended into a nested pmap, it is possible
3481; that the nest could have been removed from the original pmap. We would
3482; succeed with this translation anyway. I do not think we need to worry
3483; about this (famous last words) because nobody should be unnesting anything
3484 ; if there are still people actively using them. It should be up to the
3485; higher level VM system to put the kibosh on this.
3486;
3487; There is also another race here: if we fault on the same mapping on more than
3488; one processor at the same time, we could end up with multiple PTEs for the same
3489; mapping. This is not a good thing.... We really only need one of the
3490; fault handlers to finish, so what we do is to set a "fault in progress" flag in
3491; the mapping. If we see that set, we just abandon the handler and hope that by
3492; the time we restore context and restart the interrupted code, the fault has
3493; been resolved by the other guy. If not, we will take another fault.
3494;
3495
3496;
3497; NOTE: IMPORTANT - CR7 contains a flag indicating if we have a block mapping or not.
3498; It is required to stay there until after we call mapSelSlot!!!!
3499;
3500
3501 .align 5
3502
3503hpfPteMiss: lwarx r0,0,r31 ; Load the mapping flag field
3504 lwz r12,mpPte(r31) ; Get the quick pointer to PTE
3505 li r3,mpHValid ; Get the PTE valid bit
3506 andi. r2,r0,lo16(mpFIP) ; Are we handling a fault on the other side?
3507 ori r2,r0,lo16(mpFIP) ; Set the fault in progress flag
3508 crnot cr1_eq,cr0_eq ; Remember if FIP was on
3509 and. r12,r12,r3 ; Isolate the valid bit
3510 crorc cr0_eq,cr1_eq,cr0_eq ; Bail if FIP is on. Then, if already have PTE, bail...
3511 beq-- hpfAbandon ; Yes, other processor is or already has handled this...
3512 andi. r0,r2,mpBlock ; Is this a block mapping?
3513 crmove cr7_eq,cr0_eq ; Remember if we have a block mapping
3514 stwcx. r2,0,r31 ; Store the flags
3515 bne-- hpfPteMiss ; Collision, try again...
3516
3517 bt++ pf64Bitb,hpfBldPTE64 ; Skip down to the 64 bit stuff...
3518
3519;
3520; At this point we are about to do the 32-bit PTE generation.
3521;
3522; The following is the R14:R15 pair that contains the "shifted" VSID:
3523;
3524; 1 2 3 4 4 5 6
3525; 0 8 6 4 2 0 8 6 3
3526; +--------+--------+--------+--------+--------+--------+--------+--------+
3527; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
3528; +--------+--------+--------+--------+--------+--------+--------+--------+
3529;
3530; The 24 bits of the 32-bit architecture VSID is in the following:
3531;
3532; 1 2 3 4 4 5 6
3533; 0 8 6 4 2 0 8 6 3
3534; +--------+--------+--------+--------+--------+--------+--------+--------+
3535; |////////|////////|////////|////VVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
3536; +--------+--------+--------+--------+--------+--------+--------+--------+
3537;
3538
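;
; The PTEG is then located with the architected 32-bit primary hash;
; roughly, in C (illustrative sketch only):
;
;   hash = (vsid & 0x7FFFF) ^ ((ea >> 12) & 0xFFFF); // low 19 of VSID ^ page index
;   pteg = htaborg + ((hash & htabmask) << 6);       // 64-byte PTE groups
;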
3539
3540hpfBldPTE32:
3541 lwz r25,mpVAddr+4(r31) ; Grab the base virtual address for the mapping (32-bit portion)
3542 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
3543
3544 mfsdr1 r27 ; Get the hash table base address
3545
3546 rlwinm r0,r23,0,4,19 ; Isolate just the page index
3547 rlwinm r18,r23,10,26,31 ; Extract the API
3548 xor r19,r15,r0 ; Calculate hash << 12
3549 mr r2,r25 ; Save the flag part of the mapping
3550 rlwimi r18,r14,27,1,4 ; Move bits 28:31 of the "shifted" VSID into the PTE image
3551 rlwinm r16,r27,16,7,15 ; Extract the hash table size
3552 rlwinm r25,r25,0,0,19 ; Clear out the flags
3553 slwi r24,r24,12 ; Change ppnum to physical address (note: 36-bit addressing not supported)
3554 sub r25,r23,r25 ; Get offset in mapping to page (0 unless block map)
3555 ori r16,r16,lo16(0xFFC0) ; Slap in the bottom of the mask
3556 rlwinm r27,r27,0,0,15 ; Extract the hash table base
3557 rlwinm r19,r19,26,6,25 ; Shift hash over to make offset into hash table
3558 add r24,r24,r25 ; Adjust to true physical address
3559 rlwimi r18,r15,27,5,24 ; Move bits 32:31 of the "shifted" VSID into the PTE image
3560 rlwimi r24,r2,0,20,31 ; Slap in the WIMG and prot
3561 and r19,r19,r16 ; Wrap hash table offset into the hash table
3562 ori r24,r24,lo16(mpR) ; Turn on the reference bit right now
3563 rlwinm r20,r19,28,10,29 ; Shift hash over to make offset into PCA
3564 add r19,r19,r27 ; Point to the PTEG
3565 subfic r20,r20,-4 ; Get negative offset to PCA
3566 oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
3567 add r20,r20,r27 ; Point to the PCA slot
3568
3569;
3570; We now have a valid PTE pair in R18/R24. R18 is PTE upper and R24 is PTE lower.
3571; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
3572;
3573; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
3574; that some other processor beat us and stuck in a PTE or that
3575; all we had was a simple segment exception and the PTE was there the whole time.
3576 ; If we find a pointer, we are done.
3577;
3578
3579 mr r7,r20 ; Copy the PCA pointer
3580 bl mapLockPteg ; Lock the PTEG
3581
3582 lwz r12,mpPte(r31) ; Get the offset to the PTE
3583 mr r17,r6 ; Remember the PCA image
3584 mr r16,r6 ; Prime the post-select PCA image
3585 andi. r0,r12,mpHValid ; Is there a PTE here already?
3586 li r21,8 ; Get the number of slots
3587
3588 bne- cr7,hpfNoPte32 ; Skip this for a block mapping...
3589
3590 bne- hpfBailOut ; Someone already did this for us...
3591
3592;
3593; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a
3594; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
3595; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
3596; R4 returns the slot index.
3597;
3598; REMEMBER: CR7 indicates that we are building a block mapping.
3599;
3600
3601hpfNoPte32: subic. r21,r21,1 ; See if we have tried all slots
3602 mr r6,r17 ; Get back the original PCA
3603 rlwimi r6,r16,0,8,15 ; Insert the updated steal slot
3604 blt- hpfBailOut ; Holy Cow, all slots are locked...
3605
3606 bl mapSelSlot ; Go select a slot (note that the PCA image is already set up)
3607
3608 cmplwi cr5,r3,1 ; Did we steal a slot?
3609 rlwinm r5,r4,3,26,28 ; Convert index to slot offset
3610 add r19,r19,r5 ; Point directly to the PTE
3611 mr r16,r6 ; Remember the PCA image after selection
3612 blt+ cr5,hpfInser32 ; Nope, no steal...
3613
3614 lwz r6,0(r19) ; Get the old PTE
3615 lwz r7,4(r19) ; Get the real part of the stealee
3616 rlwinm r6,r6,0,1,31 ; Clear the valid bit
3617 bgt cr5,hpfNipBM ; Do not try to lock a non-existent physent for a block mapping...
3618 srwi r3,r7,12 ; Change phys address to a ppnum
3619 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
3620 cmplwi cr1,r3,0 ; Check if this is in RAM
3621 bne- hpfNoPte32 ; Could not get it, try for another...
3622
3623 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
3624
3625hpfNipBM: stw r6,0(r19) ; Set the invalid PTE
3626
3627 sync ; Make sure the invalid is stored
3628 li r9,tlbieLock ; Get the TLBIE lock
3629 rlwinm r10,r6,21,0,3 ; Shift last 4 bits of space to segment part
3630
3631hpfTLBIE32: lwarx r0,0,r9 ; Get the TLBIE lock
3632 mfsprg r4,0 ; Get the per_proc
3633 rlwinm r8,r6,25,18,31 ; Extract the space ID
3634 rlwinm r11,r6,25,18,31 ; Extract the space ID
3635 lwz r7,hwSteals(r4) ; Get the steal count
3636 srwi r2,r6,7 ; Align segment number with hash
3637 rlwimi r11,r11,14,4,17 ; Get copy above ourselves
3638 mr. r0,r0 ; Is it locked?
3639 srwi r0,r19,6 ; Align PTEG offset for back hash
3640 xor r2,r2,r11 ; Get the segment number (plus a whole bunch of extra bits)
3641 xor r11,r11,r0 ; Hash backwards to partial vaddr
3642 rlwinm r12,r2,14,0,3 ; Shift segment up
3643 mfsprg r2,2 ; Get feature flags
3644 li r0,1 ; Get our lock word
3645 rlwimi r12,r6,22,4,9 ; Move up the API
3646 bne- hpfTLBIE32 ; It is locked, go wait...
3647 rlwimi r12,r11,12,10,19 ; Move in the rest of the vaddr
3648
3649 stwcx. r0,0,r9 ; Try to get it
3650 bne- hpfTLBIE32 ; We was beat...
3651 addi r7,r7,1 ; Bump the steal count
3652
3653 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
3654 li r0,0 ; Lock clear value
3655
3656 tlbie r12 ; Invalidate it everywhere
3657
3658 stw r0,tlbieLock(0) ; Clear the tlbie lock
3659
3660 beq- hpfNoTS32 ; Can not have MP on this machine...
3661
3662 eieio ; Make sure that the tlbie happens first
3663 tlbsync ; Wait for everyone to catch up
3664 sync ; Make sure of it all
3665
3666hpfNoTS32: stw r7,hwSteals(r4) ; Save the steal count
3667 bgt cr5,hpfInser32 ; We just stole a block mapping...
3668
3669 lwz r4,4(r19) ; Get the RC of the just invalidated PTE
3670
3671 la r11,ppLink+4(r3) ; Point to the master RC copy
3672 lwz r7,ppLink+4(r3) ; Grab the pointer to the first mapping
3673 rlwinm r2,r4,27,ppRb-32,ppCb-32 ; Position the new RC
3674
3675hpfMrgRC32: lwarx r0,0,r11 ; Get the master RC
3676 or r0,r0,r2 ; Merge in the new RC
3677 stwcx. r0,0,r11 ; Try to stick it back
3678 bne- hpfMrgRC32 ; Try again if we collided...
3679
3680
3681hpfFPnch: rlwinm. r7,r7,0,0,25 ; Clean and test mapping address
3682 beq- hpfLostPhys ; We could not find our mapping. Kick the bucket...
3683
3684 lhz r10,mpSpace(r7) ; Get the space
3685 lwz r9,mpVAddr+4(r7) ; And the vaddr
3686 cmplw cr1,r10,r8 ; Is this one of ours?
3687 xor r9,r12,r9 ; Compare virtual address
3688 cmplwi r9,0x1000 ; See if we really match
3689 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
3690 beq+ hpfFPnch2 ; Yes, found ours...
3691
3692 lwz r7,mpAlias+4(r7) ; Chain on to the next
3693 b hpfFPnch ; Check it out...
3694
3695hpfFPnch2: sub r0,r19,r27 ; Get offset to the PTEG
3696 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep quick pointer pointing to PTEG)
3697 bl mapPhysUnlock ; Unlock the physent now
3698
3699hpfInser32: oris r18,r18,lo16(0x8000) ; Make sure the valid bit is on
3700
3701 stw r24,4(r19) ; Stuff in the real part of the PTE
3702 eieio ; Make sure this gets there first
3703
3704 stw r18,0(r19) ; Stuff the virtual part of the PTE and make it valid
3705 mr r17,r16 ; Get the PCA image to save
3706 b hpfFinish ; Go join the common exit code...
3707
3708
3709;
3710; At this point we are about to do the 64-bit PTE generation.
3711;
3712; The following is the R14:R15 pair that contains the "shifted" VSID:
3713;
3714; 1 2 3 4 4 5 6
3715; 0 8 6 4 2 0 8 6 3
3716; +--------+--------+--------+--------+--------+--------+--------+--------+
3717; |00000000|0000000V|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVVVVVV|VVVV////|////////|
3718; +--------+--------+--------+--------+--------+--------+--------+--------+
3719;
3720;
3721
3722 .align 5
3723
3724hpfBldPTE64:
3725 ld r10,mpVAddr(r31) ; Grab the base virtual address for the mapping
3726 lwz r24,mpPAddr(r31) ; Grab the base physical page number for the mapping
3727
3728 mfsdr1 r27 ; Get the hash table base address
3729
3730 sldi r11,r22,32 ; Slide top of adjusted EA over
3731 sldi r14,r14,32 ; Slide top of VSID over
3732 rlwinm r5,r27,0,27,31 ; Isolate the size
3733 eqv r16,r16,r16 ; Get all foxes here
3734 rlwimi r15,r23,16,20,24 ; Stick in EA[36:40] to make AVPN
3735 mr r2,r10 ; Save the flag part of the mapping
3736 or r11,r11,r23 ; Stick in bottom of adjusted EA for full 64-bit value
3737 rldicr r27,r27,0,45 ; Clean up the hash table base
3738 or r15,r15,r14 ; Stick in bottom of AVPN for full 64-bit value
3739 rlwinm r0,r11,0,4,19 ; Clear out everything but the page
3740 subfic r5,r5,46 ; Get number of leading zeros
3741 xor r19,r0,r15 ; Calculate hash
3742 ori r15,r15,1 ; Turn on valid bit in AVPN to make top of PTE
3743 srd r16,r16,r5 ; Shift over to get length of table
3744 srdi r19,r19,5 ; Convert page offset to hash table offset
3745 rldicr r16,r16,0,56 ; Clean up lower bits in hash table size
3746 rldicr r10,r10,0,51 ; Clear out flags
3747 sldi r24,r24,12 ; Change ppnum to physical address
3748 sub r11,r11,r10 ; Get the offset from the base mapping
3749 and r19,r19,r16 ; Wrap into hash table
3750 add r24,r24,r11 ; Get actual physical address of this page
3751 srdi r20,r19,5 ; Convert PTEG offset to PCA offset
3752 rldimi r24,r2,0,52 ; Insert the keys, WIMG, RC, etc.
3753 subfic r20,r20,-4 ; Get negative offset to PCA
3754 ori r24,r24,lo16(mpR) ; Force on the reference bit
3755 add r20,r20,r27 ; Point to the PCA slot
3756 add r19,r19,r27 ; Point to the PTEG
3757
3758;
3759; We now have a valid PTE pair in R15/R24. R15 is PTE upper and R24 is PTE lower.
3760; R19 contains the offset of the PTEG in the hash table. R20 has offset into the PCA.
3761;
3762; We need to check PTE pointer (mpPte) again after we lock the PTEG. It is possible
3763; that some other processor beat us and stuck in a PTE or that
3764; all we had was a simple segment exception and the PTE was there the whole time.
3765 ; If we find a pointer, we are done.
3766;
3767
3768 mr r7,r20 ; Copy the PCA pointer
3769 bl mapLockPteg ; Lock the PTEG
3770
3771 lwz r12,mpPte(r31) ; Get the offset to the PTE
3772 mr r17,r6 ; Remember the PCA image
3773 mr r18,r6 ; Prime post-selection PCA image
3774 andi. r0,r12,mpHValid ; See if we have a PTE now
3775 li r21,8 ; Get the number of slots
3776
3777 bne-- cr7,hpfNoPte64 ; Skip this for a block mapping...
3778
3779 bne-- hpfBailOut ; Someone already did this for us...
3780
3781;
3782; The mapSelSlot function selects a PTEG slot to use. As input, it uses R3 as a
3783; pointer to the PCA. When it returns, R3 contains 0 if an unoccupied slot was
3784; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
3785; R4 returns the slot index.
3786;
3787; REMEMBER: CR7 indicates that we are building a block mapping.
3788;
3789
3790hpfNoPte64: subic. r21,r21,1 ; See if we have tried all slots
3791 mr r6,r17 ; Restore original state of PCA
3792 rlwimi r6,r18,0,8,15 ; Insert the updated steal slot
3793 blt- hpfBailOut ; Holy Cow, all slots are locked...
3794
3795 bl mapSelSlot ; Go select a slot
3796
3797 cmplwi cr5,r3,1 ; Did we steal a slot?
3798 rlwinm r5,r4,4,25,27 ; Convert index to slot offset
3799 mr r18,r6 ; Remember the PCA image after selection
3800 add r19,r19,r5 ; Point directly to the PTE
3801 lwz r10,hwSteals(r2) ; Get the steal count
3802 blt++ cr5,hpfInser64 ; Nope, no steal...
3803
3804 ld r6,0(r19) ; Get the old PTE
3805 ld r7,8(r19) ; Get the real part of the stealee
3806 rldicr r6,r6,0,62 ; Clear the valid bit
3807 bgt cr5,hpfNipBMx ; Do not try to lock a non-existent physent for a block mapping...
3808 srdi r3,r7,12 ; Change phys address to a ppnum
3809 bl mapFindPhyTry ; Go find and try to lock physent (note: if R3 is 0, there is no physent for this page)
3810 cmplwi cr1,r3,0 ; Check if this is in RAM
3811 bne-- hpfNoPte64 ; Could not get it, try for another...
3812
3813 crmove cr5_gt,cr1_eq ; If we did not find a physent, pretend that this is a block map
3814
3815hpfNipBMx: std r6,0(r19) ; Set the invalid PTE
3816 li r9,tlbieLock ; Get the TLBIE lock
3817
3818 srdi r11,r6,5 ; Shift VSID over for back hash
3819 mfsprg r4,0 ; Get the per_proc
3820 xor r11,r11,r19 ; Hash backwards to get low bits of VPN
3821 sync ; Make sure the invalid is stored
3822
3823 sldi r12,r6,16 ; Move AVPN to EA position
3824 sldi r11,r11,5 ; Move this to the page position
3825
3826hpfTLBIE64: lwarx r0,0,r9 ; Get the TLBIE lock
3827 mr. r0,r0 ; Is it locked?
3828 li r0,1 ; Get our lock word
3829 bne-- hpfTLBIE65 ; It is locked, go wait...
3830
3831 stwcx. r0,0,r9 ; Try to get it
3832 rldimi r12,r11,0,41 ; Stick the low part of the page number into the AVPN
3833 rldicl r8,r6,52,50 ; Isolate the address space ID
3834 bne-- hpfTLBIE64 ; We was beat...
3835 addi r10,r10,1 ; Bump the steal count
3836
3837 rldicl r11,r12,0,16 ; Clear cause the book says so
3838 li r0,0 ; Lock clear value
3839
3840 tlbie r11 ; Invalidate it everywhere
3841
3842 stw r0,tlbieLock(0) ; Clear the tlbie lock
3843
3844 mr r7,r8 ; Get a copy of the space ID
3845 eieio ; Make sure that the tlbie happens first
3846 rldimi r7,r7,14,36 ; Copy address space to make hash value
3847 tlbsync ; Wait for everyone to catch up
3848 rldimi r7,r7,28,22 ; Add in a 3rd copy of the hash up top
3849 isync
3850 srdi r2,r6,26 ; Shift original segment down to bottom
3851
3852 ptesync ; Make sure of it all
3853 xor r7,r7,r2 ; Compute original segment
3854
3855 stw r10,hwSteals(r4) ; Save the steal count
3856 bgt cr5,hpfInser64 ; We just stole a block mapping...
3857
3858 rldimi r12,r7,28,0 ; Insert decoded segment
3859 rldicl r4,r12,0,13 ; Trim to max supported address
3860
3861 ld r12,8(r19) ; Get the RC of the just invalidated PTE
3862
3863 la r11,ppLink+4(r3) ; Point to the master RC copy
3864 ld r7,ppLink(r3) ; Grab the pointer to the first mapping
3865 rlwinm r2,r12,27,ppRb-32,ppCb-32 ; Position the new RC
3866
3867hpfMrgRC64: lwarx r0,0,r11 ; Get the master RC
3868 li r12,0xFF ; Get mask to clean up alias pointer
3869 or r0,r0,r2 ; Merge in the new RC
3870 rldicl r12,r12,62,0 ; Rotate clean up mask to get 0xC00000000000003F
3871 stwcx. r0,0,r11 ; Try to stick it back
3872 bne-- hpfMrgRC64 ; Try again if we collided...
3873
3874hpfFPnchx: andc. r7,r7,r12 ; Clean and test mapping address
3875 beq-- hpfLostPhys ; We could not find our mapping. Kick the bucket...
3876
3877 lhz r10,mpSpace(r7) ; Get the space
3878 ld r9,mpVAddr(r7) ; And the vaddr
3879 cmplw cr1,r10,r8 ; Is this one of ours?
3880 xor r9,r4,r9 ; Compare virtual address
3881 cmpldi r9,0x1000 ; See if we really match
3882 crand cr0_eq,cr1_eq,cr0_lt ; See if both space and vaddr match
3883 beq++ hpfFPnch2x ; Yes, found ours...
3884
3885 ld r7,mpAlias(r7) ; Chain on to the next
3886 b hpfFPnchx ; Check it out...
3887
3888 .align 5
3889
3890 hpfTLBIE65: li r7,lgKillResv ; Point to the reservation kill area
3891 stwcx. r7,0,r7 ; Kill reservation
3892
3893hpfTLBIE63: lwz r0,0(r9) ; Get the TLBIE lock
3894 mr. r0,r0 ; Is it locked?
3895 beq++ hpfTLBIE64 ; Nope, free now, go try to grab it...
3896 b hpfTLBIE63 ; Yup, still locked, keep waiting...
3897
3898
3899
3900hpfFPnch2x: sub r0,r19,r27 ; Get offset to PTEG
3901 stw r0,mpPte(r7) ; Invalidate the quick pointer (keep pointing at PTEG though)
3902 bl mapPhysUnlock ; Unlock the physent now
3903
3904
3905hpfInser64: std r24,8(r19) ; Stuff in the real part of the PTE
3906 eieio ; Make sure this gets there first
3907 std r15,0(r19) ; Stuff the virtual part of the PTE and make it valid
3908 mr r17,r18 ; Get the PCA image to set
3909 b hpfFinish ; Go join the common exit code...
3910
3911hpfLostPhys:
3912 lis r0,hi16(Choke) ; System abend - we must find the stolen mapping or we are dead
3913 ori r0,r0,lo16(Choke) ; System abend
3914 sc
3915
3916;
3917; This is the common code we execute when we are finished setting up the PTE.
3918;
3919
3920 .align 5
3921
3922hpfFinish: sub r4,r19,r27 ; Get offset of PTE
3923 ori r4,r4,lo16(mpHValid) ; Add valid bit to PTE offset
3924 bne cr7,hpfBailOut ; Do not set the PTE pointer for a block map
3925 stw r4,mpPte(r31) ; Remember our PTE
3926
3927hpfBailOut: eieio ; Make sure all updates come first
3928 stw r17,0(r20) ; Unlock and set the final PCA
3929
3930;
3931; This is where we go if we have started processing the fault, but find that someone
3932; else has taken care of it.
3933;
3934
3935hpfIgnore: lwz r2,mpFlags(r31) ; Get the mapping flags
3936 rlwinm r2,r2,0,mpFIPb+1,mpFIPb-1 ; Clear the "fault in progress" flag
3937 sth r2,mpFlags+2(r31) ; Set it
3938
3939 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3940 bl sxlkUnlock ; Unlock the search list
3941
3942 li r11,T_IN_VAIN ; Say that it was handled
3943 b EXT(PFSExit) ; Leave...
3944
3945;
3946; This is where we go when we find that someone else
3947; is in the process of handling the fault.
3948;
3949
3950hpfAbandon: li r3,lgKillResv ; Kill off any reservation
3951 stwcx. r3,0,r3 ; Do it
3952
3953 la r3,pmapSXlk(r28) ; Point to the pmap search lock
3954 bl sxlkUnlock ; Unlock the search list
3955
3956 li r11,T_IN_VAIN ; Say that it was handled
3957 b EXT(PFSExit) ; Leave...
3958
3959
3960
3961/*
3962 * hw_set_user_space(pmap)
3963 * hw_set_user_space_dis(pmap)
3964 *
3965 * Indicate whether memory space needs to be switched.
3966 * We really need to turn off interrupts here, because we need to be non-preemptable
3967 *
3968 * hw_set_user_space_dis is used when interruptions are already disabled. Mind the
3969 * register usage here. The VMM switch code in vmachmon.s that calls this
3970 * knows what registers are in use. Check that code if these change.
3971 */
3972
3973
3974
3975 .align 5
3976 .globl EXT(hw_set_user_space)
3977
3978LEXT(hw_set_user_space)
3979
3980 lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable
3981 mfmsr r10 ; Get the current MSR
3982 ori r8,r8,lo16(MASK(MSR_FP)) ; Add in FP
3983 ori r9,r8,lo16(MASK(MSR_EE)) ; Add in the EE
3984 andc r10,r10,r8 ; Turn off VEC, FP for good
3985 andc r9,r10,r9 ; Turn off EE also
3986 mtmsr r9 ; Disable them
3987 isync ; Make sure FP and vec are off
3988 mfsprg r6,0 ; Get the per_proc_info address
3989 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
3990 mfsprg r4,2 ; Get the feature flags
3991 lwz r7,pmapvr(r3) ; Get the v to r translation
3992 lwz r8,pmapvr+4(r3) ; Get the v to r translation
3993 mtcrf 0x80,r4 ; Get the Altivec flag
3994 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
3995 cmplw cr1,r3,r2 ; Same address space as before?
3996 stw r7,ppUserPmap(r6) ; Show our real pmap address
3997 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
3998 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
3999 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4000 mtmsr r10 ; Restore interruptions
4001 beqlr-- cr1 ; Leave if the same address space or not Altivec
4002
4003 dssall ; Need to kill all data streams if adrsp changed
4004 sync
4005 blr ; Return...
4006
4007 .align 5
4008 .globl EXT(hw_set_user_space_dis)
4009
4010LEXT(hw_set_user_space_dis)
4011
4012 lwz r7,pmapvr(r3) ; Get the v to r translation
4013 mfsprg r4,2 ; Get the feature flags
4014 lwz r8,pmapvr+4(r3) ; Get the v to r translation
4015 mfsprg r6,0 ; Get the per_proc_info address
4016 lwz r2,ppUserPmapVirt(r6) ; Get our virtual pmap address
4017 mtcrf 0x80,r4 ; Get the Altivec flag
4018 xor r4,r3,r8 ; Get bottom of the real address of bmap anchor
4019 cmplw cr1,r3,r2 ; Same address space as before?
4020 stw r7,ppUserPmap(r6) ; Show our real pmap address
4021 crorc cr1_eq,cr1_eq,pfAltivecb ; See if same address space or not altivec machine
4022 stw r4,ppUserPmap+4(r6) ; Show our real pmap address
4023 stw r3,ppUserPmapVirt(r6) ; Show our virtual pmap address
4024 beqlr-- cr1 ; Leave if the same
4025
4026 dssall ; Need to kill all data streams if adrsp changed
4027 sync
4028 blr ; Return...
4029
4030/* int mapalc1(struct mappingblok *mb) - Finds, allocates, and zeros a free 1-bit mapping entry
4031 *
4032 * Lock must already be held on mapping block list
4033 * returns 0 if all slots filled.
4034 * returns n if a slot is found and it is not the last
4035 * returns -n if a slot is found and it is the last
4036 * when n and -n are returned, the corresponding bit is cleared
4037 * the mapping is zeroed out before return
4038 *
4039 */
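;
; In C terms the search below is roughly (illustrative sketch only):
;
;   n = cntlzw(mbfree[0]);                  // first set (free) bit, 32 if none
;   if (n == 32) n = 32 + cntlzw(mbfree[1]);
;   if (n == 64) return 0;                  // every slot is in use
;   mbfree[n >> 5] &= ~(0x80000000 >> (n & 31));  // claim the slot
;   bzero((char *)mb + 64 * n, 64);         // zero the 64-byte mapping
;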
4040
4041 .align 5
4042 .globl EXT(mapalc1)
4043
4044LEXT(mapalc1)
4045 lwz r4,mbfree(r3) ; Get the 1st mask
4046 lis r0,0x8000 ; Get the mask to clear the first free bit
4047 lwz r5,mbfree+4(r3) ; Get the 2nd mask
4048 mr r12,r3 ; Save the block ptr
4049 cntlzw r3,r4 ; Get first 1-bit in 1st word
4050 srw. r9,r0,r3 ; Get bit corresponding to first free one
4051 cntlzw r10,r5 ; Get first free field in second word
4052 andc r4,r4,r9 ; Turn 1-bit off in 1st word
4053 bne mapalc1f ; Found one in 1st word
4054
4055 srw. r9,r0,r10 ; Get bit corresponding to first free one in 2nd word
4056 li r3,0 ; assume failure return
4057 andc r5,r5,r9 ; Turn it off
4058 beqlr-- ; There are no 1 bits left...
4059 addi r3,r10,32 ; set the correct number
4060
4061mapalc1f:
4062 or. r0,r4,r5 ; any more bits set?
4063 stw r4,mbfree(r12) ; update bitmasks
4064 stw r5,mbfree+4(r12)
4065
4066 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4067 addi r7,r6,32
4068 dcbz r6,r12 ; clear the 64-byte mapping
4069 dcbz r7,r12
4070
4071 bnelr++ ; return if another bit remains set
4072
4073 neg r3,r3 ; indicate we just returned the last bit
4074 blr
4075
4076
4077/* int mapalc2(struct mappingblok *mb) - Finds, allocates, and zeros a free 2-bit mapping entry
4078 *
4079 * Lock must already be held on mapping block list
4080 * returns 0 if all slots filled.
4081 * returns n if a slot is found and it is not the last
4082 * returns -n if a slot is found and it is the last
4083 * when n and -n are returned, the corresponding bits are cleared
4084 * We find runs of 2 consecutive 1 bits by cntlzw(n & (n<<1)).
4085 * the mapping is zeroed out before return
4086 */
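;
; The pair search uses the trick named above; roughly, in C
; (illustrative sketch only):
;
;   runs = bits & (bits << 1);  // bit i set iff bits i and i+1 are both free
;   n    = cntlzw(runs);        // first such pair, 32 if none in this word
;
; A pair that straddles the two words (bit 31 of the first, bit 0 of the
; second) is picked up separately at mapalc2c below.
;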
4087
4088 .align 5
4089 .globl EXT(mapalc2)
4090LEXT(mapalc2)
4091 lwz r4,mbfree(r3) ; Get the first mask
4092 lis r0,0x8000 ; Get the mask to clear the first free bit
4093 lwz r5,mbfree+4(r3) ; Get the second mask
4094 mr r12,r3 ; Save the block ptr
4095 slwi r6,r4,1 ; shift first word over
4096 and r6,r4,r6 ; lite start of double bit runs in 1st word
4097 slwi r7,r5,1 ; shift 2nd word over
4098 cntlzw r3,r6 ; Get first free 2-bit run in 1st word
4099 and r7,r5,r7 ; lite start of double bit runs in 2nd word
4100 srw. r9,r0,r3 ; Get bit corresponding to first run in 1st word
4101 cntlzw r10,r7 ; Get first free field in second word
4102 srwi r11,r9,1 ; shift over for 2nd bit in 1st word
4103 andc r4,r4,r9 ; Turn off 1st bit in 1st word
4104 andc r4,r4,r11 ; turn off 2nd bit in 1st word
4105 bne mapalc2a ; Found two consecutive free bits in 1st word
4106
4107 srw. r9,r0,r10 ; Get bit corresponding to first free one in second word
4108 li r3,0 ; assume failure
4109 srwi r11,r9,1 ; get mask for 2nd bit
4110 andc r5,r5,r9 ; Turn off 1st bit in 2nd word
4111 andc r5,r5,r11 ; turn off 2nd bit in 2nd word
4112 beq-- mapalc2c ; There are no runs of 2 bits in 2nd word either
4113 addi r3,r10,32 ; set the correct number
4114
4115mapalc2a:
4116 or. r0,r4,r5 ; any more bits set?
4117 stw r4,mbfree(r12) ; update bitmasks
4118 stw r5,mbfree+4(r12)
4119 slwi r6,r3,6 ; get (n * mpBasicSize), ie offset of mapping in block
4120 addi r7,r6,32
4121 addi r8,r6,64
4122 addi r9,r6,96
4123 dcbz r6,r12 ; zero out the 128-byte mapping
4124 dcbz r7,r12 ; we use the slow 32-byte dcbz even on 64-bit machines
4125 dcbz r8,r12 ; because the mapping may not be 128-byte aligned
4126 dcbz r9,r12
4127
4128 bnelr++ ; return if another bit remains set
4129
4130 neg r3,r3 ; indicate we just returned the last bit
4131 blr
4132
4133mapalc2c:
4134 rlwinm r7,r5,1,31,31 ; move bit 0 of 2nd word to bit 31
4135 and. r0,r4,r7 ; is the 2-bit field that spans the 2 words free?
4136 beqlr ; no, we failed
4137 rlwinm r4,r4,0,0,30 ; yes, turn off bit 31 of 1st word
4138 rlwinm r5,r5,0,1,31 ; turn off bit 0 of 2nd word
4139 li r3,31 ; get index of this field
4140 b mapalc2a
4141
4142
4143;
4144; This routine initializes the hash table and PCA.
4145; It is done here because we may need to be 64-bit to do it.
4146;
4147
4148 .align 5
4149 .globl EXT(hw_hash_init)
4150
4151LEXT(hw_hash_init)
4152
4153 mfsprg r10,2 ; Get feature flags
4154 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
4155 mtcrf 0x02,r10 ; move pf64Bit to cr6
4156 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
4157 lis r4,0xFF01 ; Set all slots free and start steal at end
4158 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
4159 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
4160
4161 lwz r12,0(r12) ; Get hash table size
4162 li r3,0 ; Get start
4163 bt++ pf64Bitb,hhiSF ; skip if 64-bit (only they take the hint)
4164
4165 lwz r11,4(r11) ; Get hash table base
4166
4167hhiNext32: cmplw r3,r12 ; Have we reached the end?
4168 bge- hhiCPCA32 ; Yes...
4169 dcbz r3,r11 ; Clear the line
4170 addi r3,r3,32 ; Next one...
4171 b hhiNext32 ; Go on...
4172
4173hhiCPCA32: rlwinm r12,r12,28,4,29 ; Get number of slots * 4
4174 li r3,-4 ; Displacement to first PCA entry
4175 neg r12,r12 ; Get negative end of PCA
4176
4177hhiNPCA32: stwx r4,r3,r11 ; Initialize the PCA entry
4178 subi r3,r3,4 ; Next slot
4179 cmpw r3,r12 ; Have we finished?
4180 bge+ hhiNPCA32 ; Not yet...
4181 blr ; Leave...
4182
4183hhiSF: mfmsr r9 ; Save the MSR
4184 li r8,1 ; Get a 1
4185 mr r0,r9 ; Get a copy of the MSR
4186 ld r11,0(r11) ; Get hash table base
4187 rldimi r0,r8,63,MSR_SF_BIT ; Set SF bit (bit 0)
4188 mtmsrd r0 ; Turn on SF
4189 isync
4190
4191
4192hhiNext64: cmpld r3,r12 ; Have we reached the end?
4193 bge-- hhiCPCA64 ; Yes...
4194 dcbz128 r3,r11 ; Clear the line
4195 addi r3,r3,128 ; Next one...
4196 b hhiNext64 ; Go on...
4197
4198hhiCPCA64: rlwinm r12,r12,27,5,29 ; Get number of slots * 4
4199 li r3,-4 ; Displacement to first PCA entry
4200 neg r12,r12 ; Get negative end of PCA
4201
4202hhiNPCA64: stwx r4,r3,r11 ; Initialize the PCA entry
4203 subi r3,r3,4 ; Next slot
4204 cmpd r3,r12 ; Have we finished?
4205 bge++ hhiNPCA64 ; Not yet...
4206
4207 mtmsrd r9 ; Turn off SF if it was off
4208 isync
4209 blr ; Leave...
4210
4211
4212;
4213; This routine sets up the hardware to start translation.
4214; Note that we do NOT start translation.
4215;
4216
4217 .align 5
4218 .globl EXT(hw_setup_trans)
4219
4220LEXT(hw_setup_trans)
4221
4222 mfsprg r11,0 ; Get the per_proc block
4223 mfsprg r12,2 ; Get feature flags
4224 li r0,0 ; Get a 0
4225 li r2,1 ; And a 1
4226 mtcrf 0x02,r12 ; Move pf64Bit to cr6
4227 stw r0,validSegs(r11) ; Make sure we think all SR/STEs are invalid
4228 stw r0,validSegs+4(r11) ; Make sure we think all SR/STEs are invalid, part deux
4229 sth r2,ppInvSeg(r11) ; Force a reload of the SRs
4230 sth r0,ppCurSeg(r11) ; Set that we are starting out in kernel
4231
4232 bt++ pf64Bitb,hstSF ; skip if 64-bit (only they take the hint)
4233
4234 li r9,0 ; Clear out a register
4235 sync
4236 isync
4237 mtdbatu 0,r9 ; Invalidate maps
4238 mtdbatl 0,r9 ; Invalidate maps
4239 mtdbatu 1,r9 ; Invalidate maps
4240 mtdbatl 1,r9 ; Invalidate maps
4241 mtdbatu 2,r9 ; Invalidate maps
4242 mtdbatl 2,r9 ; Invalidate maps
4243 mtdbatu 3,r9 ; Invalidate maps
4244 mtdbatl 3,r9 ; Invalidate maps
4245
4246 mtibatu 0,r9 ; Invalidate maps
4247 mtibatl 0,r9 ; Invalidate maps
4248 mtibatu 1,r9 ; Invalidate maps
4249 mtibatl 1,r9 ; Invalidate maps
4250 mtibatu 2,r9 ; Invalidate maps
4251 mtibatl 2,r9 ; Invalidate maps
4252 mtibatu 3,r9 ; Invalidate maps
4253 mtibatl 3,r9 ; Invalidate maps
4254
4255 lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
4256 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
4257 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
4258 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
4259 lwz r11,4(r11) ; Get hash table base
4260 lwz r12,0(r12) ; Get hash table size
4261 subi r12,r12,1 ; Back off by 1
4262 rlwimi r11,r12,16,23,31 ; Stick the size into the sdr1 image
4263
4264 mtsdr1 r11 ; Ok, we now have the hash table set up
4265 sync
4266
4267 li r12,invalSpace ; Get the invalid segment value
4268 li r10,0 ; Start low
4269
4270hstsetsr: mtsrin r12,r10 ; Set the SR
4271 addis r10,r10,0x1000 ; Bump the segment
4272 mr. r10,r10 ; Are we finished?
4273 bne+ hstsetsr ; Nope...
4274 sync
4275 blr ; Return...
4276
4277;
4278; 64-bit version
4279;
4280
4281hstSF: lis r11,hi16(EXT(hash_table_base)) ; Get hash table base address
4282 lis r12,hi16(EXT(hash_table_size)) ; Get hash table size address
4283 ori r11,r11,lo16(EXT(hash_table_base)) ; Get hash table base address
4284 ori r12,r12,lo16(EXT(hash_table_size)) ; Get hash table size address
4285 ld r11,0(r11) ; Get hash table base
4286 lwz r12,0(r12) ; Get hash table size
4287 cntlzw r10,r12 ; Get the number of bits
4288 subfic r10,r10,13 ; Get the extra bits we need
4289 or r11,r11,r10 ; Add the size field to SDR1
4290
4291 mtsdr1 r11 ; Ok, we now have the hash table set up
4292 sync
4293
4294 li r0,0 ; Set an SLB slot index of 0
4295 slbia ; Trash all SLB entries (except for entry 0 that is)
4296 slbmfee r7,r0 ; Get the entry that is in SLB index 0
4297 rldicr r7,r7,0,35 ; Clear the valid bit and the rest
4298 slbie r7 ; Invalidate it
4299
4300 blr ; Return...
4301
4302
4303;
4304; This routine turns on translation for the first time on a processor
4305;
4306
4307 .align 5
4308 .globl EXT(hw_start_trans)
4309
4310LEXT(hw_start_trans)
4311
4312
4313 mfmsr r10 ; Get the msr
4314 ori r10,r10,lo16(MASK(MSR_IR) | MASK(MSR_DR)) ; Turn on translation
4315
4316 mtmsr r10 ; Everything falls apart here
4317 isync
4318
4319 blr ; Back to it.
4320
4321
4322
4323;
4324; This routine validates a segment register.
4325; hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va)
4326;
4327; r3 = virtual pmap
4328; r4 = segment[0:31]
4329; r5 = segment[32:63]
4330; r6 = va[0:31]
4331; r7 = va[32:63]
4332;
4333; Note that we transform the addr64_t (long long) parameters into single 64-bit values.
4334; Note that there is no reason to apply the key modifier here because this is only
4335; used for kernel accesses.
4336;
4337
4338 .align 5
4339 .globl EXT(hw_map_seg)
4340
4341LEXT(hw_map_seg)
4342
4343 lwz r0,pmapSpace(r3) ; Get the space, we will need it soon
4344 lwz r9,pmapFlags(r3) ; Get the flags for the keys now
4345 mfsprg r10,2 ; Get feature flags
4346 mfsprg r12,0 ; Get the per_proc
4347
4348;
4349; Note: the following code would probably be easier to follow if I split it,
4350; but I just wanted to see if I could write this to work on both 32- and 64-bit
4351; machines combined.
4352;
4353
4354;
4355; Here we enter with va[0:31] in r6[0:31] (or r6[32:63] on 64-bit machines)
4356; and va[32:63] in r7[0:31] (or r7[32:63] on 64-bit machines)
4357
4358 rlwinm r4,r4,0,1,0 ; Copy seg[0:31] into r4[0:31] - no-op for 32-bit
4359 rlwinm r7,r7,18,14,17 ; Slide va[32:35] east to just west of space ID
4360 mtcrf 0x02,r10 ; Move pf64Bit and pfNoMSRirb to cr5 and 6
4361 srwi r8,r6,14 ; Slide va[0:17] east to just west of the rest
4362 rlwimi r7,r6,18,0,13 ; Slide va[18:31] east to just west of slid va[32:35]
4363 rlwimi r0,r0,14,4,17 ; Dup address space ID above itself
4364 rlwinm r8,r8,0,1,0 ; Dup low part into high (does nothing on 32-bit machines)
4365 rlwinm r2,r0,28,0,31 ; Rotate low nybble to top of low half
4366 rlwimi r2,r2,0,1,0 ; Replicate bottom 32 into top 32
4367 rlwimi r8,r7,0,0,31 ; Join va[0:17] with va[18:35] (just like mr on 32-bit machines)
4368
4369 rlwimi r2,r0,0,4,31 ; We should now have 4 copies of the space
4370 ; concatenated together. There is garbage
4371 ; at the top for 64-bit but we will clean
4372 ; that out later.
4373 rlwimi r4,r5,0,0,31 ; Copy seg[32:63] into r4[32:63] - just like mr for 32-bit
4374
4375
4376;
4377; Here we exit with va[0:35] shifted into r8[14:51], zeros elsewhere, or
4378; va[18:35] shifted into r8[0:17], zeros elsewhere on 32-bit machines
4379;
4380
4381;
4382; What we have now is:
4383;
4384; 0 0 1 2 3 4 4 5 6
4385; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
4386; +--------+--------+--------+--------+--------+--------+--------+--------+
4387; r2 = |xxxx0000|AAAAAAAA|AAAAAABB|BBBBBBBB|BBBBCCCC|CCCCCCCC|CCDDDDDD|DDDDDDDD| - hash value
4388; +--------+--------+--------+--------+--------+--------+--------+--------+
4389; 0 0 1 2 3 - for 32-bit machines
4390; 0 8 6 4 1
4391;
4392; 0 0 1 2 3 4 4 5 6
4393; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
4394; +--------+--------+--------+--------+--------+--------+--------+--------+
4395; r8 = |00000000|000000SS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SS000000|00000000| - shifted and cleaned EA
4396; +--------+--------+--------+--------+--------+--------+--------+--------+
4397; 0 0 1 2 3 - for 32-bit machines
4398; 0 8 6 4 1
4399;
4400; 0 0 1 2 3 4 4 5 6
4401; 0 8 6 4 2 0 8 6 3 - for 64-bit machines
4402; +--------+--------+--------+--------+--------+--------+--------+--------+
4403; r4 = |SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSSSSSS|SSSS0000|00000000|00000000|00000000| - Segment
4404; +--------+--------+--------+--------+--------+--------+--------+--------+
4405; 0 0 1 2 3 - for 32-bit machines
4406; 0 8 6 4 1
4407
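;
; For reference, here is the same VSID formation in rough C - a hedged
; sketch with hypothetical names, not kernel code; the exact field
; boundaries are the ones pictured above, the masks here are illustrative:
;
;     #include <stdint.h>
;
;     /* Replicate the address space ID at 14-bit strides, then XOR the
;        result with the segment portion of the EA. */
;     static uint64_t form_vsid(uint64_t space, uint64_t ea)
;     {
;         uint64_t hash = space | (space << 14) | (space << 28) | (space << 42);
;         uint64_t esid = ea >> 28;                     /* segment number  */
;         return (hash ^ esid) & 0x000FFFFFFFFFFFFFULL; /* VSID-sized bits */
;     }
;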
4408
4409 xor r8,r8,r2 ; Calculate VSID
4410
4411 bf-- pf64Bitb,hms32bit ; Skip out if 32-bit...
4412
4413 li r0,1 ; Prepare to set bit 0 (also to clear EE)
4414 mfmsr r6 ; Get current MSR
4415 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
4416 mtmsrd r0,1 ; Set only the EE bit to 0
4417 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
4418 mfmsr r11 ; Get the MSR right now, after disabling EE
4419 andc r2,r11,r2 ; Turn off translation now
 4420 rldimi r2,r0,63,0 ; Turn the 64-bit mode bit on
4421 or r11,r11,r6 ; Turn on the EE bit if it was on
4422 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
4423 isync ; Hang out a bit
4424
4425 ld r6,validSegs(r12) ; Get the valid SLB entry flags
4426 sldi r9,r9,9 ; Position the key and noex bit
4427
4428 rldimi r5,r8,12,0 ; Form the VSID/key
4429
4430 not r3,r6 ; Make valids be 0s
4431
4432 cntlzd r7,r3 ; Find a free SLB
4433 cmplwi r7,63 ; Did we find a free SLB entry?
4434
4435 slbie r4 ; Since this ESID may still be in an SLBE, kill it
4436
4437 oris r4,r4,0x0800 ; Turn on the valid bit in ESID
4438 addi r7,r7,1 ; Make sure we skip slb 0
4439 blt++ hmsFreeSeg ; Yes, go load it...
4440
4441;
4442; No free SLB entries, select one that is in use and invalidate it
4443;
4444 lwz r2,ppSegSteal(r12) ; Get the next slot to steal
4445 addi r7,r2,pmapSegCacheUse+1 ; Select stealee from non-cached slots only
4446 addi r2,r2,1 ; Set next slot to steal
4447 slbmfee r3,r7 ; Get the entry that is in the selected spot
4448 subi r8,r2,64-(pmapSegCacheUse+1) ; Force steal to wrap
4449 rldicr r3,r3,0,35 ; Clear the valid bit and the rest
4450 srawi r8,r8,31 ; Get -1 if steal index still in range
4451 slbie r3 ; Invalidate the in-use SLB entry
4452 and r2,r2,r8 ; Reset steal index when it should wrap
4453 isync ;
4454
4455 stw r2,ppSegSteal(r12) ; Set the next slot to steal
4456;
4457; We are now ready to stick the SLB entry in the SLB and mark it in use
4458;
4459
4460hmsFreeSeg: subi r2,r7,1 ; Adjust for skipped slb 0
4461 rldimi r4,r7,0,58 ; Copy in the SLB entry selector
4462 srd r0,r0,r2 ; Set bit mask for allocation
4463 rldicl r5,r5,0,15 ; Clean out the unsupported bits
4464 or r6,r6,r0 ; Turn on the allocation flag
4465
4466 slbmte r5,r4 ; Make that SLB entry
4467
4468 std r6,validSegs(r12) ; Mark as valid
4469 mtmsrd r11 ; Restore the MSR
4470 isync
4471 blr ; Back to it...
4472
4473 .align 5
4474
4475hms32bit: rlwinm r8,r8,0,8,31 ; Clean up the VSID
4476 rlwinm r2,r4,4,28,31 ; Isolate the segment we are setting
4477 lis r0,0x8000 ; Set bit 0
4478 rlwimi r8,r9,28,1,3 ; Insert the keys and N bit
4479 srw r0,r0,r2 ; Get bit corresponding to SR
4480 addi r7,r12,validSegs ; Point to the valid segment flags directly
4481
4482 mtsrin r8,r4 ; Set the actual SR
4483 isync ; Need to make sure this is done
4484
4485hmsrupt: lwarx r6,0,r7 ; Get and reserve the valid segment flags
4486 or r6,r6,r0 ; Show that SR is valid
4487 stwcx. r6,0,r7 ; Set the valid SR flags
4488 bne-- hmsrupt ; Had an interrupt, need to get flags again...
4489
4490 blr ; Back to it...
4491
4492
4493;
4494; This routine invalidates a segment register.
4495;
4496
4497 .align 5
4498 .globl EXT(hw_blow_seg)
4499
4500LEXT(hw_blow_seg)
4501
4502 mfsprg r10,2 ; Get feature flags
4503 mfsprg r12,0 ; Get the per_proc
4504 mtcrf 0x02,r10 ; move pf64Bit and pfNoMSRirb to cr5 and 6
4505
4506 addi r7,r12,validSegs ; Point to the valid segment flags directly
4507 rlwinm r9,r4,0,0,3 ; Save low segment address and make sure it is clean
4508
4509 bf-- pf64Bitb,hbs32bit ; Skip out if 32-bit...
4510
4511 li r0,1 ; Prepare to set bit 0 (also to clear EE)
4512 mfmsr r6 ; Get current MSR
4513 li r2,MASK(MSR_IR)|MASK(MSR_DR) ; Get the translation bits
4514 mtmsrd r0,1 ; Set only the EE bit to 0
4515 rlwinm r6,r6,0,MSR_EE_BIT,MSR_EE_BIT ; See if EE bit is on
4516 mfmsr r11 ; Get the MSR right now, after disabling EE
4517 andc r2,r11,r2 ; Turn off translation now
 4518 rldimi r2,r0,63,0 ; Turn the 64-bit mode bit on
4519 or r11,r11,r6 ; Turn on the EE bit if it was on
4520 mtmsrd r2 ; Make sure translation and EE are off and 64-bit is on
4521 isync ; Hang out a bit
4522
4523 rldimi r9,r3,32,0 ; Insert the top part of the ESID
4524
4525 slbie r9 ; Invalidate the associated SLB entry
4526
4527 mtmsrd r11 ; Restore the MSR
4528 isync
4529 blr ; Back to it.
4530
4531 .align 5
4532
4533hbs32bit: lwarx r4,0,r7 ; Get and reserve the valid segment flags
4534 rlwinm r6,r9,4,28,31 ; Convert segment to number
4535 lis r2,0x8000 ; Set up a mask
4536 srw r2,r2,r6 ; Make a mask
4537 and. r0,r4,r2 ; See if this is even valid
4538 li r5,invalSpace ; Set the invalid address space VSID
4539 beqlr ; Leave if already invalid...
4540
4541 mtsrin r5,r9 ; Slam the segment register
4542 isync ; Need to make sure this is done
4543
4544hbsrupt: andc r4,r4,r2 ; Clear the valid bit for this segment
4545 stwcx. r4,0,r7 ; Set the valid SR flags
4546 beqlr++ ; Stored ok, no interrupt, time to leave...
4547
4548 lwarx r4,0,r7 ; Get and reserve the valid segment flags again
4549 b hbsrupt ; Try again...
4550
4551;
 4552; This routine invalidates the entire pmap segment cache
4553;
4554; Translation is on, interrupts may or may not be enabled.
4555;
4556
4557 .align 5
4558 .globl EXT(invalidateSegs)
4559
4560LEXT(invalidateSegs)
4561
4562 la r10,pmapCCtl(r3) ; Point to the segment cache control
4563 eqv r2,r2,r2 ; Get all foxes
4564
4565isInv: lwarx r4,0,r10 ; Get the segment cache control value
4566 rlwimi r4,r2,0,0,15 ; Slam in all invalid bits
4567 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
4568 bne-- isInv0 ; Yes, try again...
4569
4570 stwcx. r4,0,r10 ; Try to invalidate it
4571 bne-- isInv ; Someone else just stuffed it...
4572 blr ; Leave...
4573
4574
4575isInv0: li r4,lgKillResv ; Get reservation kill zone
4576 stwcx. r4,0,r4 ; Kill reservation
4577
4578isInv1: lwz r4,pmapCCtl(r3) ; Get the segment cache control
4579 rlwinm. r0,r4,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
 4580 beq++ isInv ; Nope, it is free again, go retry the invalidate...
 4581 b isInv1 ; Still locked, keep checking...
4582
4583;
4584; This routine switches segment registers between kernel and user.
4585; We have some assumptions and rules:
4586; We are in the exception vectors
4587; pf64Bitb is set up
 4588; R3 contains the MSR we are going to
 4589; We cannot use R4, R13, R20, R21, R29
4590; R13 is the savearea
4591; R29 has the per_proc
4592;
4593; We return R3 as 0 if we did not switch between kernel and user
4594; We also maintain and apply the user state key modifier used by VMM support;
4595; If we go to the kernel it is set to 0, otherwise it follows the bit
4596; in spcFlags.
4597;
4598
 4599 .align 5
 4600 .globl EXT(switchSegs)
 4601
4602LEXT(switchSegs)
4603
4604 lwz r22,ppInvSeg(r29) ; Get the ppInvSeg (force invalidate) and ppCurSeg (user or kernel segments indicator)
4605 lwz r9,spcFlags(r29) ; Pick up the special user state flags
4606 rlwinm r2,r3,MSR_PR_BIT+1,31,31 ; Isolate the problem mode bit
4607 rlwinm r3,r3,MSR_RI_BIT+1,31,31 ; Isolate the recoverable interrupt bit
4608 lis r8,hi16(EXT(kernel_pmap_phys)) ; Assume kernel
 4609 or r2,r2,r3 ; This will be 1 if we will be using user segments
4610 li r3,0 ; Get a selection mask
4611 cmplw r2,r22 ; This will be EQ if same state and not ppInvSeg
4612 ori r8,r8,lo16(EXT(kernel_pmap_phys)) ; Assume kernel (bottom of address)
4613 sub r3,r3,r2 ; Form select mask - 0 if kernel, -1 if user
4614 la r19,ppUserPmap(r29) ; Point to the current user pmap
4615
 4616; The following line is an exercise of a generally unreadable but recompile-friendly programming practice
4617 rlwinm r30,r9,userProtKeybit+1+(63-sgcVSKeyUsr),sgcVSKeyUsr-32,sgcVSKeyUsr-32 ; Isolate the user state protection key
4618
4619 andc r8,r8,r3 ; Zero kernel pmap ptr if user, untouched otherwise
4620 and r19,r19,r3 ; Zero user pmap ptr if kernel, untouched otherwise
4621 and r30,r30,r3 ; Clear key modifier if kernel, leave otherwise
4622 or r8,r8,r19 ; Get the pointer to the pmap we are using
4623
4624 beqlr ; We are staying in the same mode, do not touch segs...
4625
4626 lwz r28,0(r8) ; Get top half of pmap address
4627 lwz r10,4(r8) ; Get bottom half
4628
4629 stw r2,ppInvSeg(r29) ; Clear request for invalidate and save ppCurSeg
4630 rlwinm r28,r28,0,1,0 ; Copy top to top
4631 stw r30,ppMapFlags(r29) ; Set the key modifier
4632 rlwimi r28,r10,0,0,31 ; Insert bottom
4633
4634 la r10,pmapCCtl(r28) ; Point to the segment cache control
4635 la r9,pmapSegCache(r28) ; Point to the segment cache
4636
4637ssgLock: lwarx r15,0,r10 ; Get and reserve the segment cache control
4638 rlwinm. r0,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
4639 ori r16,r15,lo16(pmapCCtlLck) ; Set lock bit
4640 bne-- ssgLock0 ; Yup, this is in use...
4641
4642 stwcx. r16,0,r10 ; Try to set the lock
4643 bne-- ssgLock ; Did we get contention?
4644
4645 not r11,r15 ; Invert the invalids to valids
4646 li r17,0 ; Set a mask for the SRs we are loading
4647 isync ; Make sure we are all caught up
4648
4649 bf-- pf64Bitb,ssg32Enter ; If 32-bit, jump into it...
4650
4651 li r0,0 ; Clear
4652 slbia ; Trash all SLB entries (except for entry 0 that is)
4653 li r17,1 ; Get SLB index to load (skip slb 0)
4654 oris r0,r0,0x8000 ; Get set for a mask
4655 b ssg64Enter ; Start on a cache line...
4656
4657 .align 5
 4658
4659ssgLock0: li r15,lgKillResv ; Killing field
 4660 stwcx. r15,0,r15 ; Kill reservation
 4661
4662ssgLock1: lwz r15,pmapCCtl(r28) ; Get the segment cache controls
4663 rlwinm. r15,r15,0,pmapCCtlLckb,pmapCCtlLckb ; Someone have the lock?
 4664 beq++ ssgLock ; Nope, it is free now, go try to take it...
 4665 b ssgLock1 ; Yup, still in use, keep waiting...
4666;
4667; This is the 32-bit address space switch code.
4668; We take a reservation on the segment cache and walk through.
4669; For each entry, we load the specified entries and remember which
4670; we did with a mask. Then, we figure out which segments should be
4671; invalid and then see which actually are. Then we load those with the
4672; defined invalid VSID.
4673; Afterwards, we unlock the segment cache.
4674;
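;
; In rough C, the walk amounts to this (a hedged sketch; helpers such as
; set_sr and the variable names are hypothetical, not kernel code):
;
;     uint16_t loaded = 0;                         /* SRs written this pass */
;     for (int slot = 0; slot < pmapSegCacheUse; slot++) {
;         if (!(valid_slots & (0x80000000u >> slot))) continue;
;         int sr = cache[slot].esid >> 28;         /* segment number 0..15  */
;         set_sr(sr, cache[slot].vsid ^ key_mod);  /* key modifier applied  */
;         loaded |= 0x8000 >> sr;
;     }
;     uint16_t stale = was_valid & ~loaded;        /* valid before, not now */
;     for (int sr = 0; sr < 16; sr++)
;         if (stale & (0x8000 >> sr)) set_sr(sr, invalSpace);
;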
 4675
4676 .align 5
4677
4678ssg32Enter: cntlzw r12,r11 ; Find the next slot in use
4679 cmplwi r12,pmapSegCacheUse ; See if we are done
4680 slwi r14,r12,4 ; Index to the cache slot
4681 lis r0,0x8000 ; Get set for a mask
4682 add r14,r14,r9 ; Point to the entry
4683
4684 bge- ssg32Done ; All done...
4685
4686 lwz r5,sgcESID+4(r14) ; Get the ESID part
4687 srw r2,r0,r12 ; Form a mask for the one we are loading
4688 lwz r7,sgcVSID+4(r14) ; And get the VSID bottom
4689
4690 andc r11,r11,r2 ; Clear the bit
4691 lwz r6,sgcVSID(r14) ; And get the VSID top
4692
4693 rlwinm r2,r5,4,28,31 ; Change the segment number to a number
4694
4695 xor r7,r7,r30 ; Modify the key before we actually set it
4696 srw r0,r0,r2 ; Get a mask for the SR we are loading
4697 rlwinm r8,r7,19,1,3 ; Insert the keys and N bit
4698 or r17,r17,r0 ; Remember the segment
 4699 rlwimi r8,r7,20,12,31 ; Insert bits 4:23 of the VSID
4700 rlwimi r8,r6,20,8,11 ; Get the last nybble of the SR contents
4701
4702 mtsrin r8,r5 ; Load the segment
4703 b ssg32Enter ; Go enter the next...
4704
4705 .align 5
4706
4707ssg32Done: lwz r16,validSegs(r29) ; Get the valid SRs flags
4708 stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
4709
4710 lis r0,0x8000 ; Get set for a mask
4711 li r2,invalSpace ; Set the invalid address space VSID
4712
4713 nop ; Align loop
4714 nop ; Align loop
4715 andc r16,r16,r17 ; Get list of SRs that were valid before but not now
4716 nop ; Align loop
4717
4718ssg32Inval: cntlzw r18,r16 ; Get the first one to invalidate
4719 cmplwi r18,16 ; Have we finished?
4720 srw r22,r0,r18 ; Get the mask bit
4721 rlwinm r23,r18,28,0,3 ; Get the segment register we need
4722 andc r16,r16,r22 ; Get rid of the guy we just did
4723 bge ssg32Really ; Yes, we are really done now...
4724
4725 mtsrin r2,r23 ; Invalidate the SR
4726 b ssg32Inval ; Do the next...
4727
4728 .align 5
4729
4730ssg32Really:
4731 stw r17,validSegs(r29) ; Set the valid SR flags
4732 li r3,1 ; Set kernel/user transition
4733 blr
4734
4735;
4736; This is the 64-bit address space switch code.
4737; First we blow away all of the SLB entries.
 4738; Then we walk through the segment cache,
 4739; loading the SLB. Afterwards, we release the cache lock.
 4740;
 4741; Note that because we have to treat SLBE 0 specially, we do not ever use it...
 4742; It is a performance thing...
4743;
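;
; And the 64-bit flavor in the same hedged C style (slbia/slbmte stand in
; for the instructions; none of these names are kernel code):
;
;     slbia();                                     /* toss every SLB entry  */
;     int idx = 1;                                 /* never use SLBE 0      */
;     for (int slot = 0; slot < pmapSegCacheUse; slot++) {
;         if (!(valid_slots & (0x80000000u >> slot))) continue;
;         slbmte(cache[slot].vsid ^ key_mod,        /* VSID half, key folded */
;                cache[slot].esid | SLB_V | idx++); /* ESID half, slot index */
;     }
;     valid_segs = ~0ULL << (64 - idx);            /* mark used entries     */
;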
4744
4745 .align 5
 4746
4747ssg64Enter: cntlzw r12,r11 ; Find the next slot in use
4748 cmplwi r12,pmapSegCacheUse ; See if we are done
4749 slwi r14,r12,4 ; Index to the cache slot
4750 srw r16,r0,r12 ; Form a mask for the one we are loading
4751 add r14,r14,r9 ; Point to the entry
4752 andc r11,r11,r16 ; Clear the bit
4753 bge-- ssg64Done ; All done...
4754
4755 ld r5,sgcESID(r14) ; Get the ESID part
4756 ld r6,sgcVSID(r14) ; And get the VSID part
4757 oris r5,r5,0x0800 ; Turn on the valid bit
4758 or r5,r5,r17 ; Insert the SLB slot
4759 xor r6,r6,r30 ; Modify the key before we actually set it
4760 addi r17,r17,1 ; Bump to the next slot
4761 slbmte r6,r5 ; Make that SLB entry
4762 b ssg64Enter ; Go enter the next...
 4763
 4764 .align 5
 4765
4766ssg64Done: stw r15,pmapCCtl(r28) ; Unlock the segment cache controls
 4767
4768 eqv r16,r16,r16 ; Load up with all foxes
4769 subfic r17,r17,64 ; Get the number of 1 bits we need
4770
4771 sld r16,r16,r17 ; Get a mask for the used SLB entries
4772 li r3,1 ; Set kernel/user transition
4773 std r16,validSegs(r29) ; Set the valid SR flags
4774 blr
4775
4776;
4777; mapSetUp - this function sets initial state for all mapping functions.
4778; We turn off all translations (physical), disable interruptions, and
4779; enter 64-bit mode if applicable.
4780;
4781; We also return the original MSR in r11, the feature flags in R12,
4782; and CR6 set up so we can do easy branches for 64-bit
4783;
4784
4785 .align 5
4786 .globl EXT(mapSetUp)
4787
4788LEXT(mapSetUp)
4789
4790 lis r0,hi16(MASK(MSR_VEC)) ; Get the vector mask
4791 mfsprg r12,2 ; Get feature flags
4792 ori r0,r0,lo16(MASK(MSR_FP)) ; Get the FP as well
4793 mtcrf 0x04,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
4794 mfmsr r11 ; Save the MSR
4795 mtcrf 0x02,r12 ; move pf64Bit and pfNoMSRirb to cr5 and 6
4796 andc r11,r11,r0 ; Clear VEC and FP for good
4797 ori r0,r0,lo16(MASK(MSR_EE)|MASK(MSR_DR)|MASK(MSR_IR)) ; Get rid of EE, IR, and DR
4798 li r2,1 ; Prepare for 64 bit
4799 andc r0,r11,r0 ; Clear the rest
4800 bt pfNoMSRirb,msuNoMSR ; No MSR...
4801 bt++ pf64Bitb,msuSF ; skip if 64-bit (only they take the hint)
 4802
4803 mtmsr r0 ; Translation and all off
4804 isync ; Toss prefetch
4805 blr ; Return...
4806
4807 .align 5
4808
4809msuSF: rldimi r0,r2,63,MSR_SF_BIT ; set SF bit (bit 0)
4810 mtmsrd r0 ; set 64-bit mode, turn off EE, DR, and IR
4811 isync ; synchronize
4812 blr ; Return...
4813
4814 .align 5
4815
4816msuNoMSR: mr r2,r3 ; Save R3 across call
4817 mr r3,r0 ; Get the new MSR value
4818 li r0,loadMSR ; Get the MSR setter SC
4819 sc ; Set it
4820 mr r3,r2 ; Restore R3
4821 blr ; Go back all set up...
4822
4823
4824;
4825; Find the physent based on a physical page and try to lock it (but not too hard)
 4826; Note that this table always ends with an entry that has a 0 table pointer
4827;
4828; R3 contains ppnum on entry
4829; R3 is 0 if no entry was found
4830; R3 is physent if found
4831; cr0_eq is true if lock was obtained or there was no entry to lock
 4832; cr0_eq is false if there was an entry and it was locked
4833;
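;
; A hedged C sketch of the walk and try-lock (the struct fields and cas()
; helper are hypothetical, not kernel code):
;
;     struct physent *mapFindPhyTry(uint32_t ppnum, int *got_lock) {
;         for (struct region *r = pmap_mem_regions; r->phys_tab; r++) {
;             if (ppnum < r->start || ppnum > r->end) continue;
;             struct physent *pe = &r->phys_tab[ppnum - r->start];
;             for (;;) {
;                 uint32_t old = pe->lock;
;                 if (old & 0x80000000u) { *got_lock = 0; return pe; }
;                 if (cas(&pe->lock, old, old | 0x80000000u)) {
;                     *got_lock = 1; return pe;   /* locked it ourselves  */
;                 }                               /* collision, try again */
;             }
;         }
;         *got_lock = 1; return NULL;             /* hit the 0 end entry  */
;     }
;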
4834
4835 .align 5
4836
4837mapFindPhyTry:
4838 lis r9,hi16(EXT(pmap_mem_regions)) ; Point to the start of the region table
4839 mr r2,r3 ; Save our target
4840 ori r9,r9,lo16(EXT(pmap_mem_regions)) ; Point to the start of the region table
4841
4842mapFindPhz: lwz r3,mrPhysTab(r9) ; Get the actual table address
4843 lwz r5,mrStart(r9) ; Get start of table entry
4844 lwz r0,mrEnd(r9) ; Get end of table entry
4845 addi r9,r9,mrSize ; Point to the next slot
4846 cmplwi cr2,r3,0 ; Are we at the end of the table?
4847 cmplw r2,r5 ; See if we are in this table
4848 cmplw cr1,r2,r0 ; Check end also
4849 sub r4,r2,r5 ; Calculate index to physical entry
4850 beq-- cr2,mapFindNo ; Leave if we did not find an entry...
4851 cror cr0_lt,cr0_lt,cr1_gt ; Set CR0_LT if it is NOT this entry
4852 slwi r4,r4,3 ; Get offset to physical entry
4853
4854 blt-- mapFindPhz ; Did not find it...
4855
4856 add r3,r3,r4 ; Point right to the slot
4857
4858mapFindOv: lwz r2,0(r3) ; Get the lock contents right now
4859 rlwinm. r0,r2,0,0,0 ; Is it locked?
4860 bnelr-- ; Yes it is...
4861
4862 lwarx r2,0,r3 ; Get the lock
4863 rlwinm. r0,r2,0,0,0 ; Is it locked?
4864 oris r0,r2,0x8000 ; Set the lock bit
4865 bne-- mapFindKl ; It is locked, go get rid of reservation and leave...
4866 stwcx. r0,0,r3 ; Try to stuff it back...
4867 bne-- mapFindOv ; Collision, try again...
4868 isync ; Clear any speculations
4869 blr ; Leave...
4870
4871mapFindKl: li r2,lgKillResv ; Killing field
4872 stwcx. r2,0,r2 ; Trash reservation...
4873 crclr cr0_eq ; Make sure we do not think we got the lock
4874 blr ; Leave...
4875
4876mapFindNo: crset cr0_eq ; Make sure that we set this
4877 li r3,0 ; Show that we did not find it
4878 blr ; Leave...
4879;
4880; pmapCacheLookup - This function will look up an entry in the pmap segment cache.
4881;
4882; How the pmap cache lookup works:
4883;
4884; We use a combination of three things: a mask of valid entries, a sub-tag, and the
4885; ESID (aka the "tag"). The mask indicates which of the cache slots actually contain
4886; an entry. The sub-tag is a 16 entry 4 bit array that contains the low order 4 bits
 4887; of the ESID, bits 32:35 of the effective address for 64-bit and 0:3 for 32-bit. The cache
4888; entry contains the full 36 bit ESID.
4889;
4890; The purpose of the sub-tag is to limit the number of searches necessary when looking
4891; for an existing cache entry. Because there are 16 slots in the cache, we could end up
 4892; searching all 16 if a match is not found.
4893;
4894; Essentially, we will search only the slots that have a valid entry and whose sub-tag
4895; matches. More than likely, we will eliminate almost all of the searches.
4896;
4897; Inputs:
4898; R3 = pmap
4899; R4 = ESID high half
4900; R5 = ESID low half
4901;
4902; Outputs:
4903; R3 = pmap cache slot if found, 0 if not
4904; R10 = pmapCCtl address
4905; R11 = pmapCCtl image
4906; pmapCCtl locked on exit
4907;
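;
; The sub-tag compare below is a SIMD-within-a-register trick; a hedged C
; rendition of it (hypothetical helper, not kernel code):
;
;     #include <stdint.h>
;
;     /* Bit i of the result is set when 4-bit sub-tag i equals tag. */
;     static uint16_t subTagHits(uint64_t subtags, unsigned tag)
;     {
;         uint64_t probe = (uint64_t)(tag & 0xF) * 0x1111111111111111ULL;
;         uint64_t eq = ~(subtags ^ probe);  /* nibble is 0xF on a match  */
;         eq &= eq << 1;                     /* AND neighboring bits      */
;         eq &= eq << 2;                     /* nibble MSB = AND of all 4 */
;         eq &= 0x8888888888888888ULL;       /* one flag bit per slot     */
;         uint16_t hits = 0;
;         for (int slot = 0; slot < 16; slot++)     /* compress the flags */
;             if ((eq >> (63 - 4 * slot)) & 1) hits |= 0x8000 >> slot;
;         return hits;
;     }
;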
4908
4909 .align 5
4910
4911pmapCacheLookup:
4912 la r10,pmapCCtl(r3) ; Point to the segment cache control
4913
4914pmapCacheLookuq:
4915 lwarx r11,0,r10 ; Get the segment cache control value
4916 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
4917 ori r0,r11,lo16(pmapCCtlLck) ; Turn on the lock bit
 4918 bne-- pmapCacheLookur ; Yup, it is locked, go wait for it...
4919 stwcx. r0,0,r10 ; Try to take the lock
4920 bne-- pmapCacheLookuq ; Someone else just stuffed it, try again...
4921
4922 isync ; Make sure we get reservation first
4923 lwz r9,pmapSCSubTag(r3) ; Get the high part of the sub-tag
4924 rlwimi r5,r5,28,4,7 ; Copy sub-tag just to right of itself (XX------)
4925 lwz r10,pmapSCSubTag+4(r3) ; And the bottom half
4926 rlwimi r5,r5,24,8,15 ; Copy doubled sub-tag to right of itself (XXXX----)
4927 lis r8,0x8888 ; Get some eights
4928 rlwimi r5,r5,16,16,31 ; Copy quadrupled sub-tags to the right
4929 ori r8,r8,0x8888 ; Fill the rest with eights
4930
4931 eqv r10,r10,r5 ; Get 0xF where we hit in bottom half
4932 eqv r9,r9,r5 ; Get 0xF where we hit in top half
4933
4934 rlwinm r2,r10,1,0,30 ; Shift over 1
4935 rlwinm r0,r9,1,0,30 ; Shift over 1
4936 and r2,r2,r10 ; AND the even/odd pair into the even
4937 and r0,r0,r9 ; AND the even/odd pair into the even
4938 rlwinm r10,r2,2,0,28 ; Shift over 2
4939 rlwinm r9,r0,2,0,28 ; Shift over 2
4940 and r10,r2,r10 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
4941 and r9,r0,r9 ; AND the even of the ANDed pairs giving the AND of all 4 bits in 0, 4, ...
4942
4943 and r10,r10,r8 ; Clear out extras
4944 and r9,r9,r8 ; Clear out extras
4945
4946 rlwinm r0,r10,3,1,28 ; Slide adjacent next to each other
4947 rlwinm r2,r9,3,1,28 ; Slide adjacent next to each other
4948 or r10,r0,r10 ; Merge them
4949 or r9,r2,r9 ; Merge them
4950 rlwinm r0,r10,6,2,26 ; Slide adjacent pairs next to each other
4951 rlwinm r2,r9,6,2,26 ; Slide adjacent pairs next to each other
4952 or r10,r0,r10 ; Merge them
4953 or r9,r2,r9 ; Merge them
4954 rlwimi r10,r10,12,4,7 ; Stick in the low-order adjacent quad
4955 rlwimi r9,r9,12,4,7 ; Stick in the low-order adjacent quad
4956 not r6,r11 ; Turn invalid into valid
4957 rlwimi r9,r10,24,8,15 ; Merge in the adjacent octs giving a hit mask
4958
4959 la r10,pmapSegCache(r3) ; Point at the cache slots
4960 and. r6,r9,r6 ; Get mask of valid and hit
4961 li r0,0 ; Clear
4962 li r3,0 ; Assume not found
4963 oris r0,r0,0x8000 ; Start a mask
4964 beqlr++ ; Leave, should usually be no hits...
4965
4966pclNextEnt: cntlzw r5,r6 ; Find an in use one
4967 cmplwi cr1,r5,pmapSegCacheUse ; Did we find one?
4968 rlwinm r7,r5,4,0,27 ; Index to the cache entry
4969 srw r2,r0,r5 ; Get validity mask bit
4970 add r7,r7,r10 ; Point to the cache slot
4971 andc r6,r6,r2 ; Clear the validity bit we just tried
4972 bgelr-- cr1 ; Leave if there are no more to check...
4973
4974 lwz r5,sgcESID(r7) ; Get the top half
4975
4976 cmplw r5,r4 ; Only need to check top because sub-tag is the entire other half
4977
4978 bne++ pclNextEnt ; Nope, try again...
4979
4980 mr r3,r7 ; Point to the slot
4981 blr ; Leave....
 4982
 4983 .align 5
 4984
4985pmapCacheLookur:
4986 li r11,lgKillResv ; The killing spot
4987 stwcx. r11,0,r11 ; Kill the reservation
 4988
4989pmapCacheLookus:
4990 lwz r11,pmapCCtl(r3) ; Get the segment cache control
4991 rlwinm. r0,r11,0,pmapCCtlLckb,pmapCCtlLckb ; Is it already locked?
4992 beq++ pmapCacheLookup ; Nope...
4993 b pmapCacheLookus ; Yup, keep waiting...
4994
4995
4996
4997
4998;
4999; This routine, given a mapping, will find and lock the PTEG
5000; If mpPte does not point to a PTE (checked before and after lock), it will unlock the
 5001; PTEG and return. In this case R4 is undefined and R5 holds the
 5002; low 12 bits of mpVAddr. R3 will contain 0.
5003;
5004; If the mapping is still valid, we will invalidate the PTE and merge
5005; the RC bits into the physent and also save them into the mapping.
5006;
5007; We then return with R3 pointing to the PTE slot, R4 is the
5008; top of the PTE and R5 is the bottom. R6 contains the PCA.
5009; R7 points to the PCA entry.
5010;
5011; Note that we should NEVER be called on a block or special mapping.
5012; We could do many bad things.
5013;
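;
; The control flow, in hedged C pseudo-form (all of these names are
; hypothetical stand-ins for the assembly below):
;
;     if (!(mp->mpPte & mpHValid)) return 0;   /* no PTE to begin with   */
;     lock_pteg(pca);
;     if (!(mp->mpPte & mpHValid)) {           /* re-check under lock    */
;         unlock_pteg(pca); return 0;
;     }
;     pte->hi &= ~PTE_VALID;                   /* invalidate the PTE     */
;     tlbie_and_sync(va);                      /* flush every TLB        */
;     rc = pte->lo & (PTE_R | PTE_C);          /* collect ref and change */
;     mp->mpVAddr |= rc;                       /* save RC in the mapping */
;     physent_merge_rc(ppnum, rc);             /* and in the physent     */
;     return pte;                              /* PCA stays locked       */
;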
5014
5015 .align 5
5016
5017mapInvPte32:
5018 lwz r0,mpPte(r31) ; Grab the PTE offset
5019 mfsdr1 r7 ; Get the pointer to the hash table
5020 lwz r5,mpVAddr+4(r31) ; Grab the virtual address
5021 rlwinm r10,r7,0,0,15 ; Clean up the hash table base
5022 andi. r3,r0,mpHValid ; Is there a possible PTE?
5023 srwi r7,r0,4 ; Convert to PCA units
5024 rlwinm r7,r7,0,0,29 ; Clean up PCA offset
5025 mflr r2 ; Save the return
5026 subfic r7,r7,-4 ; Convert to -4 based negative index
5027 add r7,r10,r7 ; Point to the PCA directly
5028 beqlr-- ; There was no PTE to start with...
5029
5030 bl mapLockPteg ; Lock the PTEG
5031
5032 lwz r0,mpPte(r31) ; Grab the PTE offset
5033 mtlr r2 ; Restore the LR
5034 andi. r3,r0,mpHValid ; Is there a possible PTE?
5035 beq- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
5036
5037 rlwinm r3,r0,0,0,30 ; Clear the valid bit
5038 add r3,r3,r10 ; Point to actual PTE
5039 lwz r4,0(r3) ; Get the top of the PTE
5040
5041 li r8,tlbieLock ; Get the TLBIE lock
5042 rlwinm r0,r4,0,1,31 ; Clear the valid bit
5043 stw r0,0(r3) ; Invalidate the PTE
5044
5045 sync ; Make sure everyone sees the invalidate
5046
5047mITLBIE32: lwarx r0,0,r8 ; Get the TLBIE lock
5048 mfsprg r2,2 ; Get feature flags
5049 mr. r0,r0 ; Is it locked?
5050 li r0,1 ; Get our lock word
5051 bne- mITLBIE32 ; It is locked, go wait...
5052
5053 stwcx. r0,0,r8 ; Try to get it
5054 bne- mITLBIE32 ; We was beat...
5055
5056 rlwinm. r0,r2,0,pfSMPcapb,pfSMPcapb ; Can this be an MP box?
5057 li r0,0 ; Lock clear value
5058
5059 tlbie r5 ; Invalidate it everywhere
5060
5061 stw r0,tlbieLock(0) ; Clear the tlbie lock
5062
5063 beq- mINoTS32 ; Can not have MP on this machine...
5064
5065 eieio ; Make sure that the tlbie happens first
5066 tlbsync ; Wait for everyone to catch up
5067 sync ; Make sure of it all
5068
5069mINoTS32: lwz r5,4(r3) ; Get the real part
5070 srwi r10,r5,12 ; Change physical address to a ppnum
5071
5072mINmerge: lbz r11,mpFlags+1(r31) ; Get the offset to the physical entry table
5073 lwz r0,mpVAddr+4(r31) ; Get the flags part of the field
5074 lis r8,hi16(EXT(pmap_mem_regions)) ; Get the top of the region table
5075 ori r8,r8,lo16(EXT(pmap_mem_regions)) ; Get the bottom of the region table
5076 rlwinm r11,r11,2,0,29 ; Change index into byte offset
5077 add r11,r11,r8 ; Point to the bank table
5078 lwz r2,mrPhysTab(r11) ; Get the physical table bank pointer
5079 lwz r11,mrStart(r11) ; Get the start of bank
5080 rlwimi r0,r5,0,mpRb-32,mpCb-32 ; Copy in the RC
5081 addi r2,r2,4 ; Offset to last half of field
5082 stw r0,mpVAddr+4(r31) ; Set the new RC into the field
5083 sub r11,r10,r11 ; Get the index into the table
5084 rlwinm r11,r11,3,0,28 ; Get offset to the physent
5085
5086
5087mImrgRC: lwarx r10,r11,r2 ; Get the master RC
5088 rlwinm r0,r5,27,ppRb-32,ppCb-32 ; Position the new RC
5089 or r0,r0,r10 ; Merge in the new RC
5090 stwcx. r0,r11,r2 ; Try to stick it back
5091 bne-- mImrgRC ; Try again if we collided...
5092
5093 blr ; Leave with the PCA still locked up...
5094
5095mIPUnlock: eieio ; Make sure all updates come first
5096
5097 stw r6,0(r7) ; Unlock
 5098 blr
 5099
5100;
5101; 64-bit version
5102;
5103 .align 5
 5104
5105mapInvPte64:
5106 lwz r0,mpPte(r31) ; Grab the PTE offset
5107 ld r5,mpVAddr(r31) ; Grab the virtual address
5108 mfsdr1 r7 ; Get the pointer to the hash table
5109 rldicr r10,r7,0,45 ; Clean up the hash table base
5110 andi. r3,r0,mpHValid ; Is there a possible PTE?
5111 srdi r7,r0,5 ; Convert to PCA units
5112 rldicr r7,r7,0,61 ; Clean up PCA
5113 subfic r7,r7,-4 ; Convert to -4 based negative index
5114 mflr r2 ; Save the return
5115 add r7,r10,r7 ; Point to the PCA directly
5116 beqlr-- ; There was no PTE to start with...
5117
5118 bl mapLockPteg ; Lock the PTEG
5119
5120 lwz r0,mpPte(r31) ; Grab the PTE offset again
5121 mtlr r2 ; Restore the LR
5122 andi. r3,r0,mpHValid ; Is there a possible PTE?
5123 beq-- mIPUnlock ; There is no PTE, someone took it so just unlock and leave...
5124
5125 rlwinm r3,r0,0,0,30 ; Clear the valid bit
5126 add r3,r3,r10 ; Point to the actual PTE
5127 ld r4,0(r3) ; Get the top of the PTE
5128
5129 li r8,tlbieLock ; Get the TLBIE lock
5130 rldicr r0,r4,0,62 ; Clear the valid bit
5131 std r0,0(r3) ; Invalidate the PTE
5132
5133 rldicr r2,r4,16,35 ; Shift the AVPN over to match VPN
5134 sync ; Make sure everyone sees the invalidate
5135 rldimi r2,r5,0,36 ; Cram in the page portion of the EA
5136
5137mITLBIE64: lwarx r0,0,r8 ; Get the TLBIE lock
5138 mr. r0,r0 ; Is it locked?
5139 li r0,1 ; Get our lock word
5140 bne-- mITLBIE64a ; It is locked, toss reservation and wait...
5141
5142 stwcx. r0,0,r8 ; Try to get it
5143 bne-- mITLBIE64 ; We was beat...
5144
5145 rldicl r2,r2,0,16 ; Clear bits 0:15 because we are under orders
5146
5147 li r0,0 ; Lock clear value
5148
5149 tlbie r2 ; Invalidate it everywhere
5150
5151 stw r0,tlbieLock(0) ; Clear the tlbie lock
5152
5153 eieio ; Make sure that the tlbie happens first
5154 tlbsync ; Wait for everyone to catch up
5155 isync
5156 ptesync ; Wait for quiet again
5157
5158mINoTS64: sync ; Make sure of it all
5159
5160 ld r5,8(r3) ; Get the real part
5161 srdi r10,r5,12 ; Change physical address to a ppnum
5162 b mINmerge ; Join the common 32-64-bit code...
5163
5164mITLBIE64a: li r5,lgKillResv ; Killing field
5165 stwcx. r5,0,r5 ; Kill reservation
5166
5167mITLBIE64b: lwz r0,0(r8) ; Get the TLBIE lock
5168 mr. r0,r0 ; Is it locked?
5169 beq++ mITLBIE64 ; Nope, try again...
5170 b mITLBIE64b ; Yup, wait for it...
5171
5172;
5173; mapLockPteg - Locks a PTEG
5174; R7 points to PCA entry
5175; R6 contains PCA on return
5176;
5177;
 5178
 5179 .align 5
5180
5181mapLockPteg:
5182 lwarx r6,0,r7 ; Pick up the PCA
5183 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
5184 ori r0,r6,PCAlock ; Set the lock bit
5185 bne-- mLSkill ; It is locked...
5186
5187 stwcx. r0,0,r7 ; Try to lock the PTEG
5188 bne-- mapLockPteg ; We collided...
5189
5190 isync ; Nostradamus lied
5191 blr ; Leave...
5192
5193mLSkill: li r6,lgKillResv ; Get killing field
5194 stwcx. r6,0,r6 ; Kill it
 5195
5196mapLockPteh:
5197 lwz r6,0(r7) ; Pick up the PCA
5198 rlwinm. r0,r6,0,PCAlockb,PCAlockb ; Is the PTEG locked?
5199 beq++ mapLockPteg ; Nope, try again...
5200 b mapLockPteh ; Yes, wait for it...
 5201
5202
5203;
5204; The mapSelSlot function selects a PTEG slot to use. As input, it expects R6
5205; to contain the PCA. When it returns, R3 contains 0 if an unoccupied slot was
5206; selected, 1 if it stole a non-block PTE, or 2 if it stole a block mapped PTE.
5207; R4 returns the slot index.
5208;
5209; CR7 also indicates that we have a block mapping
5210;
5211; The PTEG allocation controls are a bit map of the state of the PTEG.
5212; PCAfree indicates that the PTE slot is empty.
5213; PCAauto means that it comes from an autogen area. These
5214; guys do not keep track of reference and change and are actually "wired".
5215; They are easy to maintain. PCAsteal
5216; is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these
5217; fields fit in a single word and are loaded and stored under control of the
5218; PTEG control area lock (PCAlock).
5219;
5220; Note that PCAauto does not contribute to the steal calculations at all. Originally
 5221; it did, autogens were second in priority. This can result in a pathological
 5222; case where an instruction cannot make forward progress, or one PTE slot
5223; thrashes.
5224;
5225; Note that the PCA must be locked when we get here.
5226;
5227; Physically, the fields are arranged:
5228; 0: PCAfree
5229; 1: PCAsteal
5230; 2: PCAauto
5231; 3: PCAmisc
5232;
5233;
5234; At entry, R6 contains new unlocked PCA image (real PCA is locked and untouched)
5235;
5236; At exit:
5237;
5238; R3 = 0 - no steal
5239; R3 = 1 - steal regular
5240; R3 = 2 - steal autogen
5241; R4 contains slot number
5242; R6 contains updated PCA image
5243;
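;
; The slot pick itself is a single count-leading-zeros over the PCA image;
; in hedged C (illustrative only, not kernel code):
;
;     /* PCAfree is byte 0 and PCAsteal byte 1, so one clz yields 0..7
;        for a free slot and 8..15 for the current steal position. */
;     int lz    = __builtin_clz(pca_image);  /* steal mask keeps this < 16  */
;     int slot  = lz & 7;                    /* PTEG slot index 0..7        */
;     int stole = lz >= 8;                   /* true when nothing was free  */
;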
5244
5245 .align 5
 5246
5247mapSelSlot: lis r10,0 ; Clear autogen mask
5248 li r9,0 ; Start a mask
5249 beq cr7,mSSnotblk ; Skip if this is not a block mapping
5250 ori r10,r10,lo16(0xFFFF) ; Make sure we mark a block mapping (autogen)
5251
5252mSSnotblk: rlwinm r11,r6,16,24,31 ; Isolate just the steal mask
5253 oris r9,r9,0x8000 ; Get a mask
5254 cntlzw r4,r6 ; Find a slot or steal one
5255 ori r9,r9,lo16(0x8000) ; Insure that we have 0x80008000
5256 rlwinm r4,r4,0,29,31 ; Isolate bit position
5257 rlwimi r11,r11,8,16,23 ; Get set to march a 1 back into top of 8 bit rotate
5258 srw r2,r9,r4 ; Get mask to isolate selected inuse and autogen flags
5259 srwi r11,r11,1 ; Slide steal mask right
5260 and r8,r6,r2 ; Isolate the old in use and autogen bits
5261 andc r6,r6,r2 ; Allocate the slot and also clear autogen flag
5262 addi r0,r8,0x7F00 ; Push autogen flag to bit 16
5263 and r2,r2,r10 ; Keep the autogen part if autogen
5264 addis r8,r8,0xFF00 ; Push in use to bit 0 and invert
5265 or r6,r6,r2 ; Add in the new autogen bit
5266 rlwinm r0,r0,17,31,31 ; Get a 1 if the old was autogenned (always 0 if not in use)
5267 rlwinm r8,r8,1,31,31 ; Isolate old in use
5268 rlwimi r6,r11,16,8,15 ; Stick the new steal slot in
5269
5270 add r3,r0,r8 ; Get 0 if no steal, 1 if steal normal, 2 if steal autogen
5271 blr ; Leave...
 5272
5273;
5274; Shared/Exclusive locks
5275;
5276; A shared/exclusive lock allows multiple shares of a lock to be taken
5277; but only one exclusive. A shared lock can be "promoted" to exclusive
5278; when it is the only share. If there are multiple sharers, the lock
5279; must be "converted". A promotion drops the share and gains exclusive as
5280; an atomic operation. If anyone else has a share, the operation fails.
5281; A conversion first drops the share and then takes an exclusive lock.
5282;
5283; We will want to add a timeout to this eventually.
5284;
5285; R3 is set to 0 for success, non-zero for failure
5286;
5287
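;
; The whole family in hedged C, with a plain cas() loop standing in for
; lwarx/stwcx. (cas, atomic_sub, and spin_while_nonzero are hypothetical,
; illustrative only, not kernel code):
;
;     /* lock word: 0x80000000 = exclusive, low bits = share count */
;     int sxlkPromote(uint32_t *lk) {          /* fail unless sole sharer */
;         return cas(lk, 1, 0x80000000u) ? 0 : 1;
;     }
;     int sxlkConvert(uint32_t *lk) {
;         if (cas(lk, 1, 0x80000000u)) return 0;  /* fast path            */
;         atomic_sub(lk, 1);                      /* drop our share       */
;         return sxlkExclusive(lk);               /* then take exclusive  */
;     }
;     int sxlkExclusive(uint32_t *lk) {
;         while (!cas(lk, 0, 0x80000000u)) spin_while_nonzero(lk);
;         return 0;
;     }
;     int sxlkShared(uint32_t *lk) {
;         for (;;) {
;             uint32_t v = *lk;
;             if (v & 0x80000000u) continue;      /* exclusively held     */
;             if (cas(lk, v, v + 1)) return 0;    /* one more sharer      */
;         }
;     }
;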
5288;
5289; Convert a share into an exclusive
5290;
5291
5292 .align 5
 5293
5294sxlkConvert:
5295
5296 lis r0,0x8000 ; Get the locked lock image
5297#if 0
5298 mflr r0 ; (TEST/DEBUG)
5299 oris r0,r0,0x8000 ; (TEST/DEBUG)
5300#endif
5301
5302sxlkCTry: lwarx r2,0,r3 ; Get the lock word
5303 cmplwi r2,1 ; Does it just have our share?
5304 subi r2,r2,1 ; Drop our share in case we do not get it
5305 bne-- sxlkCnotfree ; No, we need to unlock...
5306 stwcx. r0,0,r3 ; Try to take it exclusively
5307 bne-- sxlkCTry ; Collision, try again...
 5308
5309 isync
5310 li r3,0 ; Set RC
5311 blr ; Leave...
5312
5313sxlkCnotfree:
5314 stwcx. r2,0,r3 ; Try to drop our share...
5315 bne-- sxlkCTry ; Try again if we collided...
5316 b sxlkExclusive ; Go take it exclusively...
5317
5318;
5319; Promote shared to exclusive
5320;
5321
5322 .align 5
 5323
5324sxlkPromote:
5325 lis r0,0x8000 ; Get the locked lock image
5326#if 0
5327 mflr r0 ; (TEST/DEBUG)
5328 oris r0,r0,0x8000 ; (TEST/DEBUG)
5329#endif
5330
5331sxlkPTry: lwarx r2,0,r3 ; Get the lock word
5332 cmplwi r2,1 ; Does it just have our share?
5333 bne-- sxlkPkill ; No, just fail (R3 is non-zero)...
5334 stwcx. r0,0,r3 ; Try to take it exclusively
5335 bne-- sxlkPTry ; Collision, try again...
 5336
5337 isync
5338 li r3,0 ; Set RC
 5339 blr ; Leave...
5340
5341sxlkPkill: li r2,lgKillResv ; Point to killing field
5342 stwcx. r2,0,r2 ; Kill reservation
5343 blr ; Leave
5344
5345
5346
5347;
 5348; Take lock exclusively
5349;
5350
5351 .align 5
 5352
5353sxlkExclusive:
5354 lis r0,0x8000 ; Get the locked lock image
5355#if 0
5356 mflr r0 ; (TEST/DEBUG)
5357 oris r0,r0,0x8000 ; (TEST/DEBUG)
5358#endif
5359
5360sxlkXTry: lwarx r2,0,r3 ; Get the lock word
5361 mr. r2,r2 ; Is it locked?
5362 bne-- sxlkXWait ; Yes...
5363 stwcx. r0,0,r3 ; Try to take it
5364 bne-- sxlkXTry ; Collision, try again...
 5365
5366 isync ; Toss anything younger than us
5367 li r3,0 ; Set RC
5368 blr ; Leave...
 5369
5370 .align 5
5371
5372sxlkXWait: li r2,lgKillResv ; Point to killing field
5373 stwcx. r2,0,r2 ; Kill reservation
 5374
5375sxlkXWaiu: lwz r2,0(r3) ; Get the lock again
5376 mr. r2,r2 ; Is it free yet?
5377 beq++ sxlkXTry ; Yup...
5378 b sxlkXWaiu ; Hang around a bit more...
 5379
5380;
5381; Take a share of the lock
5382;
 5383
 5384 .align 5
5385
5386sxlkShared: lwarx r2,0,r3 ; Get the lock word
5387 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
5388 addi r2,r2,1 ; Up the share count
5389 bne-- sxlkSWait ; Yes...
5390 stwcx. r2,0,r3 ; Try to take it
5391 bne-- sxlkShared ; Collision, try again...
5392
5393 isync ; Toss anything younger than us
5394 li r3,0 ; Set RC
5395 blr ; Leave...
5396
5397 .align 5
 5398
5399sxlkSWait: li r2,lgKillResv ; Point to killing field
5400 stwcx. r2,0,r2 ; Kill reservation
 5401
5402sxlkSWaiu: lwz r2,0(r3) ; Get the lock again
5403 rlwinm. r0,r2,0,0,0 ; Is it locked exclusively?
5404 beq++ sxlkShared ; Nope...
5405 b sxlkSWaiu ; Hang around a bit more...
5406
5407;
5408; Unlock either exclusive or shared.
5409;
5410
5411 .align 5
5412
5413sxlkUnlock: eieio ; Make sure we order our stores out
5414
5415sxlkUnTry: lwarx r2,0,r3 ; Get the lock
5416 rlwinm. r0,r2,0,0,0 ; Do we hold it exclusively?
5417 subi r2,r2,1 ; Remove our share if we have one
5418 li r0,0 ; Clear this
5419 bne-- sxlkUExclu ; We hold exclusive...
5420
5421 stwcx. r2,0,r3 ; Try to lose our share
5422 bne-- sxlkUnTry ; Collision...
5423 blr ; Leave...
5424
5425sxlkUExclu: stwcx. r0,0,r3 ; Unlock and release reservation
5426 beqlr++ ; Leave if ok...
5427 b sxlkUnTry ; Could not store, try over...
5428
5429
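;
; fillPage - fill a physical page with a 32-bit pattern
;
; R3 = physical page number (ppnum), R4 = pattern to fill with
;
; We run with translation and interruptions off, replicate the pattern
; across eight registers, and paint the 4K page a cache line at a time
; (dcbz/stw on 32-bit machines, dcbz128/std on 64-bit ones).
;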
5430 .align 5
5431 .globl EXT(fillPage)
5432
5433LEXT(fillPage)
5434
5435 mfsprg r0,2 ; Get feature flags
5436 mtcrf 0x02,r0 ; move pf64Bit to cr
5437
5438 rlwinm r4,r4,0,1,0 ; Copy fill to top of 64-bit register
5439 lis r2,0x0200 ; Get vec
5440 mr r6,r4 ; Copy
5441 ori r2,r2,0x2000 ; Get FP
5442 mr r7,r4 ; Copy
5443 mfmsr r5 ; Get MSR
5444 mr r8,r4 ; Copy
5445 andc r5,r5,r2 ; Clear out permanent turn-offs
5446 mr r9,r4 ; Copy
5447 ori r2,r2,0x8030 ; Clear IR, DR and EE
5448 mr r10,r4 ; Copy
5449 andc r0,r5,r2 ; Kill them
5450 mr r11,r4 ; Copy
5451 mr r12,r4 ; Copy
5452 bt++ pf64Bitb,fpSF1 ; skip if 64-bit (only they take the hint)
5453
5454 slwi r3,r3,12 ; Make into a physical address
5455 mtmsr r2 ; Interrupts and translation off
5456 isync
5457
5458 li r2,4096/32 ; Get number of cache lines
5459
5460fp32again: dcbz 0,r3 ; Clear
5461 addic. r2,r2,-1 ; Count down
5462 stw r4,0(r3) ; Fill
5463 stw r6,4(r3) ; Fill
5464 stw r7,8(r3) ; Fill
5465 stw r8,12(r3) ; Fill
5466 stw r9,16(r3) ; Fill
5467 stw r10,20(r3) ; Fill
5468 stw r11,24(r3) ; Fill
5469 stw r12,28(r3) ; Fill
5470 addi r3,r3,32 ; Point next
5471 bgt+ fp32again ; Keep going
5472
5473 mtmsr r5 ; Restore all
 5474 isync
5475 blr ; Return...
5476
5477 .align 5
5478
5479fpSF1: li r2,1
5480 sldi r2,r2,63 ; Get 64-bit bit
5481 or r0,r0,r2 ; Turn on 64-bit
5482 sldi r3,r3,12 ; Make into a physical address
 5483
 5484 mtmsrd r0 ; Interrupts and translation off
 5485 isync
5486
5487 li r2,4096/128 ; Get number of cache lines
5488
5489fp64again: dcbz128 0,r3 ; Clear
5490 addic. r2,r2,-1 ; Count down
5491 std r4,0(r3) ; Fill
5492 std r6,8(r3) ; Fill
5493 std r7,16(r3) ; Fill
5494 std r8,24(r3) ; Fill
5495 std r9,32(r3) ; Fill
5496 std r10,40(r3) ; Fill
5497 std r11,48(r3) ; Fill
5498 std r12,56(r3) ; Fill
5499 std r4,64+0(r3) ; Fill
5500 std r6,64+8(r3) ; Fill
5501 std r7,64+16(r3) ; Fill
5502 std r8,64+24(r3) ; Fill
5503 std r9,64+32(r3) ; Fill
5504 std r10,64+40(r3) ; Fill
5505 std r11,64+48(r3) ; Fill
5506 std r12,64+56(r3) ; Fill
5507 addi r3,r3,128 ; Point next
5508 bgt+ fp64again ; Keep going
5509
5510 mtmsrd r5 ; Restore all
5511 isync
5512 blr ; Return...
5513
5514 .align 5
5515 .globl EXT(mapLog)
5516
5517LEXT(mapLog)
5518
5519 mfmsr r12
5520 lis r11,hi16(EXT(mapdebug))
5521 ori r11,r11,lo16(EXT(mapdebug))
5522 lwz r10,0(r11)
5523 mr. r10,r10
5524 bne++ mLxx
5525 mr r10,r3
5526mLxx: rlwinm r0,r12,0,MSR_DR_BIT+1,MSR_DR_BIT-1
5527 mtmsr r0
5528 isync
5529 stw r4,0(r10)
5530 stw r4,4(r10)
5531 stw r5,8(r10)
5532 stw r6,12(r10)
5533 mtmsr r12
5534 isync
5535 addi r10,r10,16
5536 stw r10,0(r11)
 5537 blr
5538
5539#if 1
5540 .align 5
5541 .globl EXT(checkBogus)
5542
5543LEXT(checkBogus)
5544
5545 BREAKPOINT_TRAP
5546 blr ; No-op normally
5547
5548#endif
5549
5550
5551
5552